Schema of the table below (field: dtype, reconstructed from the flattened header):

hexsha: string | size: int64 | ext: string | lang: string

Repo metadata, repeated for the max_stars, max_issues and max_forks groups (substitute stars/issues/forks for *):
max_*_repo_path: string | max_*_repo_name: string | max_*_repo_head_hexsha: string | max_*_repo_licenses: list | max_*_count: int64 | max_*_repo_*_event_min_datetime: string | max_*_repo_*_event_max_datetime: string

content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64

Quality-signal fields, named qsc_code_* or qsc_codepython_* with a _quality_signal suffix (float64 unless noted): num_words (int64), num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams through frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert; Python-specific: cate_ast, frac_lines_func_ratio, cate_var_zero (bool), frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print.

Companion flag fields carry the same names minus the _quality_signal suffix (int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null throughout this preview).

effective: string | hits: int64
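Several of these signals can be recomputed directly from `content`. A minimal sketch follows, assuming straightforward definitions; the pipeline's exact tokenization and normalization are not documented here, so every formula below is an assumption:

```python
import re

def basic_signals(content: str) -> dict:
    """Recompute a few per-file statistics from a raw source string."""
    words = re.findall(r"\w+", content)        # assumed word tokenization
    lines = content.splitlines()
    n = len(content)
    return {
        "qsc_code_num_chars": n,
        "qsc_code_num_words": len(words),
        "qsc_code_mean_word_length": sum(map(len, words)) / max(len(words), 1),
        "qsc_code_frac_words_unique": len(set(words)) / max(len(words), 1),
        "qsc_code_frac_chars_whitespace": sum(c.isspace() for c in content) / max(n, 1),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(n, 1),
        "qsc_code_num_lines": len(lines),
        "qsc_code_num_chars_line_max": max((len(l) for l in lines), default=0),
        "qsc_code_num_chars_line_mean": n / max(len(lines), 1),
    }
```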
Row 1: hexsha bf81dcb30a4794cb86fb5f2bb8ba29a126370a62 | size 5,422 | ext py | lang Python
All three repo groups reference graph/cortex_DIM/configs/resnets.py in Crazy-Jack/HCL at head dd2aae0c525859c8498205a791058287f86ab111, licenses ["MIT"].
max_stars_count 275 (events 2020-10-22T22:03:33.000Z to 2022-03-25T06:08:05.000Z) | max_issues_count 43 (events 2020-10-30T08:28:01.000Z to 2022-03-31T16:55:12.000Z) | max_forks_count 70 (events 2020-10-28T19:14:18.000Z to 2022-03-27T06:11:51.000Z)
content:
"""Configurations for ResNets
"""
from cortex_DIM.nn_modules.encoder import ResnetEncoder, FoldedResnetEncoder
_resnet19_32x32 = dict(
Encoder=ResnetEncoder,
conv_before_args=[(64, 3, 2, 1, True, False, 'ReLU', None)],
res_args=[
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 2, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 1, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 2, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 1, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1)
],
fc_args=[(1024, True, False, 'ReLU')],
local_idx=4,
fc_idx=0
)
_foldresnet19_32x32 = dict(
Encoder=FoldedResnetEncoder,
crop_size=8,
conv_before_args=[(64, 3, 2, 1, True, False, 'ReLU', None)],
res_args=[
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 2, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 1, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 2, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 1, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1)
],
fc_args=[(1024, True, False, 'ReLU')],
local_idx=6,
fc_idx=0
)
_resnet34_32x32 = dict(
Encoder=ResnetEncoder,
conv_before_args=[(64, 3, 2, 1, True, False, 'ReLU', None)],
res_args=[
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
2),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 2, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 1, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
5),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 2, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 1, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
2)
],
fc_args=[(1024, True, False, 'ReLU')],
local_idx=2,
fc_idx=0
)
_foldresnet34_32x32 = dict(
Encoder=FoldedResnetEncoder,
crop_size=8,
conv_before_args=[(64, 3, 2, 1, True, False, 'ReLU', None)],
res_args=[
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(64, 1, 1, 0, True, False, 'ReLU', None),
(64, 3, 1, 1, True, False, 'ReLU', None),
(64 * 4, 1, 1, 0, True, False, 'ReLU', None)],
2),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 2, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(128, 1, 1, 0, True, False, 'ReLU', None),
(128, 3, 1, 1, True, False, 'ReLU', None),
(128 * 4, 1, 1, 0, True, False, 'ReLU', None)],
5),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 2, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
1),
([(256, 1, 1, 0, True, False, 'ReLU', None),
(256, 3, 1, 1, True, False, 'ReLU', None),
(256 * 4, 1, 1, 0, True, False, 'ReLU', None)],
2)
],
fc_args=[(1024, True, False, 'ReLU')],
local_idx=12,
fc_idx=0
)
configs = dict(
resnet19_32x32=_resnet19_32x32,
resnet34_32x32=_resnet34_32x32,
foldresnet19_32x32=_foldresnet19_32x32,
foldresnet34_32x32=_foldresnet34_32x32
)
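An aside on the content above: the module exposes its four architectures through the `configs` registry. A minimal sketch of the lookup pattern this implies; the constructor signature of `ResnetEncoder`/`FoldedResnetEncoder` is not shown in this file, so the instantiation step is an assumption:

```python
# Hypothetical consumer of the `configs` mapping defined above; the real
# cortex_DIM encoder API is not shown here, so treat this as illustration.
from copy import deepcopy

def get_encoder_spec(name: str):
    cfg = deepcopy(configs[name])   # e.g. 'resnet19_32x32'
    Encoder = cfg.pop('Encoder')    # ResnetEncoder or FoldedResnetEncoder
    return Encoder, cfg             # cfg holds the remaining keyword arguments
```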
avg_line_length 35.907285 | max_line_length 76 | alphanum_fraction 0.453892
Quality signals (zero unless listed): num_words 780, num_chars 5,422, mean_word_length 3.09359, frac_words_unique 0.057692, frac_chars_top_2grams 0.298384, frac_chars_top_3grams 0.430999, frac_chars_top_4grams 0.535433, frac_chars_dupe_5grams through dupe_10grams 0.879403 each, frac_chars_digital 0.157967, frac_chars_whitespace 0.328661, size_file_byte 5,422, num_lines 151, num_chars_line_max 77, num_chars_line_mean 35.907285, frac_chars_alphabet 0.504945, frac_chars_comments 0.004795, frac_lines_dupe_lines 0.865248, frac_chars_string_length 0.05938, cate_ast 1, cate_var_zero false, frac_lines_import 0.007092, score_lines_no_logic 0.007092.
Flag columns set to 1: frac_chars_top_2grams, top_3grams, top_4grams, dupe_5grams through dupe_10grams, frac_lines_dupe_lines; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 10
Row 2: hexsha bfa9263f89b7852c0ce001461765ef0a31ee0786 | size 128 | ext py | lang Python
All three repo groups reference tests/conftest.py in AOstenfeld/turbopy at head dc58f6482bd99385574a229739a32bcf3fb18fb2, licenses ["CC0-1.0"].
max_stars_count 12 (events 2020-06-01T14:01:50.000Z to 2021-11-29T01:09:55.000Z) | max_issues_count 143 (events 2020-05-30T13:18:34.000Z to 2021-08-25T13:27:10.000Z) | max_forks_count 34 (events 2020-05-30T12:06:02.000Z to 2021-07-23T14:22:37.000Z)
content:
import pytest
from fixtures.particle_in_field.particle_in_field import *
from fixtures.block_on_spring.block_on_spring import *
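A note on the conftest above: the wildcard imports work but hide where each fixture comes from. pytest's `pytest_plugins` hook is a common alternative in a root conftest.py (assuming these modules are importable from the test rootdir):

```python
# Equivalent fixture registration without `import *` (sketch):
pytest_plugins = [
    "fixtures.particle_in_field.particle_in_field",
    "fixtures.block_on_spring.block_on_spring",
]
```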
avg_line_length 32 | max_line_length 58 | alphanum_fraction 0.875
Quality signals (zero unless listed): num_words 20, num_chars 128, mean_word_length 5.2, frac_words_unique 0.5, frac_chars_top_2grams 0.230769, frac_chars_top_3grams 0.288462, frac_chars_whitespace 0.078125, size_file_byte 128, num_lines 3, num_chars_line_max 59, num_chars_line_mean 42.666667, frac_chars_alphabet 0.881356, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1.
Flag columns set to 1: num_words, frac_chars_top_2grams, top_3grams, num_lines, cate_var_zero, frac_lines_import, score_lines_no_logic; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 7
Row 3: hexsha 44a63b116e3ce00f299bc2d79e3a2e065dffb0f1 | size 13,619 | ext py | lang Python
All three repo groups reference optimization/prac1/tests/test_optimizer.py in shaandesai1/AIMS at head fee0be214b393af2184d565eb1e9aebb4eb6eeec, licenses ["MIT"]; all counts and event datetimes null.
content:
import unittest
import itertools
import math
import torch
from torch.utils import data
import numpy as np
from .utils import assert_all_close, assert_all_close_dict
from optim import GD, SGD, BCFW, ClosedForm
from optim import HParamsBCFW, HParamsGD, HParamsClosedForm, HParamsSGD
from objective import Ridge_Gradient, Ridge_ClosedForm, SVM_ConditionalGradient, \
SVM_SubGradient
class TestOpt_BCFW(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
torch.manual_seed(1234)
self.n_features = 3
self.n_samples = 4
self.batch_size = 2
self.n_classes = 3
self.verbose = 0
self.mu = 1
self.n_epochs = 500
x = torch.randn(self.n_samples, self.n_features)
label_dist = torch.distributions.categorical.Categorical(torch.ones(self.n_classes))
y = label_dist.sample((self.n_samples,))
self.dataset = data.TensorDataset(x, y)
self.dataset.target_type = torch.LongTensor()
self.oracle_info = dict(i=1,
w_s=torch.randn(self.n_features, self.n_classes),
l_s=torch.rand([]))
self.hparams = HParamsBCFW(n_features=self.n_features,
n_samples=self.n_samples,
batch_size=self.batch_size,
n_classes=self.n_classes,
verbose=self.verbose,
mu=self.mu)
def test_step(self):
w_ref = torch.tensor([[-0.0421, 0.0984, -0.0016],
[-0.2332, -0.0329, 0.1408],
[-0.1098, 0.1021, 0.0310]])
l_ref = torch.tensor(0.1102)
w_i_ref = torch.tensor([[[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000]],
[[-0.0421, 0.0984, -0.0016],
[-0.2332, -0.0329, 0.1408],
[-0.1098, 0.1021, 0.0310]]])
l_i_ref = torch.tensor([0., 0.1102])
opt = BCFW(self.hparams)
opt.step(self.oracle_info)
assert_all_close(opt.variables.w, w_ref, "variable w_ref")
assert_all_close(opt.variables.w_i, w_i_ref, "variable w_i_ref")
assert_all_close(opt.variables.ll, l_ref, "variable l_ref")
assert_all_close(opt.variables.l_i, l_i_ref, "variable l_i_ref")
def test_convergence(self):
w_ref = torch.tensor([[0.2977, -0.1279, -0.1698],
[0.2401, -0.1616, -0.0784],
[0.0239, 0.1476, -0.1715]])
opt = BCFW(self.hparams)
obj = SVM_ConditionalGradient(self.hparams)
for _ in range(self.n_epochs):
for i, x, y in opt.get_sampler(self.dataset):
oracle_info = obj.oracle(opt.variables.w, x, y)
oracle_info['i'] = i
opt.step(oracle_info)
w_test = opt.variables.w
assert_all_close(w_test, w_ref, "final value after training")
def test_variables_size(self):
opt = BCFW(self.hparams)
self.assertEqual(opt.variables.ll.size(),
torch.Size(),
"size of ll variable")
self.assertEqual(opt.variables.l_i.size(),
torch.Size([self.hparams.n_blocks]),
"size of l_i variable")
self.assertEqual(opt.variables.w.size(),
torch.Size([self.hparams.n_features, self.hparams.n_classes]),
"size of w variable")
self.assertEqual(opt.variables.w_i.size(),
torch.Size([self.hparams.n_blocks,
self.hparams.n_features,
self.hparams.n_classes]),
"size of w_i variable")
class TestOpt_GD(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
torch.manual_seed(1234)
self.n_features = 3
self.n_samples = 4
self.n_classes = 1
self.verbose = 0
self.init_lr = 1e-2
self.fix_lr = True
self.mu = 10
self.n_epochs = 500
x = torch.randn(self.n_samples, self.n_features)
y = torch.randn(self.n_samples)
self.dataset = data.TensorDataset(x, y)
self.dataset.target_type = torch.FloatTensor()
self.oracle_info = dict(dw=torch.randn(self.n_features, 1, requires_grad=True))
self.hparams = HParamsGD(n_features=self.n_features,
n_samples=self.n_samples,
n_classes=self.n_classes,
verbose=self.verbose,
init_lr=self.init_lr,
fix_lr=self.fix_lr,
mu=self.mu)
def test_step(self):
init_lr_vals = [1, 1e-1, 1e-2]
fix_lr_vals = [True, False]
w_refs = [
torch.tensor([[0.6719],
[-0.6090],
[0.5513]]),
torch.tensor([[0.7289],
[0.0941],
[0.7109]]),
torch.tensor([[0.4451],
[0.0045],
[0.8849]]),
torch.tensor([[0.8170],
[0.7325],
[0.8992]]),
torch.tensor([[0.6865],
[0.7597],
[0.9150]]),
torch.tensor([[0.4001],
[0.1080],
[0.2541]])
]
it_refs = [10] * 6
lr_refs = [1., 1. / math.sqrt(9), 0.1, 0.1 / math.sqrt(9), 0.01, 0.01 / math.sqrt(9)]
for i, (init_lr, fix_lr) in enumerate(itertools.product(init_lr_vals, fix_lr_vals)):
hparams = HParamsGD(n_features=self.n_features,
n_samples=self.n_samples,
n_classes=self.n_classes,
verbose=self.verbose,
init_lr=init_lr,
fix_lr=fix_lr)
opt = GD(hparams)
opt.variables.it.fill_(9)
opt.step(self.oracle_info)
assert_all_close(opt.variables.w, w_refs[i],
"variable w for init_lr={} and fix_lr={}".format(init_lr, fix_lr))
assert_all_close(opt.variables.lr, lr_refs[i],
"variable lr for init_lr={} and fix_lr={}".format(init_lr, fix_lr))
assert_all_close(opt.variables.it, it_refs[i],
"variable it for init_lr={} and fix_lr={}".format(init_lr, fix_lr))
def test_convergence(self):
opt = GD(self.hparams)
obj = Ridge_Gradient(self.hparams)
w_ref = torch.tensor([[0.0630], [0.0347], [-0.0308]])
for i in range(self.n_epochs):
for _, x, y in opt.get_sampler(self.dataset):
oracle_info = obj.oracle(opt.variables.w, x, y)
opt.step(oracle_info)
w_test = opt.variables.w
assert_all_close(w_test, w_ref, "final value after training")
def test_variables_size(self):
opt = GD(self.hparams)
self.assertEqual(opt.variables.w.size(),
torch.Size([self.hparams.n_features,
self.hparams.n_classes]), "size of w variable")
self.assertEqual(opt.variables.lr.size(), torch.Size(), "size of lr variable")
self.assertEqual(opt.variables.it.size(), torch.Size(), "size of it variable")
class TestOpt_SGD(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
torch.manual_seed(1234)
self.n_features = 3
self.n_samples = 4
self.batch_size = 2
self.n_classes = 1
self.verbose = 0
self.init_lr = 1e-3
self.fix_lr = True
self.mu = 10
self.n_epochs = 500
x = torch.randn(self.n_samples, self.n_features)
y = torch.randn(self.n_samples)
self.dataset = data.TensorDataset(x, y)
self.dataset.target_type = torch.FloatTensor()
self.oracle_info = dict(dw=torch.randn(self.n_features, 1, requires_grad=True))
self.hparams = HParamsSGD(n_features=self.n_features,
n_samples=self.n_samples,
n_classes=self.n_classes,
mu=self.mu,
verbose=self.verbose,
batch_size=self.batch_size,
init_lr=self.init_lr,
fix_lr=self.fix_lr)
def test_step(self):
init_lr_vals = [1, 1e-1, 1e-2]
fix_lr_vals = [True, False]
w_refs = [
torch.tensor([[0.6719],
[-0.6090],
[0.5513]]),
torch.tensor([[0.7289],
[0.0941],
[0.7109]]),
torch.tensor([[0.4451],
[0.0045],
[0.8849]]),
torch.tensor([[0.8170],
[0.7325],
[0.8992]]),
torch.tensor([[0.6865],
[0.7597],
[0.9150]]),
torch.tensor([[0.4001],
[0.1080],
[0.2541]])
]
it_refs = [10] * 6
lr_refs = [1., 1. / math.sqrt(9), 0.1, 0.1 / math.sqrt(9), 0.01, 0.01 / math.sqrt(9)]
for i, (init_lr, fix_lr) in enumerate(itertools.product(init_lr_vals, fix_lr_vals)):
hparams = HParamsSGD(n_features=self.n_features,
n_samples=self.n_samples,
n_classes=self.n_classes,
mu=self.mu,
verbose=self.verbose,
batch_size=self.batch_size,
init_lr=init_lr,
fix_lr=fix_lr)
opt = SGD(hparams)
opt.variables.it.fill_(9)
opt.step(self.oracle_info)
assert_all_close(opt.variables.w, w_refs[i],
"variable w for init_lr={} and fix_lr={}".format(init_lr, fix_lr))
assert_all_close(opt.variables.lr, lr_refs[i],
"variable lr for init_lr={} and fix_lr={}".format(init_lr, fix_lr))
assert_all_close(opt.variables.it, it_refs[i],
"variable it for init_lr={} and fix_lr={}".format(init_lr, fix_lr))
def test_convergence(self):
opt = SGD(self.hparams)
obj = Ridge_Gradient(self.hparams)
w_ref = torch.tensor([[0.0630], [0.0347], [-0.0308]])
for i in range(self.n_epochs):
for _, x, y in opt.get_sampler(self.dataset):
oracle_info = obj.oracle(opt.variables.w, x, y)
opt.step(oracle_info)
w_test = opt.variables.w
assert_all_close(w_test, w_ref, "final value after training")
def test_variables_size(self):
opt = SGD(self.hparams)
self.assertEqual(opt.variables.w.size(),
torch.Size([self.hparams.n_features,
self.hparams.n_classes]), "size of w variable")
self.assertEqual(opt.variables.lr.size(), torch.Size(), "size of lr variable")
self.assertEqual(opt.variables.it.size(), torch.Size(), "size of it variable")
class TestOpt_ClosedForm(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
torch.manual_seed(1234)
self.n_features = 3
self.n_samples = 4
self.mu = 10
self.n_epochs = 10
self.hparams = HParamsClosedForm(n_features=self.n_features,
n_samples=self.n_samples,
verbose=0,
mu=self.mu)
x = torch.randn(self.n_samples, self.n_features)
y = torch.randn(self.n_samples)
self.dataset = data.TensorDataset(x, y)
self.dataset.target_type = torch.FloatTensor()
self.sol = dict(sol=torch.randn((self.n_features, 1), requires_grad=True))
def test_step(self):
opt = ClosedForm(self.hparams)
opt.step(self.sol)
w_refs = torch.Tensor([[-0.2611], [0.6104], [-0.0098]])
assert_all_close(opt.variables.w, w_refs, "variable w")
def test_convergence(self):
opt = ClosedForm(self.hparams)
obj = Ridge_ClosedForm(self.hparams)
w_ref = torch.tensor([[0.0630], [0.0347], [-0.0308]])
for i in range(self.n_epochs):
for _, x, y in opt.get_sampler(self.dataset):
oracle_info = obj.oracle(opt.variables.w, x, y)
opt.step(oracle_info)
if i > 0:
w_test = opt.variables.w
assert_all_close(w_test, w_ref, "final value after training")
def test_variables_size(self):
opt = ClosedForm(self.hparams)
self.assertEqual(opt.variables.w.size(),
torch.Size([self.hparams.n_features, 1]), "size of w variable")
if __name__ == '__main__':
    unittest.main()
avg_line_length 39.590116 | max_line_length 96 | alphanum_fraction 0.503781
Quality signals (zero unless listed): num_words 1,649, num_chars 13,619, mean_word_length 3.967253, frac_words_unique 0.103699, frac_chars_top_2grams 0.041272, frac_chars_top_3grams 0.03852, frac_chars_top_4grams 0.020177, frac_chars_dupe_5grams 0.85295, dupe_6grams 0.812748, dupe_7grams 0.78126, dupe_8grams 0.776521, dupe_9grams 0.756038, dupe_10grams 0.744115, frac_chars_digital 0.065853, frac_chars_whitespace 0.380057, size_file_byte 13,619, num_lines 343, num_chars_line_max 97, num_chars_line_mean 39.705539, frac_chars_alphabet 0.70899, frac_lines_dupe_lines 0.72069, frac_chars_string_length 0.046112, frac_lines_assert 0.093103, cate_ast 1, frac_lines_func_ratio 0.055172, cate_var_zero false, frac_lines_import 0.034483, score_lines_no_logic 0.103448.
Flag columns set to 1: frac_chars_dupe_5grams through dupe_10grams, frac_lines_dupe_lines; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 7
Row 4: hexsha 44ae96909cd08945733557966191811bb5b027f4 | size 6,706 | ext py | lang Python
All three repo groups reference tests/test_app_update_status_tracking.py in fotahub/fotahub-device-sdk-yocto at head 33c16cf626569dac747ebf27c29e43ace9a01d72, licenses ["Apache-2.0"].
max_stars_count and max_issues_count (and their event datetimes) null | max_forks_count 1 (events 2021-11-16T14:39:37.000Z to 2021-11-16T14:39:37.000Z)
content:
import json
import time
import tempfile
from fotahubclient.config_loader import ConfigLoader
from fotahubclient.json_document_models import UpdateCompletionState
from fotahubclient.update_status_tracker import UpdateStatusTracker
def test_app_update_status__consecutive_updates():
with tempfile.NamedTemporaryFile() as temp:
config = ConfigLoader()
config.update_status_path = temp.name
app_name = 'my-app'
# First app update
with UpdateStatusTracker(config) as tracker:
tracker.record_app_update_status(app_name, revision='3fa209348038674d5e701515d3e26746b18c2cbf555044d4f93f8c424e3642d8', completion_state=UpdateCompletionState.initiated)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.downloaded)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.verified)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.applied)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.confirmed, message='Application update successfully completed')
temp.seek(0)
json_data = json.load(temp)
assert 'UpdateStatuses' in json_data
assert len(json_data['UpdateStatuses']) == 1
update_status_data = json_data['UpdateStatuses'][0]
assert update_status_data['ArtifactName'] == app_name
assert update_status_data['ArtifactKind'] == 'Application'
assert update_status_data['Revision'] == '3fa209348038674d5e701515d3e26746b18c2cbf555044d4f93f8c424e3642d8'
assert type(update_status_data['Timestamp']) == int
assert update_status_data['Timestamp'] - int(time.time()) < 1
assert update_status_data['CompletionState'] == 'Confirmed'
assert type(update_status_data['Status']) == bool
assert update_status_data['Status'] == True
assert update_status_data['Message'] == 'Application update successfully completed'
# Second app update
with UpdateStatusTracker(config) as tracker:
tracker.record_app_update_status(app_name, revision='46a89ce4ecbcd0c8f53f34e53c6fd4736ec21019487ee9525933596d2be72fbd', completion_state=UpdateCompletionState.initiated)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.downloaded)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.verified)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.applied)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.confirmed, message='Application update successfully completed')
temp.seek(0)
json_data = json.load(temp)
assert 'UpdateStatuses' in json_data
assert len(json_data['UpdateStatuses']) == 1
update_status_data = json_data['UpdateStatuses'][0]
assert update_status_data['ArtifactName'] == app_name
assert update_status_data['ArtifactKind'] == 'Application'
assert update_status_data['Revision'] == '46a89ce4ecbcd0c8f53f34e53c6fd4736ec21019487ee9525933596d2be72fbd'
assert type(update_status_data['Timestamp']) == int
assert update_status_data['Timestamp'] - int(time.time()) < 1
assert update_status_data['CompletionState'] == 'Confirmed'
assert type(update_status_data['Status']) == bool
assert update_status_data['Status'] == True
assert update_status_data['Message'] == 'Application update successfully completed'
def test_app_update_status__update_rollback():
with tempfile.NamedTemporaryFile() as temp:
config = ConfigLoader()
config.update_status_path = temp.name
app_name = 'my-app'
# App update
with UpdateStatusTracker(config) as tracker:
tracker.record_app_update_status(app_name, revision='3fa209348038674d5e701515d3e26746b18c2cbf555044d4f93f8c424e3642d8', completion_state=UpdateCompletionState.initiated)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.downloaded)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.verified)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.applied)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.confirmed, message='Application update successfully completed')
temp.seek(0)
json_data = json.load(temp)
assert 'UpdateStatuses' in json_data
assert len(json_data['UpdateStatuses']) == 1
update_status_data = json_data['UpdateStatuses'][0]
assert update_status_data['ArtifactName'] == app_name
assert update_status_data['ArtifactKind'] == 'Application'
assert update_status_data['Revision'] == '3fa209348038674d5e701515d3e26746b18c2cbf555044d4f93f8c424e3642d8'
assert type(update_status_data['Timestamp']) == int
assert update_status_data['Timestamp'] - int(time.time()) < 1
assert update_status_data['CompletionState'] == 'Confirmed'
assert type(update_status_data['Status']) == bool
assert update_status_data['Status'] == True
assert update_status_data['Message'] == 'Application update successfully completed'
# App rollback
with UpdateStatusTracker(config) as tracker:
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.invalidated)
tracker.record_app_update_status(app_name, completion_state=UpdateCompletionState.rolled_back, message='Update rolled back due to application-level or external request')
temp.seek(0)
json_data = json.load(temp)
assert 'UpdateStatuses' in json_data
assert len(json_data['UpdateStatuses']) == 1
update_status_data = json_data['UpdateStatuses'][0]
assert update_status_data['ArtifactName'] == app_name
assert update_status_data['ArtifactKind'] == 'Application'
assert update_status_data['Revision'] == '3fa209348038674d5e701515d3e26746b18c2cbf555044d4f93f8c424e3642d8'
assert type(update_status_data['Timestamp']) == int
assert update_status_data['Timestamp'] - int(time.time()) < 1
assert update_status_data['CompletionState'] == 'RolledBack'
assert type(update_status_data['Status']) == bool
assert update_status_data['Status'] == True
assert update_status_data['Message'] == 'Update rolled back due to application-level or external request'
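The same nine-assertion block recurs four times above; a sketch of one helper that would collapse them (helper name and signature are illustrative, not part of the fotahub SDK):

```python
import time

def assert_update_status(data, name, revision, state, message):
    """Check one UpdateStatuses entry against the expected field values."""
    assert data['ArtifactName'] == name
    assert data['ArtifactKind'] == 'Application'
    assert data['Revision'] == revision
    assert isinstance(data['Timestamp'], int)
    assert data['Timestamp'] - int(time.time()) < 1
    assert data['CompletionState'] == state
    assert data['Status'] is True
    assert data['Message'] == message
```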
avg_line_length 58.824561 | max_line_length 181 | alphanum_fraction 0.73904
Quality signals (zero unless listed): num_words 686, num_chars 6,706, mean_word_length 6.919825, frac_words_unique 0.106414, frac_chars_top_2grams 0.156731, frac_chars_top_3grams 0.134822, frac_chars_top_4grams 0.129766, frac_chars_dupe_5grams 0.912155, dupe_6grams through dupe_10grams 0.902886 each, frac_chars_digital 0.059591, frac_chars_whitespace 0.176707, size_file_byte 6,706, num_lines 114, num_chars_line_max 182, num_chars_line_mean 58.824561, frac_chars_alphabet 0.800217, frac_chars_comments 0.008649, frac_lines_dupe_lines 0.849462, frac_chars_string_length 0.213275, frac_chars_long_word_length 0.067429, frac_lines_assert 0.473118, cate_ast 1, frac_lines_func_ratio 0.021505, cate_var_zero false, frac_lines_import 0.064516, score_lines_no_logic 0.086022.
Flag columns set to 1: frac_chars_dupe_5grams through dupe_10grams, frac_lines_dupe_lines, frac_lines_assert; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 8
Row 5: hexsha 784997dea3f44f35001b22dbe6219a74057a93a0 | size 62 | ext py | lang Python
All three repo groups reference Python/Tests/TestData/AddImport/test_package/__init__.py in nanshuiyu/pytools at head 9f9271fe8cf564b4f94e9456d400f4306ea77c23, licenses ["Apache-2.0"]; all counts and event datetimes null.
content:
def package_method(): pass
def package_method_two(): pass
avg_line_length 15.5 | max_line_length 31 | alphanum_fraction 0.741935
Quality signals (zero unless listed): num_words 9, num_chars 62, mean_word_length 4.777778, frac_words_unique 0.555556, frac_chars_top_2grams 0.465116, frac_chars_top_3grams 0.744186, frac_chars_whitespace 0.16129, size_file_byte 62, num_lines 3, num_chars_line_max 32, num_chars_line_mean 20.666667, frac_chars_alphabet 0.826923, cate_ast 1, frac_lines_func_ratio 1, cate_var_zero true, frac_lines_pass 1, score_lines_no_logic 1.
Flag columns set to 1: frac_chars_top_2grams, top_3grams, num_lines, frac_lines_func_ratio, cate_var_zero, frac_lines_pass; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 7
Row 6: hexsha 153f58cb2d7e44ebd96b2438ee4473c7b51e5244 | size 5,813 | ext py | lang Python
All three repo groups reference ljwtrader/data/models.py in leowotzak/LJWEquities at head adcb245b6fd54be424aed2c27879d77e221db751, licenses ["MIT"]; all counts and event datetimes null.
content:
#!/usr/bin/env python
from sqlalchemy import Column, DateTime, Float, ForeignKey, Integer, String
from sqlalchemy.orm import declarative_base
Base = declarative_base()
class Symbols(Base):
__tablename__ = 'symbols'
symbol_id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String)
ticker = Column(String)
description = Column(String)
sector = Column(String)
asset_type = Column(String)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f'Symbol(symbol_id={self.symbol_id}, ticker={self.ticker}, name={self.name})'
class OneMinuteBar(Base):
__tablename__ = 'one_minute_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
volume = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"DailyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class FiveMinuteBar(Base):
__tablename__ = 'five_minute_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
volume = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"DailyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class FifteenMinuteBar(Base):
__tablename__ = 'fifteen_minute_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
volume = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"DailyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class ThirtyMinuteBar(Base):
__tablename__ = 'thirty_minute_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
volume = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"DailyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class SixtyMinuteBar(Base):
__tablename__ = 'sixty_minute_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
volume = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"DailyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class DailyBar(Base):
__tablename__ = 'daily_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
adj_close_price = Column(Float)
volume = Column(Float)
dividend_amount = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"DailyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class WeeklyBar(Base):
__tablename__ = 'weekly_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
adj_close_price = Column(Float)
volume = Column(Float)
dividend_amount = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"WeeklyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
class MonthlyBar(Base):
__tablename__ = 'monthly_bar_data'
timestamp = Column(DateTime, primary_key=True)
symbol_id = Column(Integer,
ForeignKey('symbols.symbol_id'),
primary_key=True)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
adj_close_price = Column(Float)
volume = Column(Float)
dividend_amount = Column(Float)
created_date = Column(DateTime)
last_updated_date = Column(DateTime)
def __repr__(self):
return f"MonthlyBar(symbol_id={self.symbol_id}, timestamp={self.timestamp}, close_price={self.close_price})"
avg_line_length 30.920213 | max_line_length 116 | alphanum_fraction 0.672974
Quality signals (zero unless listed): num_words 678, num_chars 5,813, mean_word_length 5.427729, frac_words_unique 0.10177, frac_chars_top_2grams 0.1375, frac_chars_top_3grams 0.152174, frac_chars_top_4grams 0.062772, frac_chars_dupe_5grams 0.832609, dupe_6grams through dupe_10grams 0.827174 each, frac_chars_whitespace 0.223981, size_file_byte 5,813, num_lines 187, num_chars_line_max 117, num_chars_line_mean 31.085562, frac_chars_alphabet 0.815784, frac_chars_comments 0.003441, frac_lines_dupe_lines 0.787234, frac_chars_string_length 0.196478, frac_chars_long_word_length 0.151243, cate_ast 1, frac_lines_func_ratio 0.06383, cate_var_zero false, frac_lines_import 0.014184, frac_lines_simplefunc 0.06383, score_lines_no_logic 0.879433.
Flag columns set to 1: frac_chars_dupe_5grams through dupe_10grams, frac_lines_dupe_lines; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 7
Row 7: hexsha 154dcea0763f9b2e7020679d1ee26bea747ae750 | size 9,111 | ext py | lang Python
All three repo groups reference owasp-top10-2017-apps/a7/gossip-world/app/model/db.py in sandroventura/secDevLabs at head 51afbe04151a96e9113c0d812a5e976f9e0cd352, licenses ["BSD-3-Clause"].
max_stars_count 684 (events 2018-12-21T20:08:24.000Z to 2022-03-28T17:39:55.000Z) | max_issues_count 306 (events 2018-12-18T15:03:54.000Z to 2022-03-31T14:26:52.000Z) | max_forks_count 305 (events 2018-12-27T18:36:52.000Z to 2022-03-27T22:15:40.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
class DataBase:
def __init__(self, host, user, password, database):
self.host = host
self.user = user
self.password = password
self.database = database
self.connect()
def connect(self):
self.db = MySQLdb.connect(host=self.host,
user=self.user,
passwd=self.password,
db=self.database,
charset='utf8')
self.c = self.db.cursor()
def get_user_password(self, username):
try:
self.c.execute(
'SELECT password FROM users WHERE user = %s', [username])
user_password = self.c.fetchone()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
                'SELECT password FROM users WHERE user = %s', [username])
user_password = self.c.fetchone()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return user_password, 1
def insert_user(self, user, password):
try:
self.c.execute(
'INSERT INTO users (user, password) VALUES (%s, %s);',
(user, password))
self.db.commit()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
'INSERT INTO users (user, password) VALUES (%s, %s);',
(user, password))
self.db.commit()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return '', 1
def get_latest_gossips(self):
try:
self.c.execute(
'SELECT id, text, author, title, subtitle, date FROM gossips')
gossips = self.c.fetchall()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
                'SELECT id, text, author, title, subtitle, date FROM gossips')
gossips = self.c.fetchall()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return gossips, 1
def search_gossips(self, search_str):
try:
self.c.execute(
'SELECT id, text, author, title, subtitle, date FROM gossips WHERE title LIKE %s', ['%'+ search_str + '%'])
gossips = self.c.fetchall()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
'SELECT id, text, author, title, subtitle, date FROM gossips WHERE title LIKE %s', ['%'+ search_str + '%'])
gossips = self.c.fetchall()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return gossips, 1
def get_gossip(self, id):
try:
self.c.execute(
'SELECT id, text, author, title, subtitle, date FROM gossips WHERE id = %s', [id])
gossip = self.c.fetchone()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
'SELECT id, text, author, title, subtitle, date FROM gossips WHERE id = %s', [id])
gossip = self.c.fetchone()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return gossip, 1
def get_comments(self, id):
try:
self.c.execute(
'SELECT author, comment, date FROM comments WHERE gossip_id = %s', [id])
comments = self.c.fetchall()
# comments = (1,2)
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
'SELECT author, comment, date FROM comments WHERE gossip_id = %s', [id])
comments = self.c.fetchall()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
if comments == ():
return None, 1
return comments, 1
def post_comment(self, author, comment, gossip_id, date):
try:
self.c.execute(
'INSERT INTO comments (author, comment, gossip_id, date) VALUES (%s, %s, %s, %s);',
(author, comment, gossip_id, date))
self.db.commit()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
'INSERT INTO comments (author, comment, gossip_id, date) VALUES (%s, %s, %s, %s);',
(author, comment, gossip_id, date))
self.db.commit()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return '', 1
def post_gossip(self, author, text, title, subtitle, date):
try:
self.c.execute(
'INSERT INTO gossips (author, text, title, subtitle, date) VALUES (%s, %s, %s, %s, %s);',
(author, text, title, subtitle, date))
self.db.commit()
except (AttributeError, MySQLdb.OperationalError):
self.connect()
self.c.execute(
'INSERT INTO gossips (author, text, title, subtitle, date) VALUES (%s, %s, %s, %s, %s);',
(author, text, title, subtitle, date))
self.db.commit()
except MySQLdb.Error as e:
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return '', 1
def init_table_user(self):
try:
self.c.execute(
'CREATE TABLE users (user VARCHAR(100) NOT NULL, password VARCHAR(100) NOT NULL)')
self.db.commit()
except (AttributeError, MySQLdb.OperationalError, MySQLdb.Error) as e:
self.connect()
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
return '', 1
def init_table_gossips(self):
try:
self.c.execute(
'CREATE TABLE gossips (id INT(10) NOT NULL AUTO_INCREMENT, author VARCHAR(100) NOT NULL, text VARCHAR(2000) NOT NULL, title VARCHAR(100) NOT NULL, subtitle VARCHAR(200), date DATE NOT NULL, PRIMARY KEY (id))')
self.db.commit()
except (AttributeError, MySQLdb.OperationalError, MySQLdb.Error) as e:
self.connect()
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
        return '', 1
def init_table_comments(self):
try:
self.c.execute(
'CREATE TABLE comments (author VARCHAR(100) NOT NULL, comment VARCHAR(100) NOT NULL, gossip_id INT NOT NULL, date DATE NOT NULL)')
self.db.commit()
except (AttributeError, MySQLdb.OperationalError, MySQLdb.Error) as e:
self.connect()
try:
message = 'MySQL Error [%d]: %s' % (e.args[0], e.args[1])
return message, 0
except IndexError:
message = 'MySQL Error: %s' % str(e)
return message, 0
        return '', 1
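A note on the content above: every query method repeats the same reconnect-and-retry and error-formatting boilerplate. A sketch of that pattern factored into a decorator (illustrative only, not code from secDevLabs); each method body would then keep only the happy path and its `(result, 1)` return:

```python
import functools

import MySQLdb

def with_reconnect(method):
    """Retry once after reconnecting; format MySQL errors like the class does."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except (AttributeError, MySQLdb.OperationalError):
            self.connect()  # stale or missing handle: reconnect, then retry once
            return method(self, *args, **kwargs)
        except MySQLdb.Error as e:
            try:
                return 'MySQL Error [%d]: %s' % (e.args[0], e.args[1]), 0
            except IndexError:
                return 'MySQL Error: %s' % str(e), 0
    return wrapper
```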
avg_line_length 39.441558 | max_line_length 229 | alphanum_fraction 0.495884
Quality signals (zero unless listed): num_words 976, num_chars 9,111, mean_word_length 4.593238, frac_words_unique 0.088115, frac_chars_top_2grams 0.03346, frac_chars_top_3grams 0.083426, frac_chars_top_4grams 0.036806, frac_chars_dupe_5grams 0.830471, dupe_6grams 0.809726, dupe_7grams 0.805041, dupe_8grams 0.778497, dupe_9grams 0.755298, dupe_10grams 0.755298, frac_chars_digital 0.015302, frac_chars_whitespace 0.390297, size_file_byte 9,111, num_lines 230, num_chars_line_max 230, num_chars_line_mean 39.613043, frac_chars_alphabet 0.791719, frac_chars_comments 0.006476, frac_lines_dupe_lines 0.822967, frac_lines_long_string 0.028708, frac_chars_string_length 0.208089, cate_ast 1, frac_lines_func_ratio 0.062201, cate_var_zero false, frac_lines_pass 0.07177, frac_lines_import 0.004785, score_lines_no_logic 0.23445.
Flag columns set to 1: frac_chars_dupe_5grams through dupe_10grams, frac_lines_dupe_lines, frac_lines_pass; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 8
Row 8: hexsha 1572fcf92f8b1d72c4656aa93c5d9ca9c7c2a60c | size 10,528 | ext py | lang Python
All three repo groups reference fcos_core/data/datasets/lym.py in ZhangXX54/DDTNet at head 1de4cb5652bca6c45a80d93100ae5ab8e699344f, licenses ["BSD-2-Clause"]; all counts and event datetimes null.
content:
import os
import torch
import torch.utils.data
from PIL import Image
import numpy as np
import sys
import cv2
from torchvision.transforms import functional as F
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
from fcos_core.structures.bounding_box import BoxList
class LYMDataset(torch.utils.data.Dataset):
CLASSES = (
"__background__ ",
"lym",
)
def __init__(self, data_dir, data_name, split, use_difficult=False,transforms=None):
self.data_dir = data_dir
self.data_name = data_name
self.split = split
self.root = data_dir
self.image_set = split
self.keep_difficult = use_difficult
self.transforms = transforms
image_sets_file = os.path.join(self.data_dir, "ImageSets/%s/%s.txt" % (self.data_name, self.split))
self.ids = LYMDataset._read_image_ids(image_sets_file)
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
cls = LYMDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
def __getitem__(self, index):
img_id = self.ids[index]
img = self._read_image(img_id)
contour = self._read_contour(img_id)
centerness = self._read_centerness(img_id)
mask, target = self.get_groundtruth(img_id)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target, mask, contour, centerness = self.transforms(img, target, mask, contour, centerness)
return img, target, mask, contour, index, centerness
def __len__(self):
return len(self.ids)
@staticmethod
def _read_image_ids(image_sets_file):
ids = []
with open(image_sets_file) as f:
for line in f:
ids.append(line.rstrip())
return ids
def _read_image(self, image_id):
image_file = os.path.join(self.data_dir, "Images/320", "%s.png" % image_id)
image = Image.open(image_file).convert("RGB")
image = np.array(image)
return image
def _read_contour(self, image_id):
contour_file = os.path.join(self.data_dir, 'Masks/320','edgemask_' + "%s.png" % image_id)
contour = cv2.imread(contour_file, cv2.IMREAD_GRAYSCALE)
contour = np.array(contour, np.float32)
contour = np.expand_dims(contour, axis=2)
contour = contour/255.0
return contour
def _read_centerness(self, image_id):
centerness_file = os.path.join(self.data_dir, 'Masks/320','Dis_' + "%s.png" % image_id)
centerness = cv2.imread(centerness_file, cv2.IMREAD_GRAYSCALE)
centerness = np.array(centerness, np.float32)
centerness = np.expand_dims(centerness, axis=2)
return centerness
def get_groundtruth(self, image_id):
mask, anno = self._preprocess_annotation(image_id)
height, width = anno["im_info"]
target = BoxList(anno["boxes"], (width, height), mode="xyxy")
target.add_field("labels", anno["labels"])
return mask, target
def _preprocess_annotation(self, img_id):
anno_file = os.path.join(self.data_dir, 'Masks/320','mask_' + "%s.png" % img_id)
mask = cv2.imread(anno_file, cv2.IMREAD_GRAYSCALE)
mask = np.where(mask>0, 1, mask)
mask = np.array(mask, np.float32)
mask = np.expand_dims(mask, axis=2)
bboxes = []
labels = []
mask_gt = cv2.imread(anno_file)
h, w, _ = mask_gt.shape
cond1 = mask_gt[:, :, 0] != mask_gt[:, :, 1]
cond2 = mask_gt[:, :, 1] != mask_gt[:, :, 2]
cond3 = mask_gt[:, :, 2] != mask_gt[:, :, 0]
r, c = np.where(np.logical_or(np.logical_or(cond1, cond2), cond3))
if len(r):
unique_colors = np.unique(mask_gt[r, c, :], axis=0)
for color in unique_colors:
cond1 = mask_gt[:, :, 0] == color[0]
cond2 = mask_gt[:, :, 1] == color[1]
cond3 = mask_gt[:, :, 2] == color[2]
r, c = np.where(np.logical_and(np.logical_and(cond1, cond2), cond3))
y1 = np.min(r)
x1 = np.min(c)
y2 = np.max(r)
x2 = np.max(c)
if (abs(y2 - y1) <= 1 or abs(x2 - x1) <= 1):
continue
bboxes.append([x1, y1, x2, y2]) # 512 x 640
labels.append(1)
if len(bboxes) == 0:
bboxes.append([0, 0, 2, 2])
labels.append(0)
if len(r) == 0:
bboxes.append([0, 0, 2, 2])
labels.append(0)
im_info = tuple(map(int, (h, w)))
res = {
"boxes": torch.tensor(bboxes, dtype=torch.float32),
"labels": torch.tensor(labels),
"im_info": im_info,
}
return mask, res
def get_img_info(self, index):
img_id = self.ids[index]
image_file = os.path.join(self.data_dir, "Images/320", "%s.png" % img_id)
img = cv2.imread(image_file)
h, w, _ = img.shape
im_info = tuple(map(int, (h, w)))
return {"height": im_info[0], "width": im_info[1], "id":img_id}
def map_class_id_to_class_name(self, class_id):
return LYMDataset.CLASSES[class_id]
class LYMTestDataset(torch.utils.data.Dataset):
CLASSES = (
"__background__ ",
"lym",
)
def __init__(self, data_dir, data_name, split, use_difficult=False,transforms=None):
self.data_dir = data_dir
self.data_name = data_name
self.split = split
self.root = data_dir
self.image_set = split
self.keep_difficult = use_difficult
self.transforms = transforms
image_sets_file = os.path.join(self.data_dir, "ImageSets/%s/%s.txt" % (self.data_name, self.split))
self.ids = LYMTestDataset._read_image_ids(image_sets_file)
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
cls = LYMTestDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
def __getitem__(self, index):
img_id = self.ids[index]
img = self._read_image(img_id)
contour = self._read_contour(img_id)
centerness = self._read_centerness(img_id)
mask = self._read_gt(img_id)
target = self.get_groundtruth(img_id)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target, mask, contour, centerness = self.transforms(img, target, mask, contour, centerness)
return img, target, mask, contour, index, centerness
def __len__(self):
return len(self.ids)
@staticmethod
def _read_image_ids(image_sets_file):
ids = []
with open(image_sets_file) as f:
for line in f:
ids.append(line.rstrip())
return ids
def _read_image(self, image_id):
image_file = os.path.join(self.data_dir, "Images/320", "%s.png" % image_id)
image = Image.open(image_file).convert("RGB")
image = np.array(image)
return image
def _read_gt(self, image_id):
gt_file = os.path.join(self.data_dir, 'Masks/320','mask_' + "%s.png" % image_id)
gt = cv2.imread(gt_file, cv2.IMREAD_GRAYSCALE)
gt = np.where(gt > 0, 1, gt)
gt = np.array(gt, np.float32)
gt = np.expand_dims(gt, axis=2)
return gt
def _read_contour(self, image_id):
contour_file = os.path.join(self.data_dir, 'Masks/320','edgemask_' + "%s.png" % image_id)
contour = cv2.imread(contour_file, cv2.IMREAD_GRAYSCALE)
contour = np.array(contour, np.float32)
contour = np.expand_dims(contour, axis=2)
contour = contour/255.0
return contour
def _read_centerness(self, image_id):
centerness_file = os.path.join(self.data_dir, 'Masks/320','Dis_' + "%s.png" % image_id)
centerness = cv2.imread(centerness_file, cv2.IMREAD_GRAYSCALE)
centerness = np.array(centerness, np.float32)
centerness = np.expand_dims(centerness, axis=2)
return centerness
def get_groundtruth(self, image_id):
anno = self._preprocess_annotation(image_id)
height, width = anno["im_info"]
target = BoxList(anno["boxes"], (width, height), mode="xyxy")
target.add_field("labels", anno["labels"])
# target.add_field("difficult", anno["difficult"])
return target
def _preprocess_annotation(self, img_id):
anno_file = os.path.join(self.data_dir, 'Masks/320','mask_' + "%s.png" % img_id)
bboxes = []
labels = []
mask_gt = cv2.imread(anno_file)
h, w, _ = mask_gt.shape
cond1 = mask_gt[:, :, 0] != mask_gt[:, :, 1]
cond2 = mask_gt[:, :, 1] != mask_gt[:, :, 2]
cond3 = mask_gt[:, :, 2] != mask_gt[:, :, 0]
r, c = np.where(np.logical_or(np.logical_or(cond1, cond2), cond3))
if len(r):
unique_colors = np.unique(mask_gt[r, c, :], axis=0)
for color in unique_colors:
cond1 = mask_gt[:, :, 0] == color[0]
cond2 = mask_gt[:, :, 1] == color[1]
cond3 = mask_gt[:, :, 2] == color[2]
r, c = np.where(np.logical_and(np.logical_and(cond1, cond2), cond3))
y1 = np.min(r)
x1 = np.min(c)
y2 = np.max(r)
x2 = np.max(c)
if (abs(y2 - y1) <= 1 or abs(x2 - x1) <= 1):
continue
bboxes.append([x1, y1, x2, y2]) # 512 x 640
labels.append(1)
if len(bboxes) == 0:
bboxes.append([0, 0, 2, 2])
labels.append(0)
if len(r) == 0:
bboxes.append([0, 0, 2, 2])
labels.append(0)
im_info = tuple(map(int, (h, w)))
res = {
"boxes": torch.tensor(bboxes, dtype=torch.float32),
"labels": torch.tensor(labels),
"im_info": im_info,
}
return res
def get_img_info(self, index):
img_id = self.ids[index]
image_file = os.path.join(self.data_dir, "Images/320", "%s.png" % img_id)
img = cv2.imread(image_file)
h, w, _ = img.shape
im_info = tuple(map(int, (h, w)))
return {"height": im_info[0], "width": im_info[1], "id":img_id}
def map_class_id_to_class_name(self, class_id):
return LYMTestDataset.CLASSES[class_id]
avg_line_length 34.976744 | max_line_length 108 | alphanum_fraction 0.579217
Quality signals (zero unless listed): num_words 1,423, num_chars 10,528, mean_word_length 4.066058, frac_words_unique 0.113141, frac_chars_top_2grams 0.024888, frac_chars_top_3grams 0.032319, frac_chars_top_4grams 0.031455, frac_chars_dupe_5grams through dupe_10grams 0.879018 each, frac_chars_digital 0.027511, frac_chars_whitespace 0.288754, size_file_byte 10,528, num_lines 300, num_chars_line_max 109, num_chars_line_mean 35.093333, frac_chars_alphabet 0.745192, frac_chars_comments 0.006459, frac_lines_dupe_lines 0.813278, frac_chars_string_length 0.039021, cate_ast 1, frac_lines_func_ratio 0.095436, cate_var_zero false, frac_lines_import 0.045643, frac_lines_simplefunc 0.016598, score_lines_no_logic 0.244813.
Flag columns set to 1: frac_chars_dupe_5grams through dupe_10grams, frac_lines_dupe_lines; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 7
Row 9: hexsha 15863c1ffec48d5cfdddf7a4d1517baca541652a | size 560 | ext py | lang Python
All three repo groups reference eval_ricord1a_timm-regnetx_002_ElasticTransform.py in BrunoKrinski/segtool at head cb604b5f38104c43a76450136e37c3d1c4b6d275, licenses ["MIT"]; all counts and event datetimes null.
content:
import os
ls=["python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_0_ElasticTransform.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_1_ElasticTransform.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_2_ElasticTransform.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_3_ElasticTransform.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_4_ElasticTransform.yml",
]
for l in ls:
os.system(l)
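A side note on the script above: os.system discards failures, so a broken config is silently skipped. A minimal sketch of the same loop with subprocess, where a non-zero exit aborts the batch (a suggestion, not the original script's behavior):

```python
import shlex
import subprocess

for cmd in ls:
    # check=True raises CalledProcessError on the first non-zero exit code
    subprocess.run(shlex.split(cmd), check=True)
```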
avg_line_length 50.909091 | max_line_length 106 | alphanum_fraction 0.851786
Quality signals (zero unless listed): num_words 80, num_chars 560, mean_word_length 5.5875, frac_words_unique 0.3, frac_chars_top_2grams 0.111857, frac_chars_top_3grams 0.134228, frac_chars_top_4grams 0.212528, frac_chars_dupe_5grams through dupe_10grams 0.885906 each, frac_chars_digital 0.047259, frac_chars_whitespace 0.055357, size_file_byte 560, num_lines 11, num_chars_line_max 107, num_chars_line_mean 50.909091, frac_chars_alphabet 0.797732, frac_chars_string_length 0.882353, frac_chars_long_word_length 0.659537, cate_ast 1, cate_var_zero false, frac_lines_import 0.111111, score_lines_no_logic 0.111111.
Flag columns set to 1: frac_chars_top_4grams, frac_chars_dupe_5grams through dupe_10grams, frac_chars_string_length, frac_chars_long_word_length; frac_words_unique and frac_lines_string_concat null; all others 0.
effective "0" | hits 9
Row 10: hexsha 159c57346bfd9a6ea51d7cb102236d3c58eee86b | size 15,681 | ext py | lang Python
All three repo groups reference tests/test_factories.py in yi-jiayu/nlbsg at head 0b6589706aab334daf768ecdaa8975ead8ea028e, licenses ["MIT"].
max_stars_count 5 (events 2019-04-21T03:11:22.000Z to 2021-11-13T04:13:32.000Z) | max_issues_count 10 (events 2019-04-03T07:52:30.000Z to 2021-06-01T23:56:30.000Z) | max_forks_count 1 (events 2019-04-21T04:04:26.000Z to 2019-04-21T04:04:26.000Z)
content:
from nlbsg._factories import (
get_availability_info_response_factory,
get_title_details_response_factory,
search_response_factory,
)
from nlbsg.types import (
GetAvailabilityInfoResponse,
GetTitleDetailsResponse,
Item,
SearchResponse,
Title,
TitleDetail,
)
class TestSearchResponseFactory:
def test_success(self):
response = {
"Status": "OK",
"Message": "Operation completed successfully",
"ErrorMessage": None,
"TotalRecords": 52,
"NextRecordPosition": 4,
"SetId": "PGE3676",
"Titles": {
"Title": [
{
"BID": "203125808",
"ISBN": "1328915336 (paperback)",
"TitleName": "Beren and Lúthien / by J.R.R. Tolkien ; edited by Christopher Tolkien ; with illustrations by Alan Lee.",
"Author": "Tolkien, J. R. R. (John Ronald Reuel), 1892-1973",
"PublishYear": "2018",
"MediaCode": "BK",
"MediaDesc": "Books",
},
{
"BID": "204576140",
"ISBN": "9780008214210 (electronic bk)",
"TitleName": "Beren and l℗♭©ʻthien [electronic resource]. J. R. R Tolkien.",
"Author": "Tolkien, J. R. R.",
"PublishYear": "2017",
"MediaCode": "BK",
"MediaDesc": "Books",
},
]
},
}
actual = search_response_factory(response)
expected = SearchResponse(
status="OK",
message="Operation completed successfully",
error_message=None,
total_records=52,
next_record_position=4,
set_id="PGE3676",
titles=tuple(
[
Title(
bid="203125808",
isbn="1328915336 (paperback)",
title_name="Beren and Lúthien / by J.R.R. Tolkien ; edited by Christopher Tolkien ; with illustrations by Alan Lee.",
author="Tolkien, J. R. R. (John Ronald Reuel), 1892-1973",
publish_year="2018",
media_code="BK",
media_desc="Books",
),
Title(
bid="204576140",
isbn="9780008214210 (electronic bk)",
title_name="Beren and l℗♭©ʻthien [electronic resource]. J. R. R Tolkien.",
author="Tolkien, J. R. R.",
publish_year="2017",
media_code="BK",
media_desc="Books",
),
]
),
)
assert actual == expected
def test_error(self):
response = {
"Status": "ERROR",
"Message": "System error encountered. Please contact the administrator",
"ErrorMessage": "Wrong API authorization key.",
"TotalRecords": None,
"NextRecordPosition": None,
"SetId": None,
"Titles": None,
}
actual = search_response_factory(response)
expected = SearchResponse(
status="ERROR",
message="System error encountered. Please contact the administrator",
error_message="Wrong API authorization key.",
total_records=None,
next_record_position=None,
set_id=None,
titles=None,
)
assert actual == expected
class TestGetTitleDetailsResponseFactory:
def test_success(self):
response = {
"Status": "OK",
"Message": "Operation completed successfully",
"ErrorMessage": None,
"TitleDetail": {
"BID": "203125808",
"TitleName": "Beren and Lúthien / by J.R.R. Tolkien ; edited by Christopher Tolkien ; with illustrations by Alan Lee.",
"Author": "Tolkien, J. R. R.",
"OtherAuthors": "Tolkien, J. R. R. (John Ronald Reuel), 1892-1973|Tolkien, Christopher|Lee, Alan",
"Publisher": None,
"PhysicalDesc": "288 pages (pages numbered 8-288) :chiefly color illustrations ;21 cm",
"Subjects": {
"Subject": [
"Middle Earth (Imaginary place) Fiction",
"Elves Fiction",
"Fantasy fiction",
]
},
"Summary": "The epic tale of Beren and Lúthien became an essential element in the evolution of The Silmarillion, the myths and legends of J.R.R. Tolkien's First Age of the World. Always key to the story is the fate that shadowed their love: Beren was a mortal man, Lúthien an immortal Elf. Her father, a great Elvish lord, imposed on Beren an impossible task before he might wed Lúthien: to rob the greatest of all evil beings, Melkor, of a Silmaril.Painstakingly restored from Tolkien's manuscripts and presented for the first time as a continuous and standalone story, Beren and Lúthien reunites fans of The Hobbit and The Lord of the Rings with Elves and Men, along with the rich landscape and creatures unique to Tolkien's Middle-earth. Christopher Tolkien tells the story in his father's own words by giving its original form as well as prose and verse passages from later texts that illustrate the narrative as it changed. -- from back cover.",
"Notes": "First published by Harper Collins Publishers 2017.Includes abstractsThe epic tale of Beren and Lúthien became an essential element in the evolution of The Silmarillion, the myths and legends of J.R.R. Tolkien's First Age of the World. Always key to the story is the fate that shadowed their love: Beren was a mortal man, Lúthien an immortal Elf. Her father, a great Elvish lord, imposed on Beren an impossible task before he might wed Lúthien: to rob the greatest of all evil beings, Melkor, of a Silmaril.Painstakingly restored from Tolkien's manuscripts and presented for the first time as a continuous and standalone story, Beren and Lúthien reunites fans of The Hobbit and The Lord of the Rings with Elves and Men, along with the rich landscape and creatures unique to Tolkien's Middle-earth. Christopher Tolkien tells the story in his father's own words by giving its original form as well as prose and verse passages from later texts that illustrate the narrative as it changed. -- from back cover.",
"ISBN": "1328915336 (paperback)",
"ISSN": None,
"NTitleName": None,
"NAuthor": None,
"NPublisher": None,
},
}
actual = get_title_details_response_factory(response)
expected = GetTitleDetailsResponse(
status="OK",
message="Operation completed successfully",
error_message=None,
title_detail=TitleDetail(
bid="203125808",
title_name="Beren and Lúthien / by J.R.R. Tolkien ; edited by Christopher Tolkien ; with illustrations by Alan Lee.",
author="Tolkien, J. R. R.",
other_authors="Tolkien, J. R. R. (John Ronald Reuel), 1892-1973|Tolkien, Christopher|Lee, Alan",
publisher=None,
physical_desc="288 pages (pages numbered 8-288) :chiefly color illustrations ;21 cm",
subjects=(
"Middle Earth (Imaginary place) Fiction",
"Elves Fiction",
"Fantasy fiction",
),
summary="The epic tale of Beren and Lúthien became an essential element in the evolution of The Silmarillion, the myths and legends of J.R.R. Tolkien's First Age of the World. Always key to the story is the fate that shadowed their love: Beren was a mortal man, Lúthien an immortal Elf. Her father, a great Elvish lord, imposed on Beren an impossible task before he might wed Lúthien: to rob the greatest of all evil beings, Melkor, of a Silmaril.Painstakingly restored from Tolkien's manuscripts and presented for the first time as a continuous and standalone story, Beren and Lúthien reunites fans of The Hobbit and The Lord of the Rings with Elves and Men, along with the rich landscape and creatures unique to Tolkien's Middle-earth. Christopher Tolkien tells the story in his father's own words by giving its original form as well as prose and verse passages from later texts that illustrate the narrative as it changed. -- from back cover.",
notes="First published by Harper Collins Publishers 2017.Includes abstractsThe epic tale of Beren and Lúthien became an essential element in the evolution of The Silmarillion, the myths and legends of J.R.R. Tolkien's First Age of the World. Always key to the story is the fate that shadowed their love: Beren was a mortal man, Lúthien an immortal Elf. Her father, a great Elvish lord, imposed on Beren an impossible task before he might wed Lúthien: to rob the greatest of all evil beings, Melkor, of a Silmaril.Painstakingly restored from Tolkien's manuscripts and presented for the first time as a continuous and standalone story, Beren and Lúthien reunites fans of The Hobbit and The Lord of the Rings with Elves and Men, along with the rich landscape and creatures unique to Tolkien's Middle-earth. Christopher Tolkien tells the story in his father's own words by giving its original form as well as prose and verse passages from later texts that illustrate the narrative as it changed. -- from back cover.",
isbn="1328915336 (paperback)",
issn=None,
n_title_name=None,
n_author=None,
n_publisher=None,
),
)
assert actual == expected
def test_error(self):
response = {
"Status": "ERROR",
"Message": "System error encountered. Please contact the administrator",
"ErrorMessage": "Wrong API authorization key.",
"TitleDetail": None,
}
actual = get_title_details_response_factory(response)
expected = GetTitleDetailsResponse(
status="ERROR",
message="System error encountered. Please contact the administrator",
error_message="Wrong API authorization key.",
title_detail=None,
)
assert actual == expected
class TestGetAvailabilityInfoResponseFactory:
def test_success(self):
response = {
"Status": "OK",
"Message": "Operation completed successfully",
"ErrorMessage": None,
"NextRecordPosition": 2,
"SetId": "3709",
"Items": {
"Item": [
{
"ItemNo": "B33315114J",
"BranchID": "AMKPL",
"BranchName": "Ang Mo Kio Public Library",
"LocationCode": "____",
"LocationDesc": "Adult Lending",
"CallNumber": "English TOL -[FN]",
"StatusCode": "S",
"StatusDesc": "Not On Loan",
"MediaCode": None,
"MediaDesc": "Book",
"StatusDate": "06/09/2018",
"DueDate": None,
"ClusterName": None,
"CategoryName": None,
"CollectionCode": None,
"CollectionMinAgeLimit": None,
},
{
"ItemNo": "B33315118C",
"BranchID": "BBPL",
"BranchName": "Bukit Batok Public Library",
"LocationCode": "____",
"LocationDesc": "Adult Lending",
"CallNumber": "English TOL -[FN]",
"StatusCode": "C",
"StatusDesc": "On Loan",
"MediaCode": None,
"MediaDesc": "Book",
"StatusDate": "08/11/2018",
"DueDate": "08/11/2018",
"ClusterName": None,
"CategoryName": None,
"CollectionCode": None,
"CollectionMinAgeLimit": None,
},
]
},
}
actual = get_availability_info_response_factory(response)
expected = GetAvailabilityInfoResponse(
status="OK",
message="Operation completed successfully",
error_message=None,
next_record_position=2,
set_id="3709",
items=tuple(
[
Item(
item_no="B33315114J",
branch_id="AMKPL",
branch_name="Ang Mo Kio Public Library",
location_code="____",
location_desc="Adult Lending",
call_number="English TOL -[FN]",
status_code="S",
status_desc="Not On Loan",
media_code=None,
media_desc="Book",
status_date="06/09/2018",
due_date=None,
cluster_name=None,
category_name=None,
collection_code=None,
collection_min_age_limit=None,
available=True,
),
Item(
item_no="B33315118C",
branch_id="BBPL",
branch_name="Bukit Batok Public Library",
location_code="____",
location_desc="Adult Lending",
call_number="English TOL -[FN]",
status_code="C",
status_desc="On Loan",
media_code=None,
media_desc="Book",
status_date="08/11/2018",
due_date="08/11/2018",
cluster_name=None,
category_name=None,
collection_code=None,
collection_min_age_limit=None,
available=False,
),
]
),
)
assert actual == expected
def test_error(self):
response = {
"Status": "ERROR",
"Message": "System error encountered. Please contact the administrator",
"ErrorMessage": "Wrong API authorization key.",
"NextRecordPosition": None,
"SetId": None,
"Items": None,
}
actual = get_availability_info_response_factory(response)
expected = GetAvailabilityInfoResponse(
status="ERROR",
message="System error encountered. Please contact the administrator",
error_message="Wrong API authorization key.",
next_record_position=None,
set_id=None,
items=None,
)
assert actual == expected
| 52.620805 | 1,036 | 0.536127 | 1,554 | 15,681 | 5.33269 | 0.187259 | 0.004344 | 0.006516 | 0.012067
| 0.850006 | 0.833353 | 0.807771 | 0.790153 | 0.75902 | 0.738385 | 0 | 0.030772 | 0.382437
| 15,681 | 297 | 1,037 | 52.79798 | 0.824349 | 0 | 0 | 0.527778 | 0 | 0.034722 | 0.468975 | 0.00829
| 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.020833 | false | 0.013889 | 0.006944 | 0 | 0.038194 | 0 | 0 | 0 | 0
| null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
| null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
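The tests in tests/test_factories.py above pin down a mapping from the API's PascalCase JSON onto snake_case dataclasses. A hypothetical sketch of the shape such a factory could take (the real implementation lives in nlbsg._factories and is not shown here, so this may differ):

from nlbsg.types import SearchResponse, Title

def search_response_factory_sketch(response):
    # Map the API's PascalCase payload onto the snake_case SearchResponse,
    # leaving titles as None when the API reports an error.
    titles = response["Titles"]
    return SearchResponse(
        status=response["Status"],
        message=response["Message"],
        error_message=response["ErrorMessage"],
        total_records=response["TotalRecords"],
        next_record_position=response["NextRecordPosition"],
        set_id=response["SetId"],
        titles=tuple(
            Title(
                bid=t["BID"],
                isbn=t["ISBN"],
                title_name=t["TitleName"],
                author=t["Author"],
                publish_year=t["PublishYear"],
                media_code=t["MediaCode"],
                media_desc=t["MediaDesc"],
            )
            for t in titles["Title"]
        )
        if titles is not None
        else None,
    )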
ecbc05a30819222d6d0c8d51ca7d546d863054b2 | 88 | py | Python |
data/path.py | thekindler/object-detection-training-pipeline | c28ad996ebcb06f636c4e255419ccc92b07fbc37 | ["MIT"] | null | null | null |
data/path.py | thekindler/object-detection-training-pipeline | c28ad996ebcb06f636c4e255419ccc92b07fbc37 | ["MIT"] | null | null | null |
data/path.py | thekindler/object-detection-training-pipeline | c28ad996ebcb06f636c4e255419ccc92b07fbc37 | ["MIT"] | null | null | null |
from os.path import dirname, realpath

def get():
    """Return the absolute path of the directory containing this module."""
    return dirname(realpath(__file__))
| 22 | 38 | 0.75 | 12 | 88 | 5.166667 | 0.833333 | 0.483871
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147727
| 88 | 4 | 38 | 22 | 0.826667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
| 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0
| null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
| null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 8 |
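A usage sketch for data/path.py above: the helper resolves the package directory regardless of the caller's working directory, e.g. to locate files shipped alongside the module (the annotations.json name below is purely illustrative):

import os.path

from data import path

# Build an absolute path to a file assumed to live next to data/path.py.
annotations = os.path.join(path.get(), "annotations.json")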
bf22c3c010e6a96e20cbe61a8c19be91a00cf996 | 8,835 | py | Python |
u24_lymphocyte/third_party/treeano/nodes/stochastic.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | ["BSD-3-Clause"] | 45 | 2015-04-26T04:45:51.000Z | 2022-01-24T15:03:55.000Z |
u24_lymphocyte/third_party/treeano/nodes/stochastic.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | ["BSD-3-Clause"] | 8 | 2018-07-20T20:54:51.000Z | 2020-06-12T05:36:04.000Z |
u24_lymphocyte/third_party/treeano/nodes/stochastic.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | ["BSD-3-Clause"] | 22 | 2018-05-21T23:57:20.000Z | 2022-02-21T00:48:32.000Z |
import warnings
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from .. import core
from .. import utils
fX = theano.config.floatX
# TODO: Refactor to extract shared logic from these nodes.
@core.register_node("dropout")
class DropoutNode(core.NodeImpl):
"""
node that drops out random units
"""
hyperparameter_names = ("dropout_probability",
"probability",
"p",
"deterministic")
def compute_output(self, network, in_vw):
deterministic = network.find_hyperparameter(["deterministic"])
p = network.find_hyperparameter(["dropout_probability",
"probability",
"p"],
0)
if deterministic or p == 0:
network.copy_vw(
name="default",
previous_vw=in_vw,
tags={"output"},
)
else:
rescale_factor = 1 / (1 - p)
mask_shape = in_vw.shape
if any(s is None for s in mask_shape):
# NOTE: this uses symbolic shape - can be an issue with
# theano.clone and random numbers
# https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
warnings.warn("using symbolic shape for dropout mask, "
"which can be an issue with theano.clone")
mask_shape = in_vw.variable.shape
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
# set bernoulli probability to be inverse of dropout probability
# because 1 means to keep the unit
bernoulli_prob = 1 - p
mask = rescale_factor * srng.binomial(mask_shape,
p=bernoulli_prob,
dtype=fX)
network.create_vw(
"default",
variable=in_vw.variable * mask,
shape=in_vw.shape,
tags={"output"},
)
@core.register_node("gaussian_dropout")
class GaussianDropoutNode(core.NodeImpl):
"""
node that adds gaussian noise to units
"""
hyperparameter_names = ("sigma",
"dropout_probability",
"probability",
"p",
"deterministic")
def compute_output(self, network, in_vw):
deterministic = network.find_hyperparameter(["deterministic"])
sigma = network.find_hyperparameter(["sigma"], None)
if sigma is None:
p = network.find_hyperparameter(["dropout_probability",
"probability",
"p"],
0)
if p == 0:
sigma = 0
else:
# derive gaussian dropout variance from bernoulli dropout
# probability
sigma = T.sqrt(p / (1 - p))
if deterministic or sigma == 0:
network.copy_vw(
name="default",
previous_vw=in_vw,
tags={"output"},
)
else:
mask_shape = in_vw.shape
if any(s is None for s in mask_shape):
# NOTE: this uses symbolic shape - can be an issue with
# theano.clone and random numbers
# https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
warnings.warn("using symbolic shape for dropout mask, "
"which can be an issue with theano.clone")
mask_shape = in_vw.variable.shape
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
mask = srng.normal(mask_shape, avg=1.0, std=sigma, dtype=fX)
network.create_vw(
"default",
variable=in_vw.variable * mask,
shape=in_vw.shape,
tags={"output"},
)
@core.register_node("spatial_dropout")
class SpatialDropoutNode(core.NodeImpl):
"""
node that drops out random filters
Each filter is either on or off.
"""
hyperparameter_names = ("dropout_probability",
"probability",
"p",
"deterministic")
def compute_output(self, network, in_vw):
deterministic = network.find_hyperparameter(["deterministic"])
p = network.find_hyperparameter(["dropout_probability",
"probability",
"p"],
0)
if deterministic or p == 0:
network.copy_vw(
name="default",
previous_vw=in_vw,
tags={"output"},
)
else:
rescale_factor = 1 / (1 - p)
mask_shape = in_vw.shape
if any(s is None for s in mask_shape):
# NOTE: this uses symbolic shape - can be an issue with
# theano.clone and random numbers
# https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
warnings.warn("using symbolic shape for dropout mask, "
"which can be an issue with theano.clone")
mask_shape = in_vw.symbolic_shape()
# FIXME generalize to other shape dimensions.
# assume this is of the form bc01 (batch, channel, width, height)
mask_shape = mask_shape[:2]
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
# set bernoulli probability to be inverse of dropout probability
# because 1 means to keep the unit
bernoulli_prob = 1 - p
mask = rescale_factor * srng.binomial(mask_shape,
p=bernoulli_prob,
dtype=fX)
mask = mask.dimshuffle(0, 1, 'x', 'x')
network.create_vw(
"default",
variable=in_vw.variable * mask,
shape=in_vw.shape,
tags={"output"},
)
@core.register_node("gaussian_spatial_dropout")
class GaussianSpatialDropoutNode(core.NodeImpl):
"""
    node that adds gaussian noise to each filter
"""
hyperparameter_names = ("sigma",
"dropout_probability",
"probability",
"p",
"deterministic")
def compute_output(self, network, in_vw):
deterministic = network.find_hyperparameter(["deterministic"])
sigma = network.find_hyperparameter(["sigma"], None)
if sigma is None:
p = network.find_hyperparameter(["dropout_probability",
"probability",
"p"],
0)
if p == 0:
sigma = 0
else:
# derive gaussian dropout variance from bernoulli dropout
# probability
sigma = T.sqrt(p / (1 - p))
if deterministic or sigma == 0:
network.copy_vw(
name="default",
previous_vw=in_vw,
tags={"output"},
)
else:
mask_shape = in_vw.shape
if any(s is None for s in mask_shape):
# NOTE: this uses symbolic shape - can be an issue with
# theano.clone and random numbers
# https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
warnings.warn("using symbolic shape for dropout mask, "
"which can be an issue with theano.clone")
mask_shape = in_vw.symbolic_shape()
# FIXME generalize to other shape dimensions.
# assume this is of the form bc01 (batch, channel, width, height)
mask_shape = mask_shape[:2]
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
mask = srng.normal(mask_shape, avg=1.0, std=sigma, dtype=fX)
mask = mask.dimshuffle(0, 1, 'x', 'x')
network.create_vw(
"default",
variable=in_vw.variable * mask,
shape=in_vw.shape,
tags={"output"},
)
| 38.75 | 82 | 0.49236 | 864 | 8,835 | 4.912037 | 0.166667 | 0.02262 | 0.031103 | 0.036758
| 0.907634 | 0.907634 | 0.907634 | 0.891612 | 0.873233 | 0.873233 | 0 | 0.009475 | 0.426599
| 8,835 | 227 | 83 | 38.920705 | 0.828267 | 0.179287 | 0 | 0.857143 | 0 | 0 | 0.119374 | 0.003355
| 0 | 0 | 0 | 0.022026 | 0 | 1 | 0.024845 | false | 0 | 0.037267 | 0 | 0.111801 | 0 | 0 | 0 | 0
| null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0
| null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
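In the treeano dropout nodes above, rescale_factor = 1 / (1 - p) implements "inverted dropout": units are kept with probability 1 - p and the survivors are scaled up so the expected activation matches the deterministic path. A minimal NumPy sketch of the same mask construction (plain arrays standing in for Theano variables, and assuming 0 <= p < 1):

import numpy as np

rng = np.random.default_rng(0)

def inverted_dropout(x, p):
    # Keep each unit with probability 1 - p, then divide by 1 - p so that
    # E[mask * x] == x, mirroring rescale_factor * srng.binomial(...) above.
    keep_prob = 1.0 - p
    mask = rng.binomial(1, keep_prob, size=x.shape) / keep_prob
    return x * mask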
da0ed89b786efe035ec359e7d943271d4244203d | 22,340 | py | Python |
nbexchange/tests/test_handlers_collection.py | edina/nbexchange | dd82fa0fe1f3360c5bc2d051456b3c9e09665dd9 | ["BSD-3-Clause"] | 7 | 2020-04-30T20:16:18.000Z | 2021-09-11T20:31:51.000Z |
nbexchange/tests/test_handlers_collection.py | edina/nbexchange | dd82fa0fe1f3360c5bc2d051456b3c9e09665dd9 | ["BSD-3-Clause"] | 86 | 2020-03-06T15:34:55.000Z | 2022-03-07T11:58:06.000Z |
nbexchange/tests/test_handlers_collection.py | edina/nbexchange | dd82fa0fe1f3360c5bc2d051456b3c9e09665dd9 | ["BSD-3-Clause"] | 1 | 2020-07-25T23:04:51.000Z | 2020-07-25T23:04:51.000Z |
import logging
import re
import sys
import pytest
from mock import patch
from nbexchange.handlers.base import BaseHandler
from nbexchange.tests.utils import (
async_requests,
clear_database,
get_files_dict,
user_brobbere_student,
user_kiz,
user_kiz_instructor,
user_kiz_student,
)
logger = logging.getLogger(__file__)
logger.setLevel(logging.ERROR)
# set up the file to be uploaded as part of the testing later
files = get_files_dict(sys.argv[0]) # ourself :)
##### POST /collection #####
# No method available (501, because we've hard-coded it)
@pytest.mark.gen_test
def test_post_collection_is_501(app):
r = yield async_requests.post(app.url + "/collection")
assert r.status_code == 501
# subscribed user makes no difference (501, because we've hard-coded it)
@pytest.mark.gen_test
def test_post_collection_is_501_even_authenticated(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(app.url + "/collection?course_id=course_2")
assert r.status_code == 501
##### GET /collection (download/collect student submissions) #####
# require authenticated user
@pytest.mark.gen_test
def test_get_collection_requires_authentication(app, clear_database):
r = yield async_requests.get(app.url + "/collection")
assert r.status_code == 403
# Requires three params (none given)
@pytest.mark.gen_test
def test_get_collection_requires_parameters(app):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.get(app.url + "/collection")
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == False
assert (
response_data["note"]
== "Collection call requires a course code, an assignment code, and a path"
)
# Requires three params (given course & assignment)
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_catches_missing_path(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?course_id={collected_data['course_id']}&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == False
assert (
response_data["note"]
== "Collection call requires a course code, an assignment code, and a path"
)
# Requires three params (given course & path)
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_catches_missing_assignment(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?course_id={collected_data['course_id']}&path={collected_data['path']}"
)
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == False
assert (
response_data["note"]
== "Collection call requires a course code, an assignment code, and a path"
)
# Requires three params (given assignment & path)
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_catches_missing_course(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?path={collected_data['path']}&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == False
assert (
response_data["note"]
== "Collection call requires a course code, an assignment code, and a path"
)
# Has all three params, not subscribed to course
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_checks_for_user_subscription(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?course_id=course_1&path={collected_data['path']}&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == False
assert response_data["note"] == "User not subscribed to course course_1"
# Has all three params, but a student can't collect (note the hard-coded params here, as students can list items available for collection)
# (needs to be released to register the assignment)
@pytest.mark.gen_test
def test_get_collection_check_catches_student_role(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_brobbere_student
):
r = yield async_requests.get(
app.url
+ f"/collection?course_id=course_2&path=/foo/car/file.gz&assignment_id=assign_a"
)
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == False
assert response_data["note"] == f"User not an instructor to course course_2"
# Has all three params, instructor can collect
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_confirm_instructor_does_download(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?course_id={collected_data['course_id']}&path={collected_data['path']}&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 200
assert r.headers["Content-Type"] == "application/gzip"
assert int(r.headers["Content-Length"]) > 0
# broken nbex_user throws a 500 error on the server
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_broken_nbex_user(app, clear_database, caplog):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz):
r = yield async_requests.get(
app.url
+ f"/collection?course_id={collected_data['course_id']}&path={collected_data['path']}&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 500
assert (
"Both current_course ('None') and current_role ('None') must have values. User was '1-kiz'"
in caplog.text
)
# Confirm that multiple submissions are listed
@pytest.mark.gen_test
async def test_collection_actions_show_correctly(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = await async_requests.post( # release
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
r = await async_requests.get( # fetch
app.url + "/assignment?&course_id=course_2&assignment_id=assign_a"
)
r = await async_requests.post( # submit
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
r = await async_requests.post( # submit
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_brobbere_student
):
        r = await async_requests.get(
app.url + "/assignment?&course_id=course_2&assignment_id=assign_a"
) # fetch as another user
        r = await async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
) # submit as that user
        r = await async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
) # submit as that user again
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
        r = await async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
)
# The 'collections' call returns only submissions, but all for that assignment
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == True
assert "note" not in response_data # just that it's missing
paths = list(map(lambda assignment: assignment["path"], response_data["value"]))
assert len(paths) == 4 # the collections call only returns submitted items
# path in submission contains org + course + assignment + user
assert re.search("1/submitted/course_2/assign_a/1-kiz", paths[0])
assert re.search("1/submitted/course_2/assign_a/1-kiz", paths[1])
assert re.search("1/submitted/course_2/assign_a/1-brobbere", paths[2])
assert re.search("1/submitted/course_2/assign_a/1-brobbere", paths[3])
collected_items = response_data["value"]
for collected_data in collected_items:
        r = await async_requests.get( # collect submission
app.url
+ f"/collection?course_id={collected_data['course_id']}&path={collected_data['path']}&assignment_id={collected_data['assignment_id']}"
)
    r = await async_requests.get(app.url + "/assignments?course_id=course_2")
# The 'assignments' call returns only the actions for the specific user
# So this instructor has submitted twice, but collected 4 times
assert r.status_code == 200
response_data = r.json()
assert response_data["success"] == True
assert "note" not in response_data # just that it's missing
paths = list(map(lambda assignment: assignment["path"], response_data["value"]))
actions = list(
map(lambda assignment: assignment["status"], response_data["value"])
)
assert len(paths) == 8
assert actions == [
"released",
"fetched",
"submitted",
"submitted",
"collected",
"collected",
"collected",
"collected",
]
assert paths[2] == paths[4] # 1st submit = 1st collect
assert paths[3] == paths[5] # 2nd submit = 2nd collect
# As a different user, we get a different return
with patch.object(
BaseHandler, "get_current_user", return_value=user_brobbere_student
):
        r = await async_requests.get(app.url + "/assignments?course_id=course_2")
response_data = r.json()
actions = list(
map(lambda assignment: assignment["status"], response_data["value"])
)
assert len(actions) == 4
assert actions == ["released", "fetched", "submitted", "submitted"]
# what happens when the path doesn't match
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_path_is_incorrect(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?course_id={collected_data['course_id']}&path=/some/random/path&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 200
assert r.headers["Content-Type"] == "application/gzip"
assert int(r.headers["Content-Length"]) == 0
# what happens when collections returns a fetched_feedback with an empty path
# (needs to be submitted before it can be listed for collection)
# (needs to be fetched before it can be submitted)
# (needs to be released before it can be fetched)
@pytest.mark.gen_test
def test_get_collection_with_a_blank_feedback_path_injected(app, clear_database):
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
r = yield async_requests.post(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a",
files=files,
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.get(
app.url + "/assignment?course_id=course_2&assignment_id=assign_a"
)
with patch.object(BaseHandler, "get_current_user", return_value=user_kiz_student):
r = yield async_requests.post(
app.url + "/submission?course_id=course_2&assignment_id=assign_a",
files=files,
)
# Now manually inject a `feedback_fetched` action
import nbexchange.models.actions
from nbexchange.database import scoped_session
with scoped_session() as session:
action = nbexchange.models.actions.Action(
user_id=3,
assignment_id="assign_a",
action="feedback_fetched",
location=None,
)
session.add(action)
with patch.object(
BaseHandler, "get_current_user", return_value=user_kiz_instructor
):
collected_data = None
r = yield async_requests.get(
app.url + "/collections?course_id=course_2&assignment_id=assign_a"
        ) ## Get the data we need for the call we want to test
response_data = r.json()
collected_data = response_data["value"][0]
r = yield async_requests.get(
app.url
+ f"/collection?course_id={collected_data['course_id']}&path={collected_data['path']}&assignment_id={collected_data['assignment_id']}"
)
assert r.status_code == 200
assert r.headers["Content-Type"] == "application/gzip"
assert int(r.headers["Content-Length"]) > 0
| 40.990826 | 150 | 0.666294 | 2,910 | 22,340 | 4.872509 | 0.076976 | 0.033853 | 0.041117 | 0.071021
| 0.844841 | 0.837436 | 0.829607 | 0.828408 | 0.822343 | 0.815995 | 0 | 0.00877 | 0.234378
| 22,340 | 544 | 151 | 41.066176 | 0.820217 | 0.148881 | 0 | 0.705752 | 0 | 0.013274 | 0.268436 | 0.185865
| 0 | 0 | 0 | 0 | 0.108407 | 1 | 0.028761 | false | 0 | 0.019912 | 0 | 0.048673 | 0 | 0 | 0 | 0
| null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0
| null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
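The handler tests above repeat the same patch.object(BaseHandler, "get_current_user", ...) incantation for every role switch; a small hypothetical helper (not part of nbexchange) could package the pattern as a context manager:

from contextlib import contextmanager

from mock import patch

from nbexchange.handlers.base import BaseHandler

@contextmanager
def as_user(user):
    # Make every handler treat `user` as the authenticated caller for the
    # duration of the block, exactly as the inline patch.object calls do.
    with patch.object(BaseHandler, "get_current_user", return_value=user):
        yield

so a test step could read `with as_user(user_kiz_instructor): ...` instead of the multi-line patch block.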
da143de4e53b1946a40ffeb9a36f92178c9b193a | 129,711 | py | Python |
nb/lentil/grad.py | AurelienNioche/leitnerq | 340744dcb00098cce018043eeb02dc181bf9425f | ["Apache-2.0"] | 12 | 2016-03-21T03:00:21.000Z | 2021-07-04T13:01:02.000Z |
lentil/grad.py | wpmarinho/lentil | 957227de8c48b6c3c5dfd754da30a259b7409084 | ["Apache-2.0"] | 1 | 2017-03-27T09:25:04.000Z | 2017-03-27T09:25:04.000Z |
lentil/grad.py | wpmarinho/lentil | 957227de8c48b6c3c5dfd754da30a259b7409084 | ["Apache-2.0"] | 7 | 2015-09-09T21:54:34.000Z | 2017-11-20T20:00:42.000Z |
"""
Module for gradients of the cost function in parameter estimation
See https://www.dropbox.com/s/k9qlgn0pd6wtqlw/LSE_Gradient.pdf?dl=0
for a bunch of equations that summarize most of the functions below
@author Siddharth Reddy <sgr45@cornell.edu>
"""
from __future__ import division
import logging
import numpy as np
from . import models
_logger = logging.getLogger(__name__)
def without_scipy_without_lessons(
assessment_interactions,
lesson_interactions,
learning_update_variance,
forgetting_penalty_terms,
regularization_constant,
graph_regularization_constant,
student_participation_in_assessment_ixns,
student_bias_participation_in_assessment_ixns,
assessment_participation_in_assessment_ixns,
curr_student_participation_in_lesson_ixns,
prev_student_participation_in_lesson_ixns,
lesson_participation_in_lesson_ixns,
assessment_participation_in_concepts,
lesson_participation_in_concepts,
concept_participation_in_assessments,
concept_participation_in_lessons,
prereq_edge_concept_idxes,
concept_participation_in_prereq_edges,
last_student_embedding_idx,
last_assessment_embedding_idx,
last_lesson_embedding_idx,
last_prereq_embedding_idx,
last_student_bias_idx,
last_assessment_bias_idx,
num_timesteps,
using_bias,
using_graph_prior,
using_l1_regularizer):
"""
Setup a function that will compute gradients and evaluate the cost function
at supplied parameter values, for an embedding model without lessons and
a parameter estimation routine that uses gradient descent for optimization
:param (np.array,np.array,np.array) assessment_interactions:
For each assessment interaction, (student_idx, assessment_idx, outcome),
where outcome is -1 or 1
:param (np.array,np.array,np.array) lesson_interactions:
For each lesson interaction, (student_idx, lesson_idx, time_since_previous_interaction)
:param np.array|float learning_update_variance:
Variance of the Gaussian learning update. If float, then the variance
is constant across all interactions. If np.array, then the variance is
different for each lesson interaction.
:param np.array|float forgetting_penalty_terms:
Penalty term for the forgetting effect in the Gaussian learning update.
If float, then the penalty term is constant across all interactions. If
np.array, then the penalty is different for each lesson interaction.
:param (float,float,float,float,float) regularization_constant:
Coefficients of the regularization terms for (students, assessments,
lessons, prereqs, concepts)
:param float graph_regularization_constant:
Coefficient of the graph regularization term
:param scipy.sparse.csr_matrix student_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of assessment interactions] where a non-zero entry indicates that the student at a
specific timestep participated in the assessment interaction
:param scipy.sparse.csr_matrix student_bias_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students] X
[number of assessment interactions] where a non-zero entry indicates that the student
participated in the assessment interaction
:param scipy.sparse.csr_matrix assessment_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique assessments] X
[number of assessment interactions] where a non-zero entry indicates that the assessment
participated in the assessment interaction
:param scipy.sparse.csr_matrix curr_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the post-update student state for the lesson interaction
:param scipy.sparse.csr_matrix prev_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the pre-update student state for the lesson interaction
:param scipy.sparse.csr_matrix lesson_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique lessons] X [number of lesson interactions]
where a non-zero entry indicates that the lesson participated in the lesson interaction
:param scipy.sparse.csr_matrix assessment_participation_in_concepts:
A binary matrix of dimensions [number of unique assessments] X [number of unique concepts],
where an entry indicates assessment-concept association. Concept associations for a given
assessment sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix lesson_participation_in_concepts:
A binary matrix of dimensions [number of unique lessons] X [number of unique concepts],
where an entry indicates lesson-concept association. Concept associations for a given
lesson sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix concept_participation_in_assessments:
The transpose of assessment_participation_in_concepts
:param scipy.sparse.csr_matrix concept_participation_in_lessons:
        The transpose of lesson_participation_in_concepts
:param (np.array,np.array) prereq_edge_concept_idxes:
(Indices of prereq concepts, Indices of postreq concepts)
:param (scipy.sparse.csr_matrix,scipy.sparse.csr_matrix) concept_participation_in_prereq_edges:
The first binary matrix has dimensions [number of unique concepts] X
[number of prereq edges], where a non-zero entry indicates that the concept is the prereq
in the edge.
The second binary matrix has the same dimensions,
where a non-zero entry indicates that the concept is the postreq in the edge.
:param int last_student_embedding_idx:
Index of the last student embedding parameter in the flattened gradient
:param int last_assessment_embedding_idx:
Index of the last assessment embedding parameter in the flattened gradient
:param int last_lesson_embedding_idx:
Index of the last lesson embedding parameter in the flattened gradient
:param int last_prereq_embedding_idx:
Index of the last prereq embedding parameter in the flattened gradient
:param int last_student_bias_idx:
Index of the last student bias parameter in the flattened gradient
:param int last_assessment_bias_idx:
Index of the last assessment bias parameter in the flattened gradient
:param int num_timesteps:
Maximum number of timesteps in a student history, i.e.,
the output of InteractionHistory.duration()
:param bool using_bias:
Including bias terms in the assessment result likelihood
:param bool using_graph_prior:
Including the graph regularization term
:param bool using_l1_regularizer:
True => use L1 regularization on lesson and assessment embeddings
False => use L2 regularization on lesson and assessment embeddings
:rtype: function
:return:
A function that computes gradients and evaluates the cost function
at supplied parameter values. See the docstring below for my_grads
for further details.
"""
# pull regularization constants for different parameters out of tuple
(
student_regularization_constant,
assessment_regularization_constant,
lesson_regularization_constant,
prereq_regularization_constant,
concept_regularization_constant) = regularization_constant
def my_grads(param_vals):
"""
Compute the gradient of the cost function with respect to model parameters
:param dict[str,np.ndarray] param_vals:
A dictionary mapping a parameter's name to its current value
:rtype: (dict[str,np.ndarray], float)
:return:
A dictionary mapping a parameter's name to the gradient
of the cost function with respect to that parameter
(evaluated at the supplied parameter values), and the value of the cost function
(evaluated at the supplied parameter values)
"""
# pull parameters from param_vals into separate variables
student_embeddings = param_vals[models.STUDENT_EMBEDDINGS]
assessment_embeddings = param_vals[models.ASSESSMENT_EMBEDDINGS]
if using_graph_prior:
concept_embeddings = param_vals[models.CONCEPT_EMBEDDINGS]
# split assessment interactions into students, assessments, outcomes
(
student_idxes_for_assessment_ixns,
assessment_idxes_for_assessment_ixns,
outcomes_for_assessment_ixns) = assessment_interactions
# use dummy lesson interactions to get students in temporal process
student_idxes_for_temporal_process, _, _ = lesson_interactions
# get biases for assessment interactions
if using_bias:
student_biases = param_vals[models.STUDENT_BIASES][\
student_idxes_for_assessment_ixns // num_timesteps][:, None]
assessment_biases = param_vals[models.ASSESSMENT_BIASES][\
assessment_idxes_for_assessment_ixns][:, None]
else:
student_biases = assessment_biases = 0
# shape outcomes as a column vector
outcomes = outcomes_for_assessment_ixns[:, None]
# get the assessment embedding for each assessment interaction
assessment_embeddings_for_assessment_ixns = \
assessment_embeddings[assessment_idxes_for_assessment_ixns, :]
# compute the L2 norm of the assessment embedding for each assessment interaction
assessment_embedding_norms_for_assessment_ixns = np.linalg.norm(
assessment_embeddings_for_assessment_ixns, axis=1)[:, None]
# get the student embedding for each assessment interaction
student_embeddings_for_assessment_ixns = \
student_embeddings[student_idxes_for_assessment_ixns, :]
# compute the dot product of the student embedding
# and assessment embedding for each interaction
student_dot_assessment = np.einsum(
'ij, ij->i',
student_embeddings_for_assessment_ixns,
assessment_embeddings_for_assessment_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
exp_diff = np.exp(outcomes * (
assessment_embedding_norms_for_assessment_ixns - student_dot_assessment / \
assessment_embedding_norms_for_assessment_ixns - student_biases - \
assessment_biases))
one_plus_exp_diff = 1 + exp_diff
mult_diff = outcomes * exp_diff / one_plus_exp_diff
using_temporal_process = len(student_idxes_for_temporal_process) > 0
if using_temporal_process:
# get embeddings of student states resulting from lesson interactions
curr_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_temporal_process, :]
# get embeddings of student states prior to lesson interactions
prev_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_temporal_process - 1, :]
# compute intermediate quantities for the gradient that get reused
diffs = curr_student_embeddings_for_lesson_ixns - \
prev_student_embeddings_for_lesson_ixns + forgetting_penalty_terms
diffs_over_var = diffs / learning_update_variance
else:
diffs = diffs_over_var = 0
# compute intermediate quantities for graph regularization terms in the gradient
if using_graph_prior:
# get distance from an assessment embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the assessment's
# governing concepts
assessment_diffs_from_concept_centers = assessment_embeddings - \
assessment_participation_in_concepts.dot(concept_embeddings)
# grab the concept dependency graph
prereq_concept_idxes, postreq_concept_idxes = prereq_edge_concept_idxes
concept_participation_in_prereqs, concept_participation_in_postreqs = \
concept_participation_in_prereq_edges
# get prereq and postreq concept embeddings
prereq_concept_embeddings = concept_embeddings[prereq_concept_idxes, :]
postreq_concept_embeddings = concept_embeddings[postreq_concept_idxes, :]
# compute column vector of L2 norms for postreq concept embeddings
postreq_concept_norms = np.linalg.norm(postreq_concept_embeddings, axis=1)[:, None]
# compute the dot product of the prereq concept embedding
# and postreq concept embedding for each edge in the concept dependency graph
prereq_dot_postreq = np.einsum(
'ij, ij->i',
prereq_concept_embeddings,
postreq_concept_embeddings)[:, None]
# intermediate quantity, useful and reusable later
prereq_edge_diffs = prereq_dot_postreq / postreq_concept_norms - postreq_concept_norms
# compute the gradient w.r.t. student embeddings,
# which is the sum of gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
stud_grad_from_asmt_ixns = -student_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * \
assessment_embeddings_for_assessment_ixns)
stud_grad_from_norm_regularization = 2 * student_regularization_constant * \
student_embeddings
if using_temporal_process:
stud_grad_from_temporal_process = curr_student_participation_in_lesson_ixns.dot(
diffs_over_var)
else:
stud_grad_from_temporal_process = 0
        gradient_wrt_student_embedding = stud_grad_from_asmt_ixns + \
            stud_grad_from_norm_regularization + stud_grad_from_temporal_process
# compute the gradient w.r.t. assessment embeddings,
# which is the sum of gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
asmt_grad_from_asmt_ixns = -assessment_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * (
student_embeddings_for_assessment_ixns - \
assessment_embeddings_for_assessment_ixns - \
student_dot_assessment / np.einsum(
'ij, ij->ij',
assessment_embedding_norms_for_assessment_ixns,
assessment_embedding_norms_for_assessment_ixns) * \
assessment_embeddings_for_assessment_ixns))
if using_l1_regularizer:
asmt_grad_from_norm_regularization = assessment_regularization_constant * np.sign(
assessment_embeddings)
else:
asmt_grad_from_norm_regularization = 2 * assessment_regularization_constant * \
assessment_embeddings
if using_graph_prior:
asmt_grad_from_graph_regularization = 2 * graph_regularization_constant * \
assessment_diffs_from_concept_centers
else:
asmt_grad_from_graph_regularization = 0
gradient_wrt_assessment_embedding = asmt_grad_from_asmt_ixns + \
asmt_grad_from_norm_regularization + asmt_grad_from_graph_regularization
# compute the gradient w.r.t. student biases,
# which is the sum of gradient of the log-likelihood of assessment interactions
gradient_wrt_student_biases = \
-student_bias_participation_in_assessment_ixns.dot(mult_diff)[:,0]
# compute the gradient w.r.t. assessment biases,
# which is the sum of gradient of the log-likelihood of assessment interactions
gradient_wrt_assessment_biases = \
-assessment_participation_in_assessment_ixns.dot(mult_diff)[:,0]
if using_graph_prior:
# compute the gradient w.r.t. concept embeddings,
# which is the sum of gradient of the log-likelihood
# of assessment and lesson interactions and the gradient
# of the regularization terms
concept_grad_from_assessments = -concept_participation_in_assessments.dot(
2 * assessment_diffs_from_concept_centers)
concept_grad_from_prereqs = concept_participation_in_prereqs.dot(
postreq_concept_embeddings / postreq_concept_norms)
concept_grad_from_postreqs = concept_participation_in_postreqs.dot(
(prereq_concept_embeddings - 2 * postreq_concept_embeddings) / \
postreq_concept_norms + 2 * postreq_concept_embeddings * \
prereq_dot_postreq / postreq_concept_norms**3)
gradient_wrt_concept_embedding = graph_regularization_constant * (
concept_grad_from_assessments + concept_grad_from_prereqs + \
concept_grad_from_postreqs) + 2 * concept_regularization_constant * \
concept_embeddings
else:
gradient_wrt_concept_embedding = None
gradient = {
models.STUDENT_EMBEDDINGS : gradient_wrt_student_embedding,
models.ASSESSMENT_EMBEDDINGS : gradient_wrt_assessment_embedding,
models.STUDENT_BIASES : gradient_wrt_student_biases,
models.ASSESSMENT_BIASES : gradient_wrt_assessment_biases,
models.CONCEPT_EMBEDDINGS : gradient_wrt_concept_embedding
}
cost_from_assessment_ixns = np.einsum('ij->', np.log(one_plus_exp_diff))
if using_temporal_process:
cost_from_temporal_process = np.einsum(
'ij, ij', diffs, diffs) / (2 * learning_update_variance)
else:
cost_from_temporal_process = 0
cost_from_student_regularization = student_regularization_constant * np.einsum(
'ij, ij', student_embeddings, student_embeddings)
if using_l1_regularizer:
cost_from_assessment_regularization = assessment_regularization_constant * np.absolute(
assessment_embeddings).sum()
else:
cost_from_assessment_regularization = assessment_regularization_constant * np.einsum(
'ij, ij', assessment_embeddings, assessment_embeddings)
if using_graph_prior:
cost_from_concept_regularization = concept_regularization_constant * np.einsum(
'ij, ij', concept_embeddings, concept_embeddings)
cost_from_graph_regularization = graph_regularization_constant * (
(assessment_diffs_from_concept_centers**2).sum() + prereq_edge_diffs.sum())
else:
cost_from_concept_regularization = cost_from_graph_regularization = 0
cost_from_regularization = cost_from_student_regularization + \
cost_from_assessment_regularization + cost_from_concept_regularization + \
cost_from_graph_regularization
cost = cost_from_assessment_ixns + cost_from_temporal_process + cost_from_regularization
return gradient, cost
return my_grads
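# A note on the np.einsum('ij, ij->i', A, B) idiom used throughout my_grads:
# it multiplies two equal-shape matrices elementwise and sums over j, i.e.
# one dot product per row. A tiny illustrative check (not part of lentil):
#
#     import numpy as np
#     a = np.arange(6.0).reshape(3, 2)
#     b = np.ones((3, 2))
#     rowwise = np.einsum('ij, ij->i', a, b)
#     assert np.allclose(rowwise, (a * b).sum(axis=1))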
def without_scipy_with_prereqs(
assessment_interactions,
lesson_interactions,
learning_update_variance,
forgetting_penalty_terms,
regularization_constant,
graph_regularization_constant,
student_participation_in_assessment_ixns,
student_bias_participation_in_assessment_ixns,
assessment_participation_in_assessment_ixns,
curr_student_participation_in_lesson_ixns,
prev_student_participation_in_lesson_ixns,
lesson_participation_in_lesson_ixns,
assessment_participation_in_concepts,
lesson_participation_in_concepts,
concept_participation_in_assessments,
concept_participation_in_lessons,
prereq_edge_concept_idxes,
concept_participation_in_prereq_edges,
last_student_embedding_idx,
last_assessment_embedding_idx,
last_lesson_embedding_idx,
last_prereq_embedding_idx,
last_student_bias_idx,
last_assessment_bias_idx,
num_timesteps,
using_bias,
using_graph_prior,
using_l1_regularizer):
"""
Setup a function that will compute gradients and evaluate the cost function
at supplied parameter values, for a full embedding model and
a parameter estimation routine that uses gradient descent for optimization
:param (np.array,np.array,np.array) assessment_interactions:
For each assessment interaction, (student_idx, assessment_idx, outcome),
where outcome is -1 or 1
:param (np.array,np.array,np.array) lesson_interactions:
For each lesson interaction, (student_idx, lesson_idx, time_since_previous_interaction)
:param np.array|float learning_update_variance:
Variance of the Gaussian learning update. If float, then the variance
is constant across all interactions. If np.array, then the variance is
different for each lesson interaction.
:param np.array|float forgetting_penalty_terms:
Penalty term for the forgetting effect in the Gaussian learning update.
If float, then the penalty term is constant across all interactions. If
np.array, then the penalty is different for each lesson interaction.
:param (float,float,float,float,float) regularization_constant:
Coefficients of the regularization terms for (students, assessments,
lessons, prereqs, concepts)
:param float graph_regularization_constant:
Coefficient of the graph regularization term
:param scipy.sparse.csr_matrix student_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of assessment interactions] where a non-zero entry indicates that the student at a
specific timestep participated in the assessment interaction
:param scipy.sparse.csr_matrix student_bias_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students] X
[number of assessment interactions] where a non-zero entry indicates that the student
participated in the assessment interaction
:param scipy.sparse.csr_matrix assessment_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique assessments] X
[number of assessment interactions] where a non-zero entry indicates that the assessment
participated in the assessment interaction
:param scipy.sparse.csr_matrix curr_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the post-update student state for the lesson interaction
:param scipy.sparse.csr_matrix prev_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the pre-update student state for the lesson interaction
:param scipy.sparse.csr_matrix lesson_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique lessons] X [number of lesson interactions]
where a non-zero entry indicates that the lesson participated in the lesson interaction
:param scipy.sparse.csr_matrix assessment_participation_in_concepts:
A binary matrix of dimensions [number of unique assessments] X [number of unique concepts],
where an entry indicates assessment-concept association. Concept associations for a given
assessment sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix lesson_participation_in_concepts:
A binary matrix of dimensions [number of unique lessons] X [number of unique concepts],
where an entry indicates lesson-concept association. Concept associations for a given
lesson sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix concept_participation_in_assessments:
The transpose of assessment_participation_in_concepts
:param scipy.sparse.csr_matrix concept_participation_in_lessons:
The transpose of lesson_participation_in_concepts
:param (np.array,np.array) prereq_edge_concept_idxes:
(Indices of prereq concepts, Indices of postreq concepts)
:param (scipy.sparse.csr_matrix,scipy.sparse.csr_matrix) concept_participation_in_prereq_edges:
The first binary matrix has dimensions [number of unique concepts] X
[number of prereq edges], where a non-zero entry indicates that the concept is the prereq
in the edge.
The second binary matrix has the same dimensions,
where a non-zero entry indicates that the concept is the postreq in the edge.
:param int last_student_embedding_idx:
Index of the last student embedding parameter in the flattened gradient
:param int last_assessment_embedding_idx:
Index of the last assessment embedding parameter in the flattened gradient
:param int last_lesson_embedding_idx:
Index of the last lesson embedding parameter in the flattened gradient
:param int last_prereq_embedding_idx:
Index of the last prereq embedding parameter in the flattened gradient
:param int last_student_bias_idx:
Index of the last student bias parameter in the flattened gradient
:param int last_assessment_bias_idx:
Index of the last assessment bias parameter in the flattened gradient
:param int num_timesteps:
Maximum number of timesteps in a student history, i.e.,
the output of InteractionHistory.duration()
:param bool using_bias:
Whether to include bias terms in the assessment result likelihood
:param bool using_graph_prior:
Whether to include the graph regularization term
:param bool using_l1_regularizer:
True => use L1 regularization on lesson and assessment embeddings
False => use L2 regularization on lesson and assessment embeddings
:rtype: function
:return:
A function that computes gradients and evaluates the cost function
at supplied parameter values. See the docstring below for my_grads
for further details.
"""
# pull regularization constants for different parameters out of tuple
(
student_regularization_constant,
assessment_regularization_constant,
lesson_regularization_constant,
prereq_regularization_constant,
concept_regularization_constant) = regularization_constant
def my_grads(param_vals):
"""
Compute the gradient of the cost function with respect to model parameters,
along with the value of the cost function
:param dict[str,np.ndarray] param_vals:
A dictionary mapping a parameter's name to its current value
:rtype: (dict[str,np.ndarray],float)
:return:
A dictionary mapping a parameter's name to the gradient
of the cost function with respect to that parameter
(evaluated at the supplied parameter values),
and the value of the cost function
(evaluated at the supplied parameter values)
"""
# pull parameters from param_vals into separate variables
student_embeddings = param_vals[models.STUDENT_EMBEDDINGS]
assessment_embeddings = param_vals[models.ASSESSMENT_EMBEDDINGS]
lesson_embeddings = param_vals[models.LESSON_EMBEDDINGS]
prereq_embeddings = param_vals[models.PREREQ_EMBEDDINGS]
if using_graph_prior:
concept_embeddings = param_vals[models.CONCEPT_EMBEDDINGS]
# split assessment interactions into students, assessments, outcomes
(
student_idxes_for_assessment_ixns,
assessment_idxes_for_assessment_ixns,
outcomes_for_assessment_ixns) = assessment_interactions
# split lesson interactions into students, lessons
student_idxes_for_lesson_ixns, lesson_idxes_for_lesson_ixns, _ = lesson_interactions
# get biases for assessment interactions
if using_bias:
student_biases = param_vals[models.STUDENT_BIASES][\
student_idxes_for_assessment_ixns // num_timesteps][:, None]
assessment_biases = param_vals[models.ASSESSMENT_BIASES][\
assessment_idxes_for_assessment_ixns][:, None]
else:
student_biases = assessment_biases = 0
# shape outcomes as a column vector
outcomes = outcomes_for_assessment_ixns[:, None]
# get the assessment embedding for each assessment interaction
assessment_embeddings_for_assessment_ixns = \
assessment_embeddings[assessment_idxes_for_assessment_ixns, :]
# compute the L2 norm of the assessment embedding for each assessment interaction
assessment_embedding_norms_for_assessment_ixns = np.linalg.norm(
assessment_embeddings_for_assessment_ixns, axis=1)[:, None]
# get the student embedding for each assessment interaction
student_embeddings_for_assessment_ixns = \
student_embeddings[student_idxes_for_assessment_ixns, :]
# compute the dot product of the student embedding
# and assessment embedding for each interaction
student_dot_assessment = np.einsum(
'ij, ij->i',
student_embeddings_for_assessment_ixns,
assessment_embeddings_for_assessment_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
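# the assessment model implied by this cost is
# P(outcome = 1) = sigmoid(s.a / ||a|| - ||a|| + student_bias + assessment_bias)
# for student embedding s and assessment embedding a, so log(1 + exp_diff)
# is the negative log-likelihood of the observed outcome and mult_diff is
# the shared factor in its partial derivatives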
exp_diff = np.exp(outcomes * (
assessment_embedding_norms_for_assessment_ixns - student_dot_assessment / \
assessment_embedding_norms_for_assessment_ixns - student_biases - \
assessment_biases))
one_plus_exp_diff = 1 + exp_diff
mult_diff = outcomes * exp_diff / one_plus_exp_diff
# get lesson embeddings for lesson interactions
lesson_embeddings_for_lesson_ixns = lesson_embeddings[lesson_idxes_for_lesson_ixns, :]
# get lesson prereq embeddings for lesson interactions
prereq_embeddings_for_lesson_ixns = prereq_embeddings[lesson_idxes_for_lesson_ixns, :]
# get embeddings of student states resulting from lesson interactions
curr_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_lesson_ixns, :]
# get embeddings of student states prior to lesson interactions
prev_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_lesson_ixns - 1, :]
# compute the L2 norm of the lesson prereq embedding for each lesson interaction
prereq_embedding_norms_for_lesson_ixns = np.linalg.norm(
prereq_embeddings_for_lesson_ixns, axis=1)[:, None]
# compute the dot product of the student embedding prior
# to the lesson interaction and the lesson prereq embedding,
# for each interaction
prev_student_dot_prereq = np.einsum(
'ij, ij->i',
prev_student_embeddings_for_lesson_ixns,
prereq_embeddings_for_lesson_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
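# the gated learning update is
# current_state ~ N(prev_state + lesson / (1 + exp(||q|| - prev.q / ||q||))
#                   - forgetting_penalty, learning_update_variance)
# where q is the lesson's prereq embedding; diffs below is the residual of
# that update, and update_mult_diff is the shared factor in the gradients
# that flow through the prereq gate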
update_exp_diff = np.exp(
prereq_embedding_norms_for_lesson_ixns - prev_student_dot_prereq / \
prereq_embedding_norms_for_lesson_ixns)
update_one_plus_exp_diff = 1 + update_exp_diff
diffs = curr_student_embeddings_for_lesson_ixns - prev_student_embeddings_for_lesson_ixns \
- lesson_embeddings_for_lesson_ixns / update_one_plus_exp_diff + \
forgetting_penalty_terms
diffs_over_var = diffs / learning_update_variance
update_mult_diff = np.einsum(
'ij, ij->i',
diffs_over_var,
lesson_embeddings_for_lesson_ixns)[:, None] * update_exp_diff / (
np.einsum(
'ij, ij->ij',
update_one_plus_exp_diff,
update_one_plus_exp_diff) * prereq_embedding_norms_for_lesson_ixns)
if using_graph_prior:
# get distance from an assessment embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the assessment's
# governing concepts
assessment_diffs_from_concept_centers = assessment_embeddings - \
assessment_participation_in_concepts.dot(concept_embeddings)
# get distance from a lesson embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the lesson's
# governing concepts
lesson_diffs_from_concept_centers = lesson_embeddings - \
lesson_participation_in_concepts.dot(concept_embeddings)
# grab the concept dependency graph
prereq_concept_idxes, postreq_concept_idxes = prereq_edge_concept_idxes
concept_participation_in_prereqs, concept_participation_in_postreqs = \
concept_participation_in_prereq_edges
# get prereq and postreq concept embeddings
prereq_concept_embeddings = concept_embeddings[prereq_concept_idxes, :]
postreq_concept_embeddings = concept_embeddings[postreq_concept_idxes, :]
# compute column vector of L2 norms for postreq concept embeddings
postreq_concept_norms = np.linalg.norm(postreq_concept_embeddings, axis=1)[:, None]
# compute the dot product of the prereq concept embedding
# and postreq concept embedding for each edge in the concept dependency graph
prereq_dot_postreq = np.einsum(
'ij, ij->i',
prereq_concept_embeddings,
postreq_concept_embeddings)[:, None]
# intermediate quantity, useful and reusable later
prereq_edge_diffs = prereq_dot_postreq / postreq_concept_norms - postreq_concept_norms
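# each prereq edge contributes
# prereq.postreq / ||postreq|| - ||postreq||
# to the graph regularization cost computed below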
# compute the gradient w.r.t. student embeddings,
# which is the sum of the gradients of the log-likelihoods of
# assessment and lesson interactions and the gradient of the regularization terms
stud_grad_from_asmt_ixns = -student_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * \
assessment_embeddings_for_assessment_ixns)
stud_grad_from_lesson_ixns = curr_student_participation_in_lesson_ixns.dot(
diffs_over_var) - prev_student_participation_in_lesson_ixns.dot(
update_mult_diff * prereq_embeddings_for_lesson_ixns + diffs_over_var)
stud_grad_from_norm_regularization = 2 * student_regularization_constant * \
student_embeddings
gradient_wrt_student_embedding = stud_grad_from_asmt_ixns + stud_grad_from_lesson_ixns + \
stud_grad_from_norm_regularization
# compute the gradient w.r.t. assessment embeddings,
# which is the sum of the gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
asmt_grad_from_asmt_ixns = -assessment_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * (
student_embeddings_for_assessment_ixns - \
assessment_embeddings_for_assessment_ixns - \
student_dot_assessment / np.einsum(
'ij, ij->ij',
assessment_embedding_norms_for_assessment_ixns,
assessment_embedding_norms_for_assessment_ixns) * \
assessment_embeddings_for_assessment_ixns))
if using_l1_regularizer:
asmt_grad_from_norm_regularization = assessment_regularization_constant * np.sign(
assessment_embeddings)
else:
asmt_grad_from_norm_regularization = 2 * assessment_regularization_constant * \
assessment_embeddings
if using_graph_prior:
asmt_grad_from_graph_regularization = 2 * graph_regularization_constant * \
assessment_diffs_from_concept_centers
else:
asmt_grad_from_graph_regularization = 0
gradient_wrt_assessment_embedding = asmt_grad_from_asmt_ixns + \
asmt_grad_from_norm_regularization + asmt_grad_from_graph_regularization
# compute the gradient w.r.t. lesson embeddings,
# which is the sum of the gradient of the log-likelihood of lesson interactions
# and the gradient of the regularization terms
lesson_grad_from_lesson_ixns = -lesson_participation_in_lesson_ixns.dot(
diffs_over_var / update_one_plus_exp_diff)
if using_l1_regularizer:
lesson_grad_from_norm_regularization = lesson_regularization_constant * np.sign(
lesson_embeddings)
else:
lesson_grad_from_norm_regularization = 2 * lesson_regularization_constant * \
lesson_embeddings
if using_graph_prior:
lesson_grad_from_graph_regularization = 2 * graph_regularization_constant * \
lesson_diffs_from_concept_centers
else:
lesson_grad_from_graph_regularization = 0
gradient_wrt_lesson_embedding = lesson_grad_from_lesson_ixns + \
lesson_grad_from_norm_regularization + lesson_grad_from_graph_regularization
# compute the gradient w.r.t. prereq embeddings,
# which is the sum of the gradient of the log-likelihood of lesson interactions
# and the gradient of the regularization terms
prereq_grad_from_lesson_ixns = lesson_participation_in_lesson_ixns.dot(
update_mult_diff * (prev_student_dot_prereq / np.einsum(
'ij, ij->ij',
prereq_embedding_norms_for_lesson_ixns,
prereq_embedding_norms_for_lesson_ixns) * \
prereq_embeddings_for_lesson_ixns - \
prev_student_embeddings_for_lesson_ixns + \
prereq_embeddings_for_lesson_ixns))
prereq_grad_from_norm_regularization = 2 * prereq_regularization_constant * \
prereq_embeddings
gradient_wrt_prereq_embedding = \
prereq_grad_from_lesson_ixns + prereq_grad_from_norm_regularization
# compute the gradient w.r.t. student biases,
# which comes entirely from the log-likelihood of assessment interactions
gradient_wrt_student_biases = \
-student_bias_participation_in_assessment_ixns.dot(mult_diff)[:, 0]
# compute the gradient w.r.t. assessment biases,
# which comes entirely from the log-likelihood of assessment interactions
gradient_wrt_assessment_biases = \
-assessment_participation_in_assessment_ixns.dot(mult_diff)[:, 0]
if using_graph_prior:
# compute the gradient w.r.t. concept embeddings,
# which is the sum of the gradients of the graph regularization terms
# (tying concepts to assessments, lessons, and prereq edges)
# and the gradient of the concept norm-regularization term
concept_grad_from_assessments = -concept_participation_in_assessments.dot(
2 * assessment_diffs_from_concept_centers)
concept_grad_from_lessons = concept_participation_in_lessons.dot(
2 * lesson_diffs_from_concept_centers)
concept_grad_from_prereqs = concept_participation_in_prereqs.dot(
postreq_concept_embeddings / postreq_concept_norms)
concept_grad_from_postreqs = concept_participation_in_postreqs.dot(
(prereq_concept_embeddings - 2 * postreq_concept_embeddings) / \
postreq_concept_norms - 2 * prereq_dot_postreq * \
postreq_concept_embeddings / postreq_concept_norms**3)
gradient_wrt_concept_embedding = graph_regularization_constant * (
concept_grad_from_assessments + concept_grad_from_lessons + \
concept_grad_from_prereqs + concept_grad_from_postreqs) + \
2 * concept_regularization_constant * concept_embeddings
else:
gradient_wrt_concept_embedding = None
gradient = {
models.STUDENT_EMBEDDINGS : gradient_wrt_student_embedding,
models.ASSESSMENT_EMBEDDINGS : gradient_wrt_assessment_embedding,
models.LESSON_EMBEDDINGS : gradient_wrt_lesson_embedding,
models.PREREQ_EMBEDDINGS : gradient_wrt_prereq_embedding,
models.STUDENT_BIASES : gradient_wrt_student_biases,
models.ASSESSMENT_BIASES : gradient_wrt_assessment_biases,
models.CONCEPT_EMBEDDINGS : gradient_wrt_concept_embedding
}
cost_from_assessment_ixns = np.einsum('ij->', np.log(one_plus_exp_diff))
# equals sum(diffs**2 / variance) / 2 whether the variance is a scalar
# or a per-interaction column vector
cost_from_lesson_ixns = np.einsum('ij, ij', diffs, diffs_over_var) / 2
cost_from_student_regularization = student_regularization_constant * np.einsum(
'ij, ij', student_embeddings, student_embeddings)
if using_l1_regularizer:
cost_from_assessment_regularization = assessment_regularization_constant * np.absolute(
assessment_embeddings).sum()
cost_from_lesson_regularization = lesson_regularization_constant * np.absolute(
lesson_embeddings).sum()
else:
cost_from_assessment_regularization = assessment_regularization_constant * np.einsum(
'ij, ij', assessment_embeddings, assessment_embeddings)
cost_from_lesson_regularization = lesson_regularization_constant * np.einsum(
'ij, ij', lesson_embeddings, lesson_embeddings)
cost_from_prereq_regularization = prereq_regularization_constant * np.einsum(
'ij, ij', prereq_embeddings, prereq_embeddings)
if using_graph_prior:
cost_from_concept_regularization = concept_regularization_constant * np.einsum(
'ij, ij', concept_embeddings, concept_embeddings)
cost_from_graph_regularization = graph_regularization_constant * ((
assessment_diffs_from_concept_centers**2).sum() + (
lesson_diffs_from_concept_centers**2).sum() + prereq_edge_diffs.sum())
else:
cost_from_concept_regularization = cost_from_graph_regularization = 0
cost_from_ixns = cost_from_assessment_ixns + cost_from_lesson_ixns
cost_from_regularization = cost_from_student_regularization + \
cost_from_assessment_regularization + cost_from_lesson_regularization + \
cost_from_prereq_regularization + cost_from_concept_regularization + \
cost_from_graph_regularization
cost = cost_from_ixns + cost_from_regularization
return gradient, cost
return my_grads
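# Example (a minimal sketch, not part of the library; factory_args,
# param_vals, num_iterations, and step_size are hypothetical names):
# driving the returned closure with plain fixed-step gradient descent.
#
#     my_grads = without_scipy_with_prereqs(*factory_args)
#     for _ in range(num_iterations):
#         gradient, cost = my_grads(param_vals)
#         for name, grad in gradient.items():
#             if grad is not None:
#                 param_vals[name] -= step_size * grad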
def without_scipy_without_prereqs(
assessment_interactions,
lesson_interactions,
learning_update_variance,
forgetting_penalty_terms,
regularization_constant,
graph_regularization_constant,
student_participation_in_assessment_ixns,
student_bias_participation_in_assessment_ixns,
assessment_participation_in_assessment_ixns,
curr_student_participation_in_lesson_ixns,
prev_student_participation_in_lesson_ixns,
lesson_participation_in_lesson_ixns,
assessment_participation_in_concepts,
lesson_participation_in_concepts,
concept_participation_in_assessments,
concept_participation_in_lessons,
prereq_edge_concept_idxes,
concept_participation_in_prereq_edges,
last_student_embedding_idx,
last_assessment_embedding_idx,
last_lesson_embedding_idx,
last_prereq_embedding_idx,
last_student_bias_idx,
last_assessment_bias_idx,
num_timesteps,
using_bias,
using_graph_prior,
using_l1_regularizer):
"""
Set up a function that will compute gradients and evaluate the cost function
at supplied parameter values, for an embedding model without prereqs and
a parameter estimation routine that uses gradient descent for optimization
:param (np.array,np.array,np.array) assessment_interactions:
For each assessment interaction, (student_idx, assessment_idx, outcome),
where outcome is -1 or 1
:param (np.array,np.array,np.array) lesson_interactions:
For each lesson interaction, (student_idx, lesson_idx, time_since_previous_interaction)
:param np.array|float learning_update_variance:
Variance of the Gaussian learning update. If float, then the variance
is constant across all interactions. If np.array, then the variance is
different for each lesson interaction.
:param np.array|float forgetting_penalty_terms:
Penalty term for the forgetting effect in the Gaussian learning update.
If float, then the penalty term is constant across all interactions. If
np.array, then the penalty is different for each lesson interaction.
:param (float,float,float,float,float) regularization_constant:
Coefficients of the regularization terms for (students, assessments,
lessons, prereqs, concepts)
:param float graph_regularization_constant:
Coefficient of the graph regularization term
:param scipy.sparse.csr_matrix student_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of assessment interactions] where a non-zero entry indicates that the student at a
specific timestep participated in the assessment interaction
:param scipy.sparse.csr_matrix student_bias_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students] X
[number of assessment interactions] where a non-zero entry indicates that the student
participated in the assessment interaction
:param scipy.sparse.csr_matrix assessment_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique assessments] X
[number of assessment interactions] where a non-zero entry indicates that the assessment
participated in the assessment interaction
:param scipy.sparse.csr_matrix curr_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the post-update student state for the lesson interaction
:param scipy.sparse.csr_matrix prev_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the pre-update student state for the lesson interaction
:param scipy.sparse.csr_matrix lesson_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique lessons] X [number of lesson interactions]
where a non-zero entry indicates that the lesson participated in the lesson interaction
:param scipy.sparse.csr_matrix assessment_participation_in_concepts:
A binary matrix of dimensions [number of unique assessments] X [number of unique concepts],
where an entry indicates assessment-concept association. Concept associations for a given
assessment sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix lesson_participation_in_concepts:
A binary matrix of dimensions [number of unique lessons] X [number of unique concepts],
where an entry indicates lesson-concept association. Concept associations for a given
lesson sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix concept_participation_in_assessments:
The transpose of assessment_participation_in_concepts
:param scipy.sparse.csr_matrix concept_participation_in_lessons:
The transpose of lesson_participation_in_concepts
:param (np.array,np.array) prereq_edge_concept_idxes:
(Indices of prereq concepts, Indices of postreq concepts)
:param (scipy.sparse.csr_matrix,scipy.sparse.csr_matrix) concept_participation_in_prereq_edges:
The first binary matrix has dimensions [number of unique concepts] X
[number of prereq edges], where a non-zero entry indicates that the concept is the prereq
in the edge.
The second binary matrix has the same dimensions,
where a non-zero entry indicates that the concept is the postreq in the edge.
:param int last_student_embedding_idx:
Index of the last student embedding parameter in the flattened gradient
:param int last_assessment_embedding_idx:
Index of the last assessment embedding parameter in the flattened gradient
:param int last_lesson_embedding_idx:
Index of the last lesson embedding parameter in the flattened gradient
:param int last_prereq_embedding_idx:
Index of the last prereq embedding parameter in the flattened gradient
:param int last_student_bias_idx:
Index of the last student bias parameter in the flattened gradient
:param int last_assessment_bias_idx:
Index of the last assessment bias parameter in the flattened gradient
:param int num_timesteps:
Maximum number of timesteps in a student history, i.e.,
the output of InteractionHistory.duration()
:param bool using_bias:
Whether to include bias terms in the assessment result likelihood
:param bool using_graph_prior:
Whether to include the graph regularization term
:param bool using_l1_regularizer:
True => use L1 regularization on lesson and assessment embeddings
False => use L2 regularization on lesson and assessment embeddings
:rtype: function
:return:
A function that computes gradients and evaluates the cost function
at supplied parameter values. See the docstring below for my_grads
for further details.
"""
# pull regularization constants for different parameters out of tuple
(
student_regularization_constant,
assessment_regularization_constant,
lesson_regularization_constant,
prereq_regularization_constant,
concept_regularization_constant) = regularization_constant
def my_grads(param_vals):
"""
Compute the gradient of the cost function with respect to model parameters,
along with the value of the cost function
:param dict[str,np.ndarray] param_vals:
A dictionary mapping a parameter's name to its current value
:rtype: (dict[str,np.ndarray],float)
:return:
A dictionary mapping a parameter's name to the gradient
of the cost function with respect to that parameter
(evaluated at the supplied parameter values),
and the value of the cost function
(evaluated at the supplied parameter values)
"""
# pull parameters from param_vals into separate variables
student_embeddings = param_vals[models.STUDENT_EMBEDDINGS]
assessment_embeddings = param_vals[models.ASSESSMENT_EMBEDDINGS]
lesson_embeddings = param_vals[models.LESSON_EMBEDDINGS]
if using_graph_prior:
concept_embeddings = param_vals[models.CONCEPT_EMBEDDINGS]
# split assessment interactions into students, assessments, outcomes
(
student_idxes_for_assessment_ixns,
assessment_idxes_for_assessment_ixns,
outcomes_for_assessment_ixns) = assessment_interactions
# split lesson interactions into students, lessons
student_idxes_for_lesson_ixns, lesson_idxes_for_lesson_ixns, _ = lesson_interactions
# get biases for assessment interactions
if using_bias:
student_biases = param_vals[models.STUDENT_BIASES][\
student_idxes_for_assessment_ixns // num_timesteps][:, None]
assessment_biases = param_vals[models.ASSESSMENT_BIASES][\
assessment_idxes_for_assessment_ixns][:, None]
else:
student_biases = assessment_biases = 0
# shape outcomes as a column vector
outcomes = outcomes_for_assessment_ixns[:, None]
# get the assessment embedding for each assessment interaction
assessment_embeddings_for_assessment_ixns = \
assessment_embeddings[assessment_idxes_for_assessment_ixns, :]
# compute the L2 norm of the assessment embedding for each assessment interaction
assessment_embedding_norms_for_assessment_ixns = np.linalg.norm(
assessment_embeddings_for_assessment_ixns, axis=1)[:, None]
# get the student embedding for each assessment interaction
student_embeddings_for_assessment_ixns = \
student_embeddings[student_idxes_for_assessment_ixns, :]
# compute the dot product of the student embedding
# and assessment embedding for each interaction
student_dot_assessment = np.einsum(
'ij, ij->i',
student_embeddings_for_assessment_ixns,
assessment_embeddings_for_assessment_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
exp_diff = np.exp(outcomes * (
assessment_embedding_norms_for_assessment_ixns - student_dot_assessment / \
assessment_embedding_norms_for_assessment_ixns - student_biases - \
assessment_biases))
one_plus_exp_diff = 1 + exp_diff
mult_diff = outcomes * exp_diff / one_plus_exp_diff
# get lesson embeddings for lesson interactions
lesson_embeddings_for_lesson_ixns = lesson_embeddings[lesson_idxes_for_lesson_ixns, :]
# get embeddings of student states resulting from lesson interactions
curr_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_lesson_ixns, :]
# get embeddings of student states prior to lesson interactions
prev_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_lesson_ixns - 1, :]
# compute intermediate quantities for the gradient that get reused
diffs = curr_student_embeddings_for_lesson_ixns - prev_student_embeddings_for_lesson_ixns \
- lesson_embeddings_for_lesson_ixns + forgetting_penalty_terms
diffs_over_var = diffs / learning_update_variance
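# without a prereq gate, the learning update is simply
# current_state ~ N(prev_state + lesson_embedding - forgetting_penalty,
#                   learning_update_variance)
# and diffs is the residual of that update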
if using_graph_prior:
# get distance from an assessment embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the assessment's
# governing concepts
assessment_diffs_from_concept_centers = assessment_embeddings - \
assessment_participation_in_concepts.dot(concept_embeddings)
# get distance from a lesson embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the lesson's
# governing concepts
lesson_diffs_from_concept_centers = lesson_embeddings - \
lesson_participation_in_concepts.dot(concept_embeddings)
# grab the concept dependency graph
prereq_concept_idxes, postreq_concept_idxes = prereq_edge_concept_idxes
concept_participation_in_prereqs, concept_participation_in_postreqs = \
concept_participation_in_prereq_edges
# get prereq and postreq concept embeddings
prereq_concept_embeddings = concept_embeddings[prereq_concept_idxes, :]
postreq_concept_embeddings = concept_embeddings[postreq_concept_idxes, :]
# compute column vector of L2 norms for postreq concept embeddings
postreq_concept_norms = np.linalg.norm(postreq_concept_embeddings, axis=1)[:, None]
# compute the dot product of the prereq concept embedding
# and postreq concept embedding for each edge in the concept dependency graph
prereq_dot_postreq = np.einsum(
'ij, ij->i',
prereq_concept_embeddings,
postreq_concept_embeddings)[:, None]
# intermediate quantity, useful and reusable later
prereq_edge_diffs = prereq_dot_postreq / postreq_concept_norms - postreq_concept_norms
# compute the gradient w.r.t. student embeddings,
# which is the sum of the gradients of the log-likelihoods of
# assessment and lesson interactions and the gradient of the regularization terms
stud_grad_from_asmt_ixns = -student_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * \
assessment_embeddings_for_assessment_ixns)
stud_grad_from_lesson_ixns = curr_student_participation_in_lesson_ixns.dot(diffs_over_var)
stud_grad_from_norm_regularization = 2 * student_regularization_constant * \
student_embeddings
gradient_wrt_student_embedding = stud_grad_from_asmt_ixns + stud_grad_from_lesson_ixns + \
stud_grad_from_norm_regularization
# compute the gradient w.r.t. assessment embeddings,
# which is the sum of the gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
asmt_grad_from_asmt_ixns = -assessment_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * (
student_embeddings_for_assessment_ixns - \
assessment_embeddings_for_assessment_ixns - student_dot_assessment / \
np.einsum(
'ij, ij->ij',
assessment_embedding_norms_for_assessment_ixns,
assessment_embedding_norms_for_assessment_ixns) * \
assessment_embeddings_for_assessment_ixns))
if using_l1_regularizer:
asmt_grad_from_norm_regularization = assessment_regularization_constant * np.sign(
assessment_embeddings)
else:
asmt_grad_from_norm_regularization = 2 * assessment_regularization_constant * \
assessment_embeddings
if using_graph_prior:
asmt_grad_from_graph_regularization = 2 * graph_regularization_constant * \
assessment_diffs_from_concept_centers
else:
asmt_grad_from_graph_regularization = 0
gradient_wrt_assessment_embedding = asmt_grad_from_asmt_ixns + \
asmt_grad_from_norm_regularization + asmt_grad_from_graph_regularization
# compute the gradient w.r.t. lesson embeddings,
# which is the sum of the gradient of the log-likelihood of lesson interactions
# and the gradient of the regularization terms
lesson_grad_from_lesson_ixns = -lesson_participation_in_lesson_ixns.dot(diffs_over_var)
if using_l1_regularizer:
lesson_grad_from_norm_regularization = lesson_regularization_constant * np.sign(
lesson_embeddings)
else:
lesson_grad_from_norm_regularization = 2 * lesson_regularization_constant * \
lesson_embeddings
if using_graph_prior:
lesson_grad_from_graph_regularization = 2 * graph_regularization_constant * \
lesson_diffs_from_concept_centers
else:
lesson_grad_from_graph_regularization = 0
gradient_wrt_lesson_embedding = lesson_grad_from_lesson_ixns + \
lesson_grad_from_norm_regularization + lesson_grad_from_graph_regularization
# compute the gradient w.r.t. student biases,
# which comes entirely from the log-likelihood of assessment interactions
gradient_wrt_student_biases = \
-student_bias_participation_in_assessment_ixns.dot(mult_diff)[:, 0]
# compute the gradient w.r.t. assessment biases,
# which comes entirely from the log-likelihood of assessment interactions
gradient_wrt_assessment_biases = \
-assessment_participation_in_assessment_ixns.dot(mult_diff)[:, 0]
if using_graph_prior:
# compute the gradient w.r.t. concept embeddings,
# which is the sum of the gradients of the graph regularization terms
# (tying concepts to assessments, lessons, and prereq edges)
# and the gradient of the concept norm-regularization term
concept_grad_from_assessments = -concept_participation_in_assessments.dot(
2 * assessment_diffs_from_concept_centers)
concept_grad_from_lessons = concept_participation_in_lessons.dot(
2 * lesson_diffs_from_concept_centers)
concept_grad_from_prereqs = concept_participation_in_prereqs.dot(
postreq_concept_embeddings / postreq_concept_norms)
concept_grad_from_postreqs = concept_participation_in_postreqs.dot(
(prereq_concept_embeddings - 2 * postreq_concept_embeddings) / \
postreq_concept_norms - 2 * prereq_dot_postreq * \
postreq_concept_embeddings / postreq_concept_norms**3)
gradient_wrt_concept_embedding = graph_regularization_constant * (
concept_grad_from_assessments + concept_grad_from_lessons + \
concept_grad_from_prereqs + concept_grad_from_postreqs) + \
2 * concept_regularization_constant * concept_embeddings
else:
gradient_wrt_concept_embedding = None
gradient = {
models.STUDENT_EMBEDDINGS : gradient_wrt_student_embedding,
models.ASSESSMENT_EMBEDDINGS : gradient_wrt_assessment_embedding,
models.LESSON_EMBEDDINGS : gradient_wrt_lesson_embedding,
models.STUDENT_BIASES : gradient_wrt_student_biases,
models.ASSESSMENT_BIASES : gradient_wrt_assessment_biases,
models.CONCEPT_EMBEDDINGS : gradient_wrt_concept_embedding
}
cost_from_assessment_ixns = np.einsum('ij->', np.log(one_plus_exp_diff))
# equals sum(diffs**2 / variance) / 2 whether the variance is a scalar
# or a per-interaction column vector
cost_from_lesson_ixns = np.einsum('ij, ij', diffs, diffs_over_var) / 2
cost_from_student_regularization = student_regularization_constant * np.einsum(
'ij, ij', student_embeddings, student_embeddings)
if using_l1_regularizer:
cost_from_assessment_regularization = assessment_regularization_constant * np.absolute(
assessment_embeddings).sum()
cost_from_lesson_regularization = lesson_regularization_constant * np.absolute(
lesson_embeddings).sum()
else:
cost_from_assessment_regularization = assessment_regularization_constant * np.einsum(
'ij, ij', assessment_embeddings, assessment_embeddings)
cost_from_lesson_regularization = lesson_regularization_constant * np.einsum(
'ij, ij', lesson_embeddings, lesson_embeddings)
if using_graph_prior:
cost_from_concept_regularization = concept_regularization_constant * np.einsum(
'ij, ij', concept_embeddings, concept_embeddings)
cost_from_graph_regularization = graph_regularization_constant * ((
assessment_diffs_from_concept_centers**2).sum() + (
lesson_diffs_from_concept_centers**2).sum() + prereq_edge_diffs.sum())
else:
cost_from_concept_regularization = cost_from_graph_regularization = 0
cost_from_ixns = cost_from_assessment_ixns + cost_from_lesson_ixns
cost_from_regularization = cost_from_student_regularization + \
cost_from_assessment_regularization + cost_from_lesson_regularization + \
cost_from_concept_regularization + cost_from_graph_regularization
cost = cost_from_ixns + cost_from_regularization
return gradient, cost
return my_grads
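# Example (a minimal sketch with hypothetical setup): the (gradient, cost)
# pair returned by these closures can be sanity-checked against a forward
# finite difference of the cost in a single parameter entry.
#
#     my_grads = without_scipy_without_prereqs(*factory_args)
#     gradient, cost = my_grads(param_vals)
#     eps = 1e-6
#     perturbed = {k: np.copy(v) for k, v in param_vals.items()}
#     perturbed[models.STUDENT_EMBEDDINGS][0, 0] += eps
#     _, cost_eps = my_grads(perturbed)
#     approx_partial = (cost_eps - cost) / eps
#     # approx_partial should be close to
#     # gradient[models.STUDENT_EMBEDDINGS][0, 0]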
def with_scipy_without_lessons(
param_vals,
param_shapes,
assessment_interactions,
lesson_interactions,
learning_update_variance,
forgetting_penalty_terms,
regularization_constant,
graph_regularization_constant,
student_participation_in_assessment_ixns,
student_bias_participation_in_assessment_ixns,
assessment_participation_in_assessment_ixns,
curr_student_participation_in_lesson_ixns,
prev_student_participation_in_lesson_ixns,
lesson_participation_in_lesson_ixns,
assessment_participation_in_concepts,
lesson_participation_in_concepts,
concept_participation_in_assessments,
concept_participation_in_lessons,
prereq_edge_concept_idxes,
concept_participation_in_prereq_edges,
last_student_embedding_idx,
last_assessment_embedding_idx,
last_lesson_embedding_idx,
last_prereq_embedding_idx,
last_student_bias_idx,
last_assessment_bias_idx,
num_timesteps,
using_bias,
using_graph_prior,
using_l1_regularizer,
gradient):
"""
Compute the gradient of the cost function with respect to model parameters,
along with the value of the cost function
:param np.array param_vals:
Flattened, concatenated parameter values
:param dict[str,tuple] param_shapes:
A dictionary mapping a parameter's name to the shape of its np.ndarray
:param (np.array,np.array,np.array) assessment_interactions:
For each assessment interaction, (student_idx, assessment_idx, outcome),
where outcome is -1 or 1
:param (np.array,np.array,np.array) lesson_interactions:
For each lesson interaction, (student_idx, lesson_idx, time_since_previous_interaction).
Only the student indices are used here, to set up the temporal process.
:param np.array|float learning_update_variance:
Variance of the Gaussian learning update. If float, then the variance
is constant across all interactions. If np.array, then the variance is
different for each lesson interaction.
:param np.array|float forgetting_penalty_terms:
Penalty term for the forgetting effect in the Gaussian learning update.
If float, then the penalty term is constant across all interactions. If
np.array, then the penalty is different for each lesson interaction.
:param (float,float,float,float,float) regularization_constant:
Coefficients of the regularization terms for (students, assessments,
lessons, prereqs, concepts)
:param float graph_regularization_constant:
Coefficient of the graph regularization term
:param scipy.sparse.csr_matrix student_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of assessment interactions] where a non-zero entry indicates that the student at a
specific timestep participated in the assessment interaction
:param scipy.sparse.csr_matrix student_bias_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students] X
[number of assessment interactions] where a non-zero entry indicates that the student
participated in the assessment interaction
:param scipy.sparse.csr_matrix assessment_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique assessments] X
[number of assessment interactions] where a non-zero entry indicates that the assessment
participated in the assessment interaction
:param scipy.sparse.csr_matrix curr_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the post-update student state for the lesson interaction
:param scipy.sparse.csr_matrix prev_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the pre-update student state for the lesson interaction
:param scipy.sparse.csr_matrix lesson_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique lessons] X [number of lesson interactions]
where a non-zero entry indicates that the lesson participated in the lesson interaction
:param scipy.sparse.csr_matrix assessment_participation_in_concepts:
A binary matrix of dimensions [number of unique assessments] X [number of unique concepts],
where an entry indicates assessment-concept association. Concept associations for a given
assessment sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix lesson_participation_in_concepts:
A binary matrix of dimensions [number of unique lessons] X [number of unique concepts],
where an entry indicates lesson-concept association. Concept associations for a given
lesson sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix concept_participation_in_assessments:
The transpose of assessment_participation_in_concepts
:param scipy.sparse.csr_matrix concept_participation_in_lessons:
The transpose of lesson_participation_in_concepts
:param (np.array,np.array) prereq_edge_concept_idxes:
(Indices of prereq concepts, Indices of postreq concepts)
:param (scipy.sparse.csr_matrix,scipy.sparse.csr_matrix) concept_participation_in_prereq_edges:
The first binary matrix has dimensions [number of unique concepts] X
[number of prereq edges], where a non-zero entry indicates that the concept is the prereq
in the edge.
The second binary matrix has the same dimensions,
where a non-zero entry indicates that the concept is the postreq in the edge.
:param int last_student_embedding_idx:
Index of the last student embedding parameter in the flattened gradient
:param int last_assessment_embedding_idx:
Index of the last assessment embedding parameter in the flattened gradient
:param int last_lesson_embedding_idx:
Index of the last lesson embedding parameter in the flattened gradient
:param int last_prereq_embedding_idx:
Index of the last prereq embedding parameter in the flattened gradient
:param int last_student_bias_idx:
Index of the last student bias parameter in the flattened gradient
:param int last_assessment_bias_idx:
Index of the last assessment bias parameter in the flattened gradient
:param int num_timesteps:
Maximum number of timesteps in a student history, i.e.,
the output of InteractionHistory.duration()
:param bool using_bias:
Whether to include bias terms in the assessment result likelihood
:param bool using_graph_prior:
Whether to include the graph regularization term
:param bool using_l1_regularizer:
True => use L1 regularization on lesson and assessment embeddings
False => use L2 regularization on lesson and assessment embeddings
:param np.array gradient:
Placeholder for the flattened gradient
:rtype: (float,np.array)
:return:
The value of the cost function
(evaluated at the supplied parameter values),
and the flattened gradient of the cost function
(evaluated at the supplied parameter values)
"""
# pull regularization constants for different parameters out of tuple
(
student_regularization_constant,
assessment_regularization_constant,
lesson_regularization_constant,
prereq_regularization_constant,
concept_regularization_constant) = regularization_constant
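# param_vals is a single flat vector laid out as
# [student embeddings | assessment embeddings | student biases |
#  assessment biases | concept embeddings],
# delimited by the last_*_idx arguments and reshaped below via param_shapes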
# reshape flattened student embeddings into tensor
student_embeddings = np.reshape(
param_vals[:last_student_embedding_idx],
param_shapes[models.STUDENT_EMBEDDINGS])
# reshape flattened assessment embeddings into matrix
assessment_embeddings = np.reshape(
param_vals[last_student_embedding_idx:last_assessment_embedding_idx],
param_shapes[models.ASSESSMENT_EMBEDDINGS])
if using_graph_prior:
# reshape flattened concept embeddings into matrix
concept_embeddings = np.reshape(
param_vals[last_assessment_bias_idx:],
param_shapes[models.CONCEPT_EMBEDDINGS])
# split assessment interactions into students, assessments, outcomes
(
student_idxes_for_assessment_ixns,
assessment_idxes_for_assessment_ixns,
outcomes_for_assessment_ixns) = assessment_interactions
# use dummy lesson interactions to get students in temporal process
student_idxes_for_temporal_process, _, _ = lesson_interactions
if not using_bias:
# zero out bias terms, so that they definitely have no effect
# on the gradient or cost here. this should be done in addition to
# imposing (0, 0) bounds in the call to scipy.optimize.minimize in est.
param_vals[last_assessment_embedding_idx:last_assessment_bias_idx] = 0
# get biases for assessment interactions
student_biases = np.reshape(
param_vals[last_assessment_embedding_idx:last_student_bias_idx],
param_shapes[models.STUDENT_BIASES])[(
student_idxes_for_assessment_ixns // num_timesteps)][:, None]
assessment_biases = np.reshape(
param_vals[last_student_bias_idx:last_assessment_bias_idx],
param_shapes[models.ASSESSMENT_BIASES])[(
assessment_idxes_for_assessment_ixns)][:, None]
# shape outcomes as a column vector
outcomes = outcomes_for_assessment_ixns[:, None]
# get the assessment embedding for each assessment interaction
assessment_embeddings_for_assessment_ixns = assessment_embeddings[\
assessment_idxes_for_assessment_ixns, :]
# compute the L2 norm of the assessment embedding for each assessment interaction
assessment_embedding_norms_for_assessment_ixns = np.linalg.norm(
assessment_embeddings_for_assessment_ixns, axis=1)[:, None]
# get the student embedding for each assessment interaction
student_embeddings_for_assessment_ixns = (
student_embeddings[student_idxes_for_assessment_ixns, :])
# compute the dot product of the student embedding
# and assessment embedding for each interaction
student_dot_assessment = np.einsum(
'ij, ij->i',
student_embeddings_for_assessment_ixns,
assessment_embeddings_for_assessment_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
exp_diff = np.exp(outcomes * (
assessment_embedding_norms_for_assessment_ixns - student_dot_assessment / \
assessment_embedding_norms_for_assessment_ixns - student_biases - \
assessment_biases))
one_plus_exp_diff = 1 + exp_diff
mult_diff = outcomes * exp_diff / one_plus_exp_diff
using_temporal_process = len(student_idxes_for_temporal_process) > 0
if using_temporal_process:
# get embeddings of student states resulting from lesson interactions
curr_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_temporal_process, :]
# get embeddings of student states prior to lesson interactions
prev_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_temporal_process - 1, :]
# compute intermediate quantities for the gradient that get reused
diffs = curr_student_embeddings_for_lesson_ixns - prev_student_embeddings_for_lesson_ixns \
+ forgetting_penalty_terms
diffs_over_var = diffs / learning_update_variance
else:
diffs = diffs_over_var = 0
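# with no lesson embeddings, the temporal process reduces to a random walk
# with drift: current_state ~ N(prev_state - forgetting_penalty,
# learning_update_variance), and diffs is the residual of that walk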
if using_graph_prior:
# get distance from an assessment embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the assessment's
# governing concepts
assessment_diffs_from_concept_centers = assessment_embeddings - \
assessment_participation_in_concepts.dot(concept_embeddings)
# grab the concept dependency graph
prereq_concept_idxes, postreq_concept_idxes = prereq_edge_concept_idxes
(
concept_participation_in_prereqs,
concept_participation_in_postreqs) = concept_participation_in_prereq_edges
# get prereq and postreq concept embeddings
prereq_concept_embeddings = concept_embeddings[prereq_concept_idxes, :]
postreq_concept_embeddings = concept_embeddings[postreq_concept_idxes, :]
postreq_concept_norms = np.linalg.norm(postreq_concept_embeddings, axis=1)[:, None]
prereq_dot_postreq = np.einsum(
'ij, ij->i',
prereq_concept_embeddings,
postreq_concept_embeddings)[:, None]
# intermediate quantity, useful later
prereq_edge_diffs = prereq_dot_postreq / postreq_concept_norms - postreq_concept_norms
# compute the gradient w.r.t. student embeddings,
# which is the sum of the gradient of the log-likelihood of assessment
# interactions, the gradient of the temporal process penalty,
# and the gradient of the regularization terms
stud_grad_from_asmt_ixns = -student_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * \
assessment_embeddings_for_assessment_ixns)
if using_temporal_process:
stud_grad_from_temporal_process = curr_student_participation_in_lesson_ixns.dot(
diffs_over_var)
else:
stud_grad_from_temporal_process = 0
stud_grad_from_norm_regularization = 2 * student_regularization_constant * student_embeddings
gradient[:last_student_embedding_idx] = (
stud_grad_from_asmt_ixns + stud_grad_from_temporal_process + \
stud_grad_from_norm_regularization).ravel()
# compute the gradient w.r.t. assessment embeddings,
# which is the sum of the gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
asmt_grad_from_asmt_ixns = -assessment_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * (
student_embeddings_for_assessment_ixns - \
assessment_embeddings_for_assessment_ixns - student_dot_assessment / \
np.einsum(
'ij, ij->ij',
assessment_embedding_norms_for_assessment_ixns,
assessment_embedding_norms_for_assessment_ixns) * \
assessment_embeddings_for_assessment_ixns))
if using_graph_prior:
asmt_grad_from_graph_regularization = 2 * graph_regularization_constant * \
assessment_diffs_from_concept_centers
else:
asmt_grad_from_graph_regularization = 0
if using_l1_regularizer:
asmt_grad_from_norm_regularization = assessment_regularization_constant * np.sign(
assessment_embeddings)
else:
asmt_grad_from_norm_regularization = 2 * assessment_regularization_constant * \
assessment_embeddings
gradient[last_student_embedding_idx:last_assessment_embedding_idx] = (
asmt_grad_from_asmt_ixns + asmt_grad_from_graph_regularization + \
asmt_grad_from_norm_regularization).ravel()
if using_bias:
# compute the gradient w.r.t. student biases,
# which comes entirely from the log-likelihood of assessment interactions
gradient[last_assessment_embedding_idx:last_student_bias_idx] = \
-student_bias_participation_in_assessment_ixns.dot(mult_diff).ravel()
# compute the gradient w.r.t. assessment biases,
# which comes entirely from the log-likelihood of assessment interactions
gradient[last_student_bias_idx:last_assessment_bias_idx] = \
-assessment_participation_in_assessment_ixns.dot(mult_diff).ravel()
if using_graph_prior:
# compute the gradient w.r.t. concept embeddings,
# which is the sum of the gradients of the graph regularization terms
# (tying concepts to assessments and prereq edges)
# and the gradient of the concept norm-regularization term
concept_grad_from_assessments = -concept_participation_in_assessments.dot(
2 * assessment_diffs_from_concept_centers)
concept_grad_from_prereqs = concept_participation_in_prereqs.dot(
postreq_concept_embeddings / postreq_concept_norms)
concept_grad_from_postreqs = concept_participation_in_postreqs.dot(
(prereq_concept_embeddings - 2 * postreq_concept_embeddings) / postreq_concept_norms -\
2 * prereq_dot_postreq * postreq_concept_embeddings / postreq_concept_norms**3)
concept_grad_from_norm_regularization = 2 * concept_regularization_constant * \
concept_embeddings
gradient[last_assessment_bias_idx:] = (graph_regularization_constant * (
concept_grad_from_assessments + concept_grad_from_prereqs + \
concept_grad_from_postreqs) + concept_grad_from_norm_regularization).ravel()
cost_from_assessment_ixns = np.einsum('ij->', np.log(one_plus_exp_diff))
if using_temporal_process:
# equals sum(diffs**2 / variance) / 2 whether the variance is a scalar
# or a per-interaction column vector
cost_from_temporal_process = np.einsum(
'ij, ij', diffs, diffs_over_var) / 2
else:
cost_from_temporal_process = 0
cost_from_student_regularization = student_regularization_constant * np.einsum(
'ij, ij', student_embeddings, student_embeddings)
if using_l1_regularizer:
cost_from_assessment_regularization = assessment_regularization_constant * np.absolute(
assessment_embeddings).sum()
else:
cost_from_assessment_regularization = assessment_regularization_constant * np.einsum(
'ij, ij', assessment_embeddings, assessment_embeddings)
if using_graph_prior:
cost_from_concept_regularization = concept_regularization_constant * np.einsum(
'ij, ij', concept_embeddings, concept_embeddings)
cost_from_graph_regularization = graph_regularization_constant * ((
assessment_diffs_from_concept_centers**2).sum() + prereq_edge_diffs.sum())
else:
cost_from_concept_regularization = 0
cost_from_graph_regularization = 0
cost_from_norm_regularization = cost_from_student_regularization + \
cost_from_assessment_regularization + cost_from_concept_regularization
cost_from_regularization = cost_from_norm_regularization + cost_from_graph_regularization
cost = cost_from_assessment_ixns + cost_from_temporal_process + cost_from_regularization
return cost, gradient
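# Example (a minimal sketch with hypothetical names): this function returns
# (cost, gradient), which is exactly what scipy.optimize.minimize expects
# from its objective when jac=True.
#
#     import functools
#     from scipy.optimize import minimize
#     objective = functools.partial(
#         with_scipy_without_lessons,
#         param_shapes=param_shapes,
#         assessment_interactions=assessment_interactions,
#         # ... remaining keyword arguments elided ...
#         gradient=np.empty_like(flattened_param_vals))
#     result = minimize(
#         objective, flattened_param_vals, jac=True, method='L-BFGS-B')
#     # when using_bias is False, (0, 0) bounds should also be imposed on the
#     # bias slice of the parameter vector (see the comment in the body above)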
def with_scipy_with_prereqs(
param_vals,
param_shapes,
assessment_interactions,
lesson_interactions,
learning_update_variance,
forgetting_penalty_terms,
regularization_constant,
graph_regularization_constant,
student_participation_in_assessment_ixns,
student_bias_participation_in_assessment_ixns,
assessment_participation_in_assessment_ixns,
curr_student_participation_in_lesson_ixns,
prev_student_participation_in_lesson_ixns,
lesson_participation_in_lesson_ixns,
assessment_participation_in_concepts,
lesson_participation_in_concepts,
concept_participation_in_assessments,
concept_participation_in_lessons,
prereq_edge_concept_idxes,
concept_participation_in_prereq_edges,
last_student_embedding_idx,
last_assessment_embedding_idx,
last_lesson_embedding_idx,
last_prereq_embedding_idx,
last_student_bias_idx,
last_assessment_bias_idx,
num_timesteps,
using_bias,
using_graph_prior,
using_l1_regularizer,
gradient):
"""
Compute the gradient of the cost function with respect to model parameters,
along with the value of the cost function
:param np.array param_vals:
Flattened, concatenated parameter values
:param dict[str,tuple] param_shapes:
A dictionary mapping a parameter's name to the shape of its np.ndarray
:param (np.array,np.array,np.array) assessment_interactions:
For each assessment interaction, (student_idx, assessment_idx, outcome),
where outcome is -1 or 1
:param (np.array,np.array,np.array) lesson_interactions:
For each lesson interaction, (student_idx, lesson_idx, time_since_previous_interaction)
:param np.array|float learning_update_variance:
Variance of the Gaussian learning update. If float, then the variance
is constant across all interactions. If np.array, then the variance is
different for each lesson interaction.
:param np.array|float forgetting_penalty_terms:
Penalty term for the forgetting effect in the Gaussian learning update.
If float, then the penalty term is constant across all interactions. If
np.array, then the penalty is different for each lesson interaction.
:param (float,float,float,float,float) regularization_constant:
Coefficients of the regularization terms for (students, assessments,
lessons, prereqs, concepts)
:param float graph_regularization_constant:
Coefficient of the graph regularization term
:param scipy.sparse.csr_matrix student_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of assessment interactions] where a non-zero entry indicates that the student at a
specific timestep participated in the assessment interaction
:param scipy.sparse.csr_matrix student_bias_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students] X
[number of assessment interactions] where a non-zero entry indicates that the student
participated in the assessment interaction
:param scipy.sparse.csr_matrix assessment_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique assessments] X
[number of assessment interactions] where a non-zero entry indicates that the assessment
participated in the assessment interaction
:param scipy.sparse.csr_matrix curr_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the post-update student state for the lesson interaction
:param scipy.sparse.csr_matrix prev_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the pre-update student state for the lesson interaction
:param scipy.sparse.csr_matrix lesson_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique lessons] X [number of lesson interactions]
where a non-zero entry indicates that the lesson participated in the lesson interaction
:param scipy.sparse.csr_matrix assessment_participation_in_concepts:
A binary matrix of dimensions [number of unique assessments] X [number of unique concepts],
where an entry indicates assessment-concept association. Concept associations for a given
assessment sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix lesson_participation_in_concepts:
A binary matrix of dimensions [number of unique lessons] X [number of unique concepts],
where an entry indicates lesson-concept association. Concept associations for a given
lesson sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix concept_participation_in_assessments:
The transpose of assessment_participation_in_concepts
:param scipy.sparse.csr_matrix concept_participation_in_lessons:
The transpose of lesson_participation_in_concepts
:param (np.array,np.array) prereq_edge_concept_idxes:
(Indices of prereq concepts, Indices of postreq concepts)
:param (scipy.sparse.csr_matrix,scipy.sparse.csr_matrix) concept_participation_in_prereq_edges:
The first binary matrix has dimensions [number of unique concepts] X
[number of prereq edges], where a non-zero entry indicates that the concept is the prereq
in the edge.
The second binary matrix has the same dimensions,
where a non-zero entry indicates that the concept is the postreq in the edge.
:param int last_student_embedding_idx:
Index of the last student embedding parameter in the flattened gradient
:param int last_assessment_embedding_idx:
Index of the last assessment embedding parameter in the flattened gradient
:param int last_lesson_embedding_idx:
Index of the last lesson embedding parameter in the flattened gradient
:param int last_prereq_embedding_idx:
Index of the last prereq embedding parameter in the flattened gradient
:param int last_student_bias_idx:
Index of the last student bias parameter in the flattened gradient
:param int last_assessment_bias_idx:
Index of the last assessment bias parameter in the flattened gradient
:param int num_timesteps:
Maximum number of timesteps in a student history, i.e.,
the output of InteractionHistory.duration()
:param bool using_bias:
Including bias terms in the assessment result likelihood
:param bool using_graph_prior:
Including the graph regularization term
:param bool using_l1_regularizer:
True => use L1 regularization on lesson and assessment embeddings
False => use L2 regularization on lesson and assessment embeddings
:param np.array gradient:
Placeholder for the flattened gradient
:rtype: (float,np.array)
:return:
The value of the cost function
(evaluated at the supplied parameter values),
and the flattened gradient of the cost function
(evaluated at the supplied parameter values)
"""
# pull regularization constants for different parameters out of tuple
(
student_regularization_constant,
assessment_regularization_constant,
lesson_regularization_constant,
prereq_regularization_constant,
concept_regularization_constant) = regularization_constant
# reshape flattened student embeddings into tensor
student_embeddings = np.reshape(
param_vals[:last_student_embedding_idx],
param_shapes[models.STUDENT_EMBEDDINGS])
# reshape flattened assessment embeddings into matrix
assessment_embeddings = np.reshape(
param_vals[last_student_embedding_idx:last_assessment_embedding_idx],
param_shapes[models.ASSESSMENT_EMBEDDINGS])
# reshape flattened lesson embeddings into matrix
lesson_embeddings = np.reshape(
param_vals[last_assessment_embedding_idx:last_lesson_embedding_idx],
param_shapes[models.LESSON_EMBEDDINGS])
# reshape flattened prereq embeddings into matrix
prereq_embeddings = np.reshape(
param_vals[last_lesson_embedding_idx:last_prereq_embedding_idx],
param_shapes[models.PREREQ_EMBEDDINGS])
if using_graph_prior:
# reshape flattened concept embeddings into matrix
concept_embeddings = np.reshape(
param_vals[last_assessment_bias_idx:],
param_shapes[models.CONCEPT_EMBEDDINGS])
# split assessment interactions into students, assessments, outcomes
(
student_idxes_for_assessment_ixns,
assessment_idxes_for_assessment_ixns,
outcomes_for_assessment_ixns) = assessment_interactions
# split lesson interactions into students, lessons
student_idxes_for_lesson_ixns, lesson_idxes_for_lesson_ixns, _ = lesson_interactions
if not using_bias:
# zero out bias terms, so that they definitely have no effect
# on the gradient or cost here. this should be done in addition to
# imposing (0, 0) bounds in the call to scipy.optimize.minimize in the est module.
param_vals[last_prereq_embedding_idx:last_assessment_bias_idx] = 0
# get biases for assessment interactions
student_biases = np.reshape(
param_vals[last_prereq_embedding_idx:last_student_bias_idx],
param_shapes[models.STUDENT_BIASES])[(
student_idxes_for_assessment_ixns // num_timesteps)][:, None]
assessment_biases = np.reshape(
param_vals[last_student_bias_idx:last_assessment_bias_idx],
param_shapes[models.ASSESSMENT_BIASES])[assessment_idxes_for_assessment_ixns][:, None]
# shape outcomes as a column vector
outcomes = outcomes_for_assessment_ixns[:, None]
# get the assessment embedding for each assessment interaction
assessment_embeddings_for_assessment_ixns = \
assessment_embeddings[assessment_idxes_for_assessment_ixns, :]
# compute the L2 norm of the assessment embedding for each assessment interaction
assessment_embedding_norms_for_assessment_ixns = np.linalg.norm(
assessment_embeddings_for_assessment_ixns, axis=1)[:, None]
# get the student embedding for each assessment interaction
student_embeddings_for_assessment_ixns = (
student_embeddings[student_idxes_for_assessment_ixns, :])
# compute the dot product of the student embedding
# and assessment embedding for each interaction
student_dot_assessment = np.einsum(
'ij, ij->i',
student_embeddings_for_assessment_ixns,
assessment_embeddings_for_assessment_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
exp_diff = np.exp(outcomes * (
assessment_embedding_norms_for_assessment_ixns - student_dot_assessment / \
assessment_embedding_norms_for_assessment_ixns - student_biases - \
assessment_biases))
one_plus_exp_diff = 1 + exp_diff
mult_diff = outcomes * exp_diff / one_plus_exp_diff
# get lesson embeddings for lesson interactions
lesson_embeddings_for_lesson_ixns = lesson_embeddings[lesson_idxes_for_lesson_ixns, :]
# get lesson prereq embeddings for lesson interactions
prereq_embeddings_for_lesson_ixns = prereq_embeddings[lesson_idxes_for_lesson_ixns, :]
# get embeddings of student states resulting from lesson interactions
curr_student_embeddings_for_lesson_ixns = student_embeddings[student_idxes_for_lesson_ixns, :]
# get embeddings of student states prior to lesson interactions
prev_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_lesson_ixns - 1, :]
# compute the L2 norm of the prereq embedding for each lesson interaction
prereq_embedding_norms_for_lesson_ixns = np.linalg.norm(
prereq_embeddings_for_lesson_ixns, axis=1)[:, None]
# compute the dot product of the student embedding prior
# to the lesson interaction and the lesson prereq embedding,
# for each interaction
prev_student_dot_prereq = np.einsum(
'ij, ij->i',
prev_student_embeddings_for_lesson_ixns,
prereq_embeddings_for_lesson_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
update_exp_diff = np.exp(
prereq_embedding_norms_for_lesson_ixns - prev_student_dot_prereq / \
prereq_embedding_norms_for_lesson_ixns)
update_one_plus_exp_diff = 1 + update_exp_diff
diffs = curr_student_embeddings_for_lesson_ixns - prev_student_embeddings_for_lesson_ixns - \
lesson_embeddings_for_lesson_ixns / update_one_plus_exp_diff + forgetting_penalty_terms
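# Editorial note (sketch): diffs is the residual of the Gaussian learning
# update whose mean is prev_student + lesson / (1 + exp(||q|| - prev_student . q / ||q||))
# - forgetting_penalty, where q is the lesson's prereq embedding; the lesson
# gain is gated by how well the previous student state covers the prereq.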
diffs_over_var = diffs / learning_update_variance
update_mult_diff = np.einsum(
'ij, ij->i',
diffs_over_var,
lesson_embeddings_for_lesson_ixns)[:, None] * update_exp_diff / (
np.einsum('ij, ij->ij',
update_one_plus_exp_diff,
update_one_plus_exp_diff) * prereq_embedding_norms_for_lesson_ixns)
if using_graph_prior:
# get distance from an assessment embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the assessment's
# governing concepts
assessment_diffs_from_concept_centers = assessment_embeddings - \
assessment_participation_in_concepts.dot(concept_embeddings)
# get distance from a lesson embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the lesson's
# governing concepts
lesson_diffs_from_concept_centers = lesson_embeddings - \
lesson_participation_in_concepts.dot(concept_embeddings)
# grab the concept dependency graph
prereq_concept_idxes, postreq_concept_idxes = prereq_edge_concept_idxes
concept_participation_in_prereqs, concept_participation_in_postreqs = \
concept_participation_in_prereq_edges
# get prereq and postreq concept embeddings
prereq_concept_embeddings = concept_embeddings[prereq_concept_idxes, :]
postreq_concept_embeddings = concept_embeddings[postreq_concept_idxes, :]
# compute column vector of L2 norms for postreq concept embeddings
postreq_concept_norms = np.linalg.norm(postreq_concept_embeddings, axis=1)[:, None]
# compute the dot product of the prereq concept embedding
# and postreq concept embedding for each edge in the concept dependency graph
prereq_dot_postreq = np.einsum(
'ij, ij->i',
prereq_concept_embeddings,
postreq_concept_embeddings)[:, None]
# intermediate quantity, useful and reusable later
prereq_edge_diffs = prereq_dot_postreq / postreq_concept_norms - postreq_concept_norms
# compute the gradient w.r.t. student embeddings,
# which is the sum of gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
stud_grad_from_asmt_ixns = -student_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * \
assessment_embeddings_for_assessment_ixns)
stud_grad_from_lesson_ixns = curr_student_participation_in_lesson_ixns.dot(
diffs_over_var) - prev_student_participation_in_lesson_ixns.dot(
update_mult_diff * prereq_embeddings_for_lesson_ixns + diffs_over_var)
stud_grad_from_norm_regularization = 2 * student_regularization_constant * student_embeddings
gradient[:last_student_embedding_idx] = (
stud_grad_from_asmt_ixns + stud_grad_from_lesson_ixns + \
stud_grad_from_norm_regularization).ravel()
# compute the gradient w.r.t. assessment embeddings,
# which is the sum of gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
asmt_grad_from_asmt_ixns = -assessment_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * (
student_embeddings_for_assessment_ixns - assessment_embeddings_for_assessment_ixns\
- student_dot_assessment / np.einsum(
'ij, ij->ij',
assessment_embedding_norms_for_assessment_ixns,
assessment_embedding_norms_for_assessment_ixns) * \
assessment_embeddings_for_assessment_ixns))
if using_graph_prior:
asmt_grad_from_graph_regularization = 2 * graph_regularization_constant * \
assessment_diffs_from_concept_centers
else:
asmt_grad_from_graph_regularization = 0
if using_l1_regularizer:
asmt_grad_from_norm_regularization = assessment_regularization_constant * np.sign(
assessment_embeddings)
else:
asmt_grad_from_norm_regularization = 2 * assessment_regularization_constant * \
assessment_embeddings
gradient[last_student_embedding_idx:last_assessment_embedding_idx] = (
asmt_grad_from_asmt_ixns + asmt_grad_from_graph_regularization + \
asmt_grad_from_norm_regularization).ravel()
# compute the gradient w.r.t. lesson embeddings,
# which is the sum of gradient of the log-likelihood of assessment and lesson interactions
# and the gradient of the regularization terms
lesson_grad_from_lesson_ixns = -lesson_participation_in_lesson_ixns.dot(
diffs_over_var / update_one_plus_exp_diff)
if using_graph_prior:
lesson_grad_from_graph_regularization = 2 * graph_regularization_constant * \
lesson_diffs_from_concept_centers
else:
lesson_grad_from_graph_regularization = 0
if using_l1_regularizer:
lesson_grad_from_norm_regularization = lesson_regularization_constant * np.sign(
lesson_embeddings)
else:
lesson_grad_from_norm_regularization = 2 * lesson_regularization_constant * \
lesson_embeddings
gradient[last_assessment_embedding_idx:last_lesson_embedding_idx] = (
lesson_grad_from_lesson_ixns + lesson_grad_from_graph_regularization + \
lesson_grad_from_norm_regularization).ravel()
# compute the gradient w.r.t. prereq embeddings,
# which is the sum of gradient of the log-likelihood of assessment and lesson interactions
# and the gradient of the regularization terms
prereq_grad_from_lesson_ixns = lesson_participation_in_lesson_ixns.dot(
update_mult_diff * (prev_student_dot_prereq / np.einsum(
'ij, ij->ij',
prereq_embedding_norms_for_lesson_ixns,
prereq_embedding_norms_for_lesson_ixns) * \
prereq_embeddings_for_lesson_ixns - \
prev_student_embeddings_for_lesson_ixns + \
prereq_embeddings_for_lesson_ixns))
prereq_grad_from_norm_regularization = 2 * prereq_regularization_constant * prereq_embeddings
gradient[last_lesson_embedding_idx:last_prereq_embedding_idx] = (
prereq_grad_from_lesson_ixns + prereq_grad_from_norm_regularization).ravel()
if using_bias:
# compute the gradient w.r.t. student biases,
# which is the sum of gradient of the log-likelihood of assessment interactions
gradient[last_prereq_embedding_idx:last_student_bias_idx] = \
-student_bias_participation_in_assessment_ixns.dot(mult_diff).ravel()
# compute the gradient w.r.t. assessment biases,
# which is the sum of gradient of the log-likelihood of assessment interactions
gradient[last_student_bias_idx:last_assessment_bias_idx] = \
-assessment_participation_in_assessment_ixns.dot(mult_diff).ravel()
if using_graph_prior:
# compute the gradient w.r.t. concept embeddings,
# which is the sum of gradient of the log-likelihood of assessment
# and lesson interactions and the gradient of the regularization terms
concept_grad_from_assessments = -concept_participation_in_assessments.dot(
2 * assessment_diffs_from_concept_centers)
concept_grad_from_lessons = concept_participation_in_lessons.dot(
2 * lesson_diffs_from_concept_centers)
concept_grad_from_prereqs = concept_participation_in_prereqs.dot(
postreq_concept_embeddings / postreq_concept_norms)
concept_grad_from_postreqs = concept_participation_in_postreqs.dot(
(prereq_concept_embeddings - 2 * postreq_concept_embeddings) / \
postreq_concept_norms - 2 * prereq_dot_postreq * postreq_concept_embeddings / \
postreq_concept_norms**3)
# include the gradient of the concept norm regularization term, matching
# the cost_from_concept_regularization term accumulated below
concept_grad_from_norm_regularization = 2 * concept_regularization_constant * \
concept_embeddings
gradient[last_assessment_bias_idx:] = (graph_regularization_constant * (
concept_grad_from_assessments + concept_grad_from_lessons + concept_grad_from_prereqs +
concept_grad_from_postreqs) + concept_grad_from_norm_regularization).ravel()
cost_from_assessment_ixns = np.einsum('ij->', np.log(one_plus_exp_diff))
# divide by the variance before summing, so per-interaction variances are handled
cost_from_lesson_ixns = np.einsum('ij, ij', diffs, diffs_over_var) / 2
cost_from_student_regularization = student_regularization_constant * np.einsum(
'ij, ij', student_embeddings, student_embeddings)
if using_l1_regularizer:
cost_from_assessment_regularization = assessment_regularization_constant * np.absolute(
assessment_embeddings).sum()
cost_from_lesson_regularization = lesson_regularization_constant * np.absolute(
lesson_embeddings).sum()
else:
cost_from_assessment_regularization = assessment_regularization_constant * np.einsum(
'ij, ij', assessment_embeddings, assessment_embeddings)
cost_from_lesson_regularization = lesson_regularization_constant * np.einsum(
'ij, ij', lesson_embeddings, lesson_embeddings)
cost_from_prereq_regularization = prereq_regularization_constant * np.einsum(
'ij, ij', prereq_embeddings, prereq_embeddings)
if using_graph_prior:
cost_from_concept_regularization = concept_regularization_constant * np.einsum(
'ij, ij', concept_embeddings, concept_embeddings)
cost_from_graph_regularization = graph_regularization_constant * ((
assessment_diffs_from_concept_centers**2).sum() + (
lesson_diffs_from_concept_centers**2).sum() + prereq_edge_diffs.sum())
else:
cost_from_concept_regularization = 0
cost_from_graph_regularization = 0
cost_from_norm_regularization = cost_from_student_regularization + \
cost_from_assessment_regularization + cost_from_lesson_regularization + \
cost_from_prereq_regularization + cost_from_concept_regularization
cost_from_ixns = cost_from_assessment_ixns + cost_from_lesson_ixns
cost_from_regularization = cost_from_norm_regularization + cost_from_graph_regularization
cost = cost_from_ixns + cost_from_regularization
return cost, gradient
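# Editorial sketch (not part of the original module): the evaluators in this
# module all follow the same (cost, gradient) contract. Below is a minimal,
# hypothetical finite-difference check of that contract on a toy quadratic
# cost, kept self-contained so it does not depend on the model's precomputed
# matrices:
def _finite_difference_check_sketch():
import numpy as np
def toy_cost_and_grad(x):
# cost = ||x||^2 and its analytic gradient 2x, in the same
# (float, np.array) format returned by the evaluators above
return float(np.dot(x, x)), 2 * x
x = np.random.randn(5)
cost, grad = toy_cost_and_grad(x)
eps = 1e-6
for i in range(len(x)):
x_eps = x.copy()
x_eps[i] += eps
numeric = (toy_cost_and_grad(x_eps)[0] - cost) / eps
assert abs(numeric - grad[i]) < 1e-4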
def with_scipy_without_prereqs(
param_vals,
param_shapes,
assessment_interactions,
lesson_interactions,
learning_update_variance,
forgetting_penalty_terms,
regularization_constant,
graph_regularization_constant,
student_participation_in_assessment_ixns,
student_bias_participation_in_assessment_ixns,
assessment_participation_in_assessment_ixns,
curr_student_participation_in_lesson_ixns,
prev_student_participation_in_lesson_ixns,
lesson_participation_in_lesson_ixns,
assessment_participation_in_concepts,
lesson_participation_in_concepts,
concept_participation_in_assessments,
concept_participation_in_lessons,
prereq_edge_concept_idxes,
concept_participation_in_prereq_edges,
last_student_embedding_idx,
last_assessment_embedding_idx,
last_lesson_embedding_idx,
last_prereq_embedding_idx,
last_student_bias_idx,
last_assessment_bias_idx,
num_timesteps,
using_bias,
using_graph_prior,
using_l1_regularizer,
gradient):
"""
Compute the value of the cost function and its gradient with respect to model parameters
:param np.array param_vals:
Flattened, concatenated parameter values
:param dict[str,tuple] param_shapes:
A dictionary mapping a parameter's name to the shape of its np.ndarray
:param (np.array,np.array,np.array) assessment_interactions:
For each assessment interaction, (student_idx, assessment_idx, outcome),
where outcome is -1 or 1
:param (np.array,np.array,np.array) lesson_interactions:
For each lesson interaction, (student_idx, lesson_idx, time_since_previous_interaction)
:param np.array|float learning_update_variance:
Variance of the Gaussian learning update. If float, then the variance
is constant across all interactions. If np.array, then the variance is
different for each lesson interaction.
:param np.array|float forgetting_penalty_terms:
Penalty term for the forgetting effect in the Gaussian learning update.
If float, then the penalty term is constant across all interactions. If
np.array, then the penalty is different for each lesson interaction.
:param (float,float,float,float,float) regularization_constant:
Coefficients of the regularization terms for (students, assessments,
lessons, prereqs, concepts)
:param float graph_regularization_constant:
Coefficient of the graph regularization term
:param scipy.sparse.csr_matrix student_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of assessment interactions] where a non-zero entry indicates that the student at a
specific timestep participated in the assessment interaction
:param scipy.sparse.csr_matrix student_bias_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique students] X [number of assessment
interactions] where a non-zero entry indicates that the student participated in the
assessment interaction
:param scipy.sparse.csr_matrix assessment_participation_in_assessment_ixns:
A binary matrix of dimensions [number of unique assessments] X [number of assessment
interactions] where a non-zero entry indicates that the assessment participated in the
assessment interaction
:param scipy.sparse.csr_matrix curr_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the post-update student state for the lesson interaction
:param scipy.sparse.csr_matrix prev_student_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique students * number of timesteps] X
[number of lesson interactions] where a non-zero entry indicates that the student at a
specific timestep was the pre-update student state for the lesson interaction
:param scipy.sparse.csr_matrix lesson_participation_in_lesson_ixns:
A binary matrix of dimensions [number of unique lessons] X [number of lesson interactions]
where a non-zero entry indicates that the lesson participated in the lesson interaction
:param scipy.sparse.csr_matrix assessment_participation_in_concepts:
A binary matrix of dimensions [number of unique assessments] X [number of unique concepts],
where an entry indicates assessment-concept association. Concept associations for a given
assessment sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix lesson_participation_in_concepts:
A binary matrix of dimensions [number of unique lessons] X [number of unique concepts],
where an entry indicates lesson-concept association. Concept associations for a given
lesson sum to one, i.e., each row sums to one.
:param scipy.sparse.csr_matrix concept_participation_in_assessments:
The transpose of assessment_participation_in_concepts
:param scipy.sparse.csr_matrix concept_participation_in_lessons:
The transpose of lesson_participation_in_concepts
:param (np.array,np.array) prereq_edge_concept_idxes:
(Indices of prereq concepts, Indices of postreq concepts)
:param (scipy.sparse.csr_matrix,scipy.sparse.csr_matrix) concept_participation_in_prereq_edges:
The first binary matrix has dimensions [number of unique concepts] X
[number of prereq edges], where a non-zero entry indicates that the concept is the prereq
in the edge.
The second binary matrix has the same dimensions,
where a non-zero entry indicates that the concept is the postreq in the edge.
:param int last_student_embedding_idx:
Index of the last student embedding parameter in the flattened gradient
:param int last_assessment_embedding_idx:
Index of the last assessment embedding parameter in the flattened gradient
:param int last_lesson_embedding_idx:
Index of the last lesson embedding parameter in the flattened gradient
:param int last_prereq_embedding_idx:
Index of the last prereq embedding parameter in the flattened gradient
:param int last_student_bias_idx:
Index of the last student bias parameter in the flattened gradient
:param int last_assessment_bias_idx:
Index of the last assessment bias parameter in the flattened gradient
:param int num_timesteps:
Maximum number of timesteps in a student history, i.e.,
the output of InteractionHistory.duration()
:param bool using_bias:
Including bias terms in the assessment result likelihood
:param bool using_graph_prior:
Including the graph regularization term
:param bool using_l1_regularizer:
True => use L1 regularization on lesson and assessment embeddings
False => use L2 regularization on lesson and assessment embeddings
:param np.array gradient:
Placeholder for the flattened gradient
:rtype: (float,np.array)
:return:
The value of the cost function
(evaluated at the supplied parameter values),
and the flattened gradient of the cost function
(evaluated at the supplied parameter values)
"""
# pull regularization constants for different parameters out of tuple
(
student_regularization_constant,
assessment_regularization_constant,
lesson_regularization_constant,
prereq_regularization_constant,
concept_regularization_constant) = regularization_constant
# reshape flattened student embeddings into tensor
student_embeddings = np.reshape(
param_vals[:last_student_embedding_idx],
param_shapes[models.STUDENT_EMBEDDINGS])
# reshape flattened assessment embeddings into matrix
assessment_embeddings = np.reshape(
param_vals[last_student_embedding_idx:last_assessment_embedding_idx],
param_shapes[models.ASSESSMENT_EMBEDDINGS])
# reshape flattened lesson embeddings into matrix
lesson_embeddings = np.reshape(
param_vals[last_assessment_embedding_idx:last_lesson_embedding_idx],
param_shapes[models.LESSON_EMBEDDINGS])
if using_graph_prior:
# reshape flattened concept embeddings into matrix
concept_embeddings = np.reshape(
param_vals[last_assessment_bias_idx:],
param_shapes[models.CONCEPT_EMBEDDINGS])
# split assessment interactions into students, assessments, outcomes
(
student_idxes_for_assessment_ixns,
assessment_idxes_for_assessment_ixns,
outcomes_for_assessment_ixns) = assessment_interactions
# split lesson interactions into students, lessons
student_idxes_for_lesson_ixns, lesson_idxes_for_lesson_ixns, _ = lesson_interactions
if not using_bias:
# zero out bias terms, so that they definitely have no effect
# on the gradient or cost here. this should be done in addition to
# imposing (0, 0) bounds in the call to scipy.optimize.minimize in the est module.
param_vals[last_lesson_embedding_idx:last_assessment_bias_idx] = 0
# get biases for assessment interactions
student_biases = np.reshape(
param_vals[last_lesson_embedding_idx:last_student_bias_idx],
param_shapes[models.STUDENT_BIASES])[(
student_idxes_for_assessment_ixns // num_timesteps)][:, None]
assessment_biases = np.reshape(
param_vals[last_student_bias_idx:last_assessment_bias_idx],
param_shapes[models.ASSESSMENT_BIASES])[(
assessment_idxes_for_assessment_ixns)][:, None]
# shape outcomes as a column vector
outcomes = outcomes_for_assessment_ixns[:, None]
# get the assessment embedding for each assessment interaction
assessment_embeddings_for_assessment_ixns = \
assessment_embeddings[assessment_idxes_for_assessment_ixns, :]
# compute the L2 norm of the assessment embedding for each assessment interaction
assessment_embedding_norms_for_assessment_ixns = np.linalg.norm(
assessment_embeddings_for_assessment_ixns, axis=1)[:, None]
# get the student embedding for each assessment interaction
student_embeddings_for_assessment_ixns = \
student_embeddings[student_idxes_for_assessment_ixns, :]
# compute the dot product of the student embedding
# and assessment embedding for each interaction
student_dot_assessment = np.einsum(
'ij, ij->i',
student_embeddings_for_assessment_ixns,
assessment_embeddings_for_assessment_ixns)[:, None]
# compute intermediate quantities for the gradient that get reused
exp_diff = np.exp(outcomes * (
assessment_embedding_norms_for_assessment_ixns - student_dot_assessment / \
assessment_embedding_norms_for_assessment_ixns - student_biases - \
assessment_biases))
one_plus_exp_diff = 1 + exp_diff
mult_diff = outcomes * exp_diff / one_plus_exp_diff
# get lesson embeddings for lesson interactions
lesson_embeddings_for_lesson_ixns = lesson_embeddings[lesson_idxes_for_lesson_ixns, :]
# get embeddings of student states resulting from lesson interactions
curr_student_embeddings_for_lesson_ixns = student_embeddings[student_idxes_for_lesson_ixns, :]
# get embeddings of student states prior to lesson interactions
prev_student_embeddings_for_lesson_ixns = \
student_embeddings[student_idxes_for_lesson_ixns - 1, :]
# compute intermediate quantities for the gradient that get reused
diffs = curr_student_embeddings_for_lesson_ixns - (
prev_student_embeddings_for_lesson_ixns) - (
lesson_embeddings_for_lesson_ixns) + forgetting_penalty_terms
diffs_over_var = diffs / learning_update_variance
if using_graph_prior:
# get distance from an assessment embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the assessment's
# governing concepts
assessment_diffs_from_concept_centers = assessment_embeddings - \
assessment_participation_in_concepts.dot(concept_embeddings)
# get distance from a lesson embedding to its prior embedding,
# i.e., the weighted average of the embeddings of the lesson's
# governing concepts
lesson_diffs_from_concept_centers = (
lesson_embeddings) - lesson_participation_in_concepts.dot(
concept_embeddings)
# grab the concept dependency graph
prereq_concept_idxes, postreq_concept_idxes = prereq_edge_concept_idxes
concept_participation_in_prereqs, concept_participation_in_postreqs = \
concept_participation_in_prereq_edges
# get prereq and postreq concept embeddings
prereq_concept_embeddings = concept_embeddings[prereq_concept_idxes, :]
postreq_concept_embeddings = concept_embeddings[postreq_concept_idxes, :]
# compute column vector of L2 norms for postreq concept embeddings
postreq_concept_norms = np.linalg.norm(postreq_concept_embeddings, axis=1)[:, None]
# compute the dot product of the prereq concept embedding
# and postreq concept embedding for each edge in the concept dependency graph
prereq_dot_postreq = np.einsum(
'ij, ij->i',
prereq_concept_embeddings,
postreq_concept_embeddings)[:, None]
# intermediate quantity, useful and reusable later
prereq_edge_diffs = prereq_dot_postreq / postreq_concept_norms - postreq_concept_norms
# compute the gradient w.r.t. student embeddings,
# which is the sum of gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
stud_grad_from_asmt_ixns = -student_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * \
assessment_embeddings_for_assessment_ixns)
stud_grad_from_lesson_ixns = curr_student_participation_in_lesson_ixns.dot(
diffs_over_var) - prev_student_participation_in_lesson_ixns.dot(diffs_over_var)
stud_grad_from_norm_regularization = 2 * student_regularization_constant * student_embeddings
gradient[:last_student_embedding_idx] = (
stud_grad_from_asmt_ixns + stud_grad_from_lesson_ixns + \
stud_grad_from_norm_regularization).ravel()
# compute the gradient w.r.t. assessment embeddings,
# which is the sum of gradient of the log-likelihood of assessment interactions
# and the gradient of the regularization terms
asmt_grad_from_asmt_ixns = -assessment_participation_in_assessment_ixns.dot(
mult_diff / assessment_embedding_norms_for_assessment_ixns * (
student_embeddings_for_assessment_ixns - assessment_embeddings_for_assessment_ixns - \
student_dot_assessment / np.einsum(
'ij, ij->ij',
assessment_embedding_norms_for_assessment_ixns,
assessment_embedding_norms_for_assessment_ixns) * \
assessment_embeddings_for_assessment_ixns))
if using_graph_prior:
asmt_grad_from_graph_regularization = 2 * graph_regularization_constant * \
assessment_diffs_from_concept_centers
else:
asmt_grad_from_graph_regularization = 0
if using_l1_regularizer:
asmt_grad_from_norm_regularization = assessment_regularization_constant * np.sign(
assessment_embeddings)
else:
asmt_grad_from_norm_regularization = 2 * assessment_regularization_constant * \
assessment_embeddings
gradient[last_student_embedding_idx:last_assessment_embedding_idx] = (
asmt_grad_from_asmt_ixns + asmt_grad_from_graph_regularization + \
asmt_grad_from_norm_regularization).ravel()
# compute the gradient w.r.t. lesson embeddings,
# which is the sum of gradient of the log-likelihood of assessment and lesson interactions
# and the gradient of the regularization terms
lesson_grad_from_lesson_ixns = -lesson_participation_in_lesson_ixns.dot(diffs_over_var)
if using_graph_prior:
lesson_grad_from_graph_regularization = 2 * graph_regularization_constant * \
lesson_diffs_from_concept_centers
else:
lesson_grad_from_graph_regularization = 0
if using_l1_regularizer:
lesson_grad_from_norm_regularization = lesson_regularization_constant * np.sign(
lesson_embeddings)
else:
lesson_grad_from_norm_regularization = 2 * lesson_regularization_constant * \
lesson_embeddings
gradient[last_assessment_embedding_idx:last_lesson_embedding_idx] = (
lesson_grad_from_lesson_ixns + lesson_grad_from_graph_regularization + \
lesson_grad_from_norm_regularization).ravel()
if using_bias:
# compute the gradient w.r.t. student biases,
# which is the sum of gradient of the log-likelihood of assessment interactions
gradient[last_lesson_embedding_idx:last_student_bias_idx] = \
-student_bias_participation_in_assessment_ixns.dot(mult_diff).ravel()
# compute the gradient w.r.t. assessment biases,
# which is the sum of gradient of the log-likelihood of assessment interactions
gradient[last_student_bias_idx:last_assessment_bias_idx] = \
-assessment_participation_in_assessment_ixns.dot(mult_diff).ravel()
if using_graph_prior:
# compute the gradient w.r.t. concept embeddings,
# which is the sum of gradient of the log-likelihood of assessment
# and lesson interactions and the gradient of the regularization terms
concept_grad_from_assessments = -concept_participation_in_assessments.dot(
2 * assessment_diffs_from_concept_centers)
concept_grad_from_lessons = concept_participation_in_lessons.dot(
2 * lesson_diffs_from_concept_centers)
concept_grad_from_prereqs = concept_participation_in_prereqs.dot(
postreq_concept_embeddings / postreq_concept_norms)
concept_grad_from_postreqs = concept_participation_in_postreqs.dot(
(prereq_concept_embeddings - 2 * postreq_concept_embeddings) / postreq_concept_norms \
- 2 * prereq_dot_postreq * postreq_concept_embeddings / \
postreq_concept_norms**3)
concept_grad_from_norm_regularization = 2 * concept_regularization_constant * \
concept_embeddings
gradient[last_assessment_bias_idx:] = (graph_regularization_constant * (
concept_grad_from_assessments + concept_grad_from_lessons + \
concept_grad_from_prereqs + concept_grad_from_postreqs) + \
concept_grad_from_norm_regularization).ravel()
cost_from_assessment_ixns = np.einsum('ij->', np.log(one_plus_exp_diff))
# divide by the variance before summing, so per-interaction variances are handled
cost_from_lesson_ixns = np.einsum('ij, ij', diffs, diffs_over_var) / 2
cost_from_student_regularization = student_regularization_constant * np.einsum(
'ij, ij', student_embeddings, student_embeddings)
if using_l1_regularizer:
cost_from_assessment_regularization = assessment_regularization_constant * np.absolute(
assessment_embeddings).sum()
cost_from_lesson_regularization = lesson_regularization_constant * np.absolute(
lesson_embeddings).sum()
else:
cost_from_assessment_regularization = assessment_regularization_constant * np.einsum(
'ij, ij', assessment_embeddings, assessment_embeddings)
cost_from_lesson_regularization = lesson_regularization_constant * np.einsum(
'ij, ij', lesson_embeddings, lesson_embeddings)
if using_graph_prior:
cost_from_concept_regularization = concept_regularization_constant * np.einsum(
'ij, ij', concept_embeddings, concept_embeddings)
cost_from_graph_regularization = graph_regularization_constant * ((
assessment_diffs_from_concept_centers**2).sum() + (
lesson_diffs_from_concept_centers**2).sum() + prereq_edge_diffs.sum())
else:
cost_from_concept_regularization = 0
cost_from_graph_regularization = 0
cost_from_norm_regularization = cost_from_student_regularization + \
cost_from_assessment_regularization + cost_from_lesson_regularization + \
cost_from_concept_regularization
cost_from_ixns = cost_from_assessment_ixns + cost_from_lesson_ixns
cost_from_regularization = cost_from_norm_regularization + cost_from_graph_regularization
cost = cost_from_ixns + cost_from_regularization
return cost, gradient
def get_grad(
using_scipy=True,
using_lessons=True,
using_prereqs=True):
"""
Select the appropriate gradient and cost function evaluator
for a model configuration
:param bool using_scipy: Using scipy.optimize.minimize for optimization
:param bool using_lessons: Including lessons in the embedding model
:param bool using_prereqs: Including lesson prereqs in the embedding model
:rtype: function
:return: A function that takes current parameter values
as input, and outputs the value of the cost function and its gradient
with respect to those parameters
"""
if using_scipy:
if using_lessons:
if using_prereqs:
return with_scipy_with_prereqs
else:
return with_scipy_without_prereqs
else:
return with_scipy_without_lessons
else:
if using_lessons:
if using_prereqs:
return without_scipy_with_prereqs
else:
return without_scipy_without_prereqs
else:
return without_scipy_without_lessons
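# Editorial sketch (hypothetical wiring, not from the original module): the
# (cost, gradient) pair returned by these evaluators is the contract
# scipy.optimize.minimize expects when jac=True. `initial_param_vals` and
# `args` below are placeholders for the flattened parameters and the long
# tuple of precomputed matrices documented in the docstrings above:
#
#     from scipy.optimize import minimize
#     grad_fn = get_grad(using_scipy=True, using_lessons=True, using_prereqs=True)
#     result = minimize(grad_fn, initial_param_vals, args=args,
#                       method='L-BFGS-B', jac=True)
#     estimated_param_vals = result.x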
| 49.470252
| 99
| 0.725721
| 15,450
| 129,711
| 5.746926
| 0.019417
| 0.039194
| 0.027571
| 0.016218
| 0.989064
| 0.988264
| 0.984751
| 0.983467
| 0.98243
| 0.980921
| 0
| 0.002193
| 0.230027
| 129,711
| 2,621
| 100
| 49.489126
| 0.886827
| 0.414475
| 0
| 0.914489
| 0
| 0
| 0.006135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007918
| false
| 0
| 0.003167
| 0
| 0.022961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
da3553b92690dd94a9549c902321266b55322de7
| 81
|
py
|
Python
|
tests/__init__.py
|
inexio/pysnmpcrypto
|
85380d33cabcee4aee5da53927708fe8b894a643
|
[
"BSD-2-Clause"
] | 2
|
2018-12-18T07:21:30.000Z
|
2020-11-19T19:46:17.000Z
|
tests/__init__.py
|
inexio/pysnmpcrypto
|
85380d33cabcee4aee5da53927708fe8b894a643
|
[
"BSD-2-Clause"
] | 3
|
2018-07-04T06:50:42.000Z
|
2019-02-26T07:49:09.000Z
|
tests/__init__.py
|
inexio/pysnmpcrypto
|
85380d33cabcee4aee5da53927708fe8b894a643
|
[
"BSD-2-Clause"
] | 3
|
2020-12-22T11:27:54.000Z
|
2021-11-12T12:15:39.000Z
|
from tests import test_aes
from tests import test_des
from tests import test_des3
| 27
| 27
| 0.864198
| 15
| 81
| 4.466667
| 0.466667
| 0.402985
| 0.671642
| 0.850746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.135802
| 81
| 3
| 27
| 27
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
da41808ec856b0db32cbd1bb32f6a2ee25dffad6
| 9,220
|
py
|
Python
|
molecule/default/tests/test_section_06.py
|
hanyhesham/cis_ubuntu_2004
|
43042c6c212704c534499b250b2a2cfdd0399c64
|
[
"MIT"
] | null | null | null |
molecule/default/tests/test_section_06.py
|
hanyhesham/cis_ubuntu_2004
|
43042c6c212704c534499b250b2a2cfdd0399c64
|
[
"MIT"
] | null | null | null |
molecule/default/tests/test_section_06.py
|
hanyhesham/cis_ubuntu_2004
|
43042c6c212704c534499b250b2a2cfdd0399c64
|
[
"MIT"
] | null | null | null |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
AUDIT_SYSTEM_SCRIPT = "/usr/local/bin/6_1_1_cis_audit_system.sh"
ETC_PASSWD = "/etc/passwd"
ETC_GSHADOW_DASH = "/etc/gshadow-"
ETC_SHADOW = "/etc/shadow"
ETC_GROUP = "/etc/group"
ETC_PASSWD_DASH = "/etc/passwd-"
ETC_SHADOW_DASH = "/etc/shadow-"
ETC_GROUP_DASH = "/etc/group-"
ETC_GSHADOW = "/etc/gshadow"
def test_6_1_1_script_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.1
Tests if /usr/local/bin/6_1_1_cis_audit_system.sh file exists
"""
assert host.file(AUDIT_SYSTEM_SCRIPT).exists
def test_6_1_1_script_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.1
Tests if /usr/local/bin/6_1_1_cis_audit_system.sh is a file
"""
assert host.file(AUDIT_SYSTEM_SCRIPT).is_file
def test_6_1_1_script_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.1
Tests if /usr/local/bin/6_1_1_cis_audit_system.sh has 0744 mode
"""
assert host.file(AUDIT_SYSTEM_SCRIPT).mode == 0o744
def test_6_1_1_script_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.1
Tests if /usr/local/bin/6_1_1_cis_audit_system.sh is owned by user root
"""
assert host.file(AUDIT_SYSTEM_SCRIPT).user == 'root'
def test_6_1_1_script_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.1
Tests if /usr/local/bin/6_1_1_cis_audit_system.sh is owned by group root
"""
assert host.file(AUDIT_SYSTEM_SCRIPT).group == 'root'
def test_6_1_2_etc_passwd_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.2
Tests if /etc/passwd file exists
"""
assert host.file(ETC_PASSWD).exists
def test_6_1_2_etc_passwd_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.2
Tests if /etc/passwd is a file
"""
assert host.file(ETC_PASSWD).is_file
def test_6_1_2_etc_passwd_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.2
Tests if /etc/passwd has 0644 mode
"""
assert host.file(ETC_PASSWD).mode == 0o644
def test_6_1_2_etc_passwd_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.2
Tests if /etc/passwd is owned by user root
"""
assert host.file(ETC_PASSWD).user == 'root'
def test_6_1_2_etc_passwd_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.2
Tests if /etc/passwd is owned by group root
"""
assert host.file(ETC_PASSWD).group == 'root'
def test_6_1_3_etc_gshadow_dash_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.3
Tests if /etc/gshadow- file exists
"""
assert host.file(ETC_GSHADOW_DASH).exists
def test_6_1_3_etc_gshadow_dash_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.3
Tests if /etc/gshadow- is a file
"""
assert host.file(ETC_GSHADOW_DASH).is_file
def test_6_1_3_etc_gshadow_dash_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.3
Tests if /etc/gshadow- has 0640 mode
"""
assert host.file(ETC_GSHADOW_DASH).mode == 0o640
def test_6_1_3_etc_gshadow_dash_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.3
Tests if /etc/gshadow- is owned by user root
"""
assert host.file(ETC_GSHADOW_DASH).user == 'root'
def test_6_1_3_etc_gshadow_dash_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.3
Tests if /etc/gshadow- is owned by group root
"""
assert host.file(ETC_GSHADOW_DASH).group == 'root'
def test_6_1_4_etc_shadow_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.4
Tests if /etc/shadow file exists
"""
assert host.file(ETC_SHADOW).exists
def test_6_1_4_etc_shadow_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.4
Tests if /etc/shadow is a file
"""
assert host.file(ETC_SHADOW).is_file
def test_6_1_4_etc_shadow_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.4
Tests if /etc/shadow has 0640 mode
"""
assert host.file(ETC_SHADOW).mode == 0o640
def test_6_1_4_etc_shadow_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.4
Tests if /etc/shadow is owned by user root
"""
assert host.file(ETC_SHADOW).user == 'root'
def test_6_1_4_etc_shadow_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.4
Tests if /etc/shadow is owned by group root
"""
assert host.file(ETC_SHADOW).group == 'root'
def test_6_1_5_etc_group_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.5
Tests if /etc/group file exists
"""
assert host.file(ETC_GROUP).exists
def test_6_1_5_etc_group_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.5
Tests if /etc/group is a file
"""
assert host.file(ETC_GROUP).is_file
def test_6_1_5_etc_group_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.5
Tests if /etc/group has 0644 mode
"""
assert host.file(ETC_GROUP).mode == 0o644
def test_6_1_5_etc_group_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.5
Tests if /etc/group is owned by user root
"""
assert host.file(ETC_GROUP).user == 'root'
def test_6_1_5_etc_group_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.5
Tests if /etc/group is owned by group root
"""
assert host.file(ETC_GROUP).group == 'root'
def test_6_1_6_etc_passwd_dash_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.6
Tests if /etc/passwd- file exists
"""
assert host.file(ETC_PASSWD_DASH).exists
def test_6_1_6_etc_passwd_dash_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.6
Tests if /etc/passwd- is a file
"""
assert host.file(ETC_PASSWD_DASH).is_file
def test_6_1_6_etc_passwd_dash_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.6
Tests if /etc/passwd- has 0644 mode
"""
assert host.file(ETC_PASSWD_DASH).mode == 0o644
def test_6_1_6_etc_passwd_dash_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.6
Tests if /etc/passwd- is owned by user root
"""
assert host.file(ETC_PASSWD_DASH).user == 'root'
def test_6_1_6_etc_passwd_dash_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.6
Tests if /etc/passwd- is owned by group root
"""
assert host.file(ETC_PASSWD_DASH).group == 'root'
def test_6_1_7_etc_shadow_dash_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.7
Tests if /etc/shadow- file exists
"""
assert host.file(ETC_SHADOW_DASH).exists
def test_6_1_7_etc_shadow_dash_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.7
Tests if /etc/shadow- is a file
"""
assert host.file(ETC_SHADOW_DASH).is_file
def test_6_1_7_etc_shadow_dash_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.7
Tests if /etc/shadow- has 0640 mode
"""
assert host.file(ETC_SHADOW_DASH).mode == 0o640
def test_6_1_7_etc_shadow_dash_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.7
Tests if /etc/shadow- is owned by user root
"""
assert host.file(ETC_SHADOW_DASH).user == 'root'
def test_6_1_7_etc_shadow_dash_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.7
Tests if /etc/shadow- is owned by group root
"""
assert host.file(ETC_SHADOW_DASH).group == 'root'
def test_6_1_8_etc_group_dash_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.8
Tests if /etc/group- file exists
"""
assert host.file(ETC_GROUP_DASH).exists
def test_6_1_8_etc_group_dash_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.8
Tests if /etc/group- is a file
"""
assert host.file(ETC_GROUP_DASH).is_file
def test_6_1_8_etc_group_dash_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.8
Tests if /etc/group- has 0644 mode
"""
assert host.file(ETC_GROUP_DASH).mode == 0o644
def test_6_1_8_etc_group_dash_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.8
Tests if /etc/group- is owned by user root
"""
assert host.file(ETC_GROUP_DASH).user == 'root'
def test_6_1_8_etc_group_dash_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.8
Tests if /etc/group- is owned by group root
"""
assert host.file(ETC_GROUP_DASH).group == 'root'
def test_6_1_9_etc_gshadow_exists(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.9
Tests if /etc/gshadow file exists
"""
assert host.file(ETC_GSHADOW).exists
def test_6_1_9_etc_gshadow_isfile(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.9
Tests if /etc/gshadow is a file
"""
assert host.file(ETC_GSHADOW).is_file
def test_6_1_9_etc_gshadow_mode(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.9
Tests if /etc/gshadow has 0640 mode
"""
assert host.file(ETC_GSHADOW).mode == 0o640
def test_6_1_9_etc_gshadow_user(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.9
Tests if /etc/gshadow is owned by user root
"""
assert host.file(ETC_GSHADOW).user == 'root'
def test_6_1_9_etc_gshadow_group(host):
"""
CIS Ubuntu 20.04 v1.0.0 - Rule # 6.1.9
Tests if /etc/gshadow is owned by group root
"""
assert host.file(ETC_GSHADOW).group == 'root'
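# Editorial sketch (not part of the original suite): the per-file checks above
# repeat the same four assertions; they could be collapsed with
# pytest.mark.parametrize, e.g. (assuming the same testinfra `host` fixture):
#
#     import pytest
#
#     @pytest.mark.parametrize('path,mode', [
#         (ETC_PASSWD, 0o644), (ETC_PASSWD_DASH, 0o644),
#         (ETC_SHADOW, 0o640), (ETC_SHADOW_DASH, 0o640),
#         (ETC_GROUP, 0o644), (ETC_GROUP_DASH, 0o644),
#         (ETC_GSHADOW, 0o640), (ETC_GSHADOW_DASH, 0o640),
#     ])
#     def test_system_file(host, path, mode):
#         f = host.file(path)
#         assert f.exists and f.is_file
#         assert f.mode == mode
#         assert f.user == 'root' and f.group == 'root'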
| 24.391534
| 76
| 0.653037
| 1,694
| 9,220
| 3.312869
| 0.038371
| 0.034212
| 0.064148
| 0.072167
| 0.939059
| 0.931932
| 0.873485
| 0.79722
| 0.723806
| 0.699216
| 0
| 0.093672
| 0.218438
| 9,220
| 377
| 77
| 24.456233
| 0.685124
| 0.389696
| 0
| 0
| 0
| 0
| 0.048208
| 0.013205
| 0
| 0
| 0
| 0
| 0.436893
| 1
| 0.436893
| false
| 0.165049
| 0.019417
| 0
| 0.456311
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
e5a2d6a80f51dc6e78dc2f78a5440c6474661bdd
| 41,391
|
py
|
Python
|
tests/unit_tests/test_tethys_cli/test_services_commands.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 79
|
2015-10-05T13:13:28.000Z
|
2022-02-01T12:30:33.000Z
|
tests/unit_tests/test_tethys_cli/test_services_commands.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 542
|
2015-08-12T22:11:32.000Z
|
2022-03-29T22:18:08.000Z
|
tests/unit_tests/test_tethys_cli/test_services_commands.py
|
msouff/tethys
|
45795d1e6561d5db8fddd838f4d1ae1d91dbb837
|
[
"BSD-2-Clause"
] | 71
|
2016-01-16T01:03:41.000Z
|
2022-03-31T17:55:54.000Z
|
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # noqa: F401
import unittest
from unittest import mock
from tethys_cli.services_commands import (
services_create_persistent_command,
services_remove_persistent_command,
services_create_spatial_command,
services_remove_spatial_command,
services_list_command,
services_create_dataset_command,
services_remove_dataset_command,
services_create_wps_command,
services_remove_wps_command
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
class ServicesCommandsTest(unittest.TestCase):
"""
Tests for tethys_cli.services_commands
"""
# Dictionary used in some of the tests
my_dict = {'id': 'Id_foo', 'name': 'Name_foo', 'host': 'Host_foo', 'port': 'Port_foo', 'endpoint': 'EndPoint_foo',
'public_endpoint': 'PublicEndPoint_bar', 'apikey': 'APIKey_foo'}
def setUp(self):
load_apps_patcher = mock.patch('tethys_cli.services_commands.load_apps')
load_apps_patcher.start()
self.addCleanup(load_apps_patcher.stop)
def tearDown(self):
pass
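# Editorial note (not in the original file): stacked @mock.patch decorators
# inject mocks bottom-up, so the bottom-most patch binds to the first mock
# parameter after self. In the test below, mock_service therefore comes from
# 'tethys_services.models.PersistentStoreService' and mock_pretty_output from
# 'tethys_cli.services_commands.pretty_output'.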
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test without any errors or problems.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
services_create_persistent_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Persistent Store Service!', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command_exception_attributeerror(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test with an AttributeError exception thrown.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = AttributeError
services_create_persistent_command(mock_args)
mock_service.assert_not_called()
mock_service.objects.get().save.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Missing Input Parameters. Please check your input.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command_exception_indexerror(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test with an IndexError exception thrown.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.side_effect = IndexError
services_create_persistent_command(mock_args)
mock_service.assert_called()
mock_service.objects.get().save.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_create_persistent_command_exception_integrityerror(self, mock_service, mock_pretty_output):
"""
Test for services_create_persistent_command.
For running the test with an IntegrityError exception thrown.
:param mock_service: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.side_effect = IntegrityError
services_create_persistent_command(mock_args)
mock_service.assert_called()
mock_service.objects.get().save.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Persistent Store Service with name', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_Exceptions(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_persistent_command
Test for handling all exceptions thrown by the function.
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Persistent Store'
mock_args.force = True
mock_service.objects.get.side_effect = [ValueError, ObjectDoesNotExist]
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('A Persistent Store Service with ID/Name', po_call_args[0][0][0])
self.assertIn('does not exist', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_force(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_persistent_command
Test for forcing a delete of the service
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.force = True
mock_service.__str__.return_value = 'Persistent Store'
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Persistent Store Service', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_no_proceed_invalid_char(self, mock_service, mock_exit,
mock_pretty_output, mock_input):
"""
Test for services_remove_persistent_command
Handles answering the prompt to delete with invalid characters, and answering no.
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['foo', 'N']
mock_service.__str__.return_value = 'Persistent Store'
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
mock_service.objects.get().delete.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Aborted. Persistent Store Service not removed.', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertEqual(
'Are you sure you want to delete this Persistent Store Service? [y/n]: ', po_call_args[0][0][0])
self.assertEqual('Please enter either "y" or "n": ', po_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.PersistentStoreService')
def test_services_remove_persistent_command_proceed(self, mock_service, mock_exit, mock_pretty_output, mock_input):
"""
Test for services_remove_persistent_command
Handles answering the prompt to delete by answering yes
:param mock_service: mock for PersistentStoreService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Persistent Store'
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['y']
self.assertRaises(SystemExit, services_remove_persistent_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Persistent Store Service', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual(
'Are you sure you want to delete this Persistent Store Service? [y/n]: ', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_IndexError(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
Handles an IndexError exception
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'IndexError:9876@IndexError' # No 'http' or '://'
services_create_spatial_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
self.assertIn('"<username>:<password>@<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_FormatError(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
Handles a FormatError exception
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'foo@foo:foo' # No 'http' or '://'
services_create_spatial_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The public_endpoint argument (-p) must be of the form ', po_call_args[0][0][0])
self.assertIn('"<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command_IntegrityError(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
Handles an IntegrityError exception
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_service.side_effect = IntegrityError
services_create_spatial_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Spatial Dataset Service with name ', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_create_spatial_command(self, mock_service, mock_pretty_output):
"""
Test for services_create_spatial_command
Tests the success path where the service is created and saved
:param mock_service: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_service.return_value = mock.MagicMock()
services_create_spatial_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Spatial Dataset Service!', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_remove_wps_command_Exceptions(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_wps_command
Tests handling of the exceptions raised when the service lookup fails (ValueError, then ObjectDoesNotExist)
:param mock_service: mock for Web Processing Service
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Web Processing'
mock_service.objects.get.side_effect = [ValueError, ObjectDoesNotExist]
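# side_effect supplies one exception per objects.get call: ValueError for the lookup by ID,
# then ObjectDoesNotExist for the second lookup (by name), triggering the "does not exist" message.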
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_wps_command, mock_args)
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('A Web Processing Service with ID/Name', po_call_args[0][0][0])
self.assertIn('does not exist.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_remove_spatial_command_force(self, mock_service, mock_exit, mock_pretty_output):
"""
Test for services_remove_spatial_command
For when a delete is forced
:param mock_service: mock for SpatialDatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Spatial Dataset'
mock_args.force = True
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
self.assertRaises(SystemExit, services_remove_spatial_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Spatial Dataset Service', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_remove_spatial_command_no_proceed_invalid_char(self, mock_service, mock_exit,
mock_pretty_output, mock_input):
"""
Test for services_remove_spatial_command
For when the delete is not forced; the prompt is first answered with an invalid character, then with 'N' to abort
:param mock_service: mock for SpatialDatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Spatial Dataset'
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['foo', 'N']
self.assertRaises(SystemExit, services_remove_spatial_command, mock_args)
mock_service.objects.get().delete.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Aborted. Spatial Dataset Service not removed.', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertEqual('Are you sure you want to delete this Spatial Dataset Service? [y/n]: ', po_call_args[0][0][0])
self.assertEqual('Please enter either "y" or "n": ', po_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.SpatialDatasetService')
def test_services_remove_spatial_command_proceed(self, mock_service, mock_exit, mock_pretty_output, mock_input):
"""
Test for services_remove_spatial_command
For when the delete is not forced and the prompt is answered with 'y' to delete
:param mock_service: mock for SpatialDatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Spatial Dataset'
mock_args.force = False
# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['y']
self.assertRaises(SystemExit, services_remove_spatial_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Spatial Dataset Service', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Are you sure you want to delete this Spatial Dataset Service? [y/n]: ', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
@mock.patch('tethys_services.models.SpatialDatasetService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_not_spatial_not_persistent(self, mock_mtd, mock_spatial, mock_persistent,
mock_pretty_output, mock_print):
"""
Test for services_list_command
Neither spatial nor persistent is set, so both service types are processed
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_spatial: mock for SpatialDatasetService
:param mock_persistent: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = False
mock_args.dataset = False
mock_args.wps = False
mock_spatial.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
mock.MagicMock()]
mock_persistent.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(4, len(po_call_args))
self.assertIn('Persistent Store Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Host', po_call_args[1][0][0])
self.assertIn('Port', po_call_args[1][0][0])
self.assertNotIn('Endpoint', po_call_args[1][0][0])
self.assertNotIn('Public Endpoint', po_call_args[1][0][0])
self.assertNotIn('API Key', po_call_args[1][0][0])
self.assertIn('Spatial Dataset Services:', po_call_args[2][0][0])
self.assertIn('ID', po_call_args[3][0][0])
self.assertIn('Name', po_call_args[3][0][0])
self.assertNotIn('Host', po_call_args[3][0][0])
self.assertNotIn('Port', po_call_args[3][0][0])
self.assertIn('Endpoint', po_call_args[3][0][0])
self.assertIn('Public Endpoint', po_call_args[3][0][0])
self.assertIn('API Key', po_call_args[3][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['host'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['port'], rts_call_args[0][0][0])
self.assertIn(self.my_dict['id'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[4][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[4][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[4][0][0])
self.assertIn(self.my_dict['apikey'], rts_call_args[4][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.SpatialDatasetService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_spatial(self, mock_mtd, mock_spatial, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only spatial is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_spatial: mock for SpatialDatasetService
:param mock_pretty_output: mock for pretty_output text
:param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = True
mock_args.persistent = False
mock_args.dataset = False
mock_args.wps = False
mock_spatial.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Spatial Dataset Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertNotIn('Host', po_call_args[1][0][0])
self.assertNotIn('Port', po_call_args[1][0][0])
self.assertIn('Endpoint', po_call_args[1][0][0])
self.assertIn('Public Endpoint', po_call_args[1][0][0])
self.assertIn('API Key', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[2][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[2][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[2][0][0])
self.assertIn(self.my_dict['apikey'], rts_call_args[2][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.PersistentStoreService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_persistent(self, mock_mtd, mock_persistent, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only persistent is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_persistent: mock for PersistentStoreService
:param mock_pretty_output: mock for pretty_output text
:param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = True
mock_args.dataset = False
mock_args.wps = False
mock_persistent.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Persistent Store Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Host', po_call_args[1][0][0])
self.assertIn('Port', po_call_args[1][0][0])
self.assertNotIn('Endpoint', po_call_args[1][0][0])
self.assertNotIn('Public Endpoint', po_call_args[1][0][0])
self.assertNotIn('API Key', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['host'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['port'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['endpoint'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['public_endpoint'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['apikey'], rts_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_dataset(self, mock_mtd, mock_dataset, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only dataset is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_dataset: mock for DatasetService
:param mock_pretty_output: mock for pretty_output text
:param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = False
mock_args.dataset = True
mock_args.wps = False
mock_dataset.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Dataset Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Endpoint', po_call_args[1][0][0])
self.assertIn('Public Endpoint', po_call_args[1][0][0])
self.assertIn('API Key', po_call_args[1][0][0])
self.assertNotIn('Host', po_call_args[1][0][0])
self.assertNotIn('Port', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['apikey'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.print')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
@mock.patch('tethys_cli.services_commands.model_to_dict')
def test_services_list_command_wps(self, mock_mtd, mock_wps, mock_pretty_output, mock_print):
"""
Test for services_list_command
Only wps is set
:param mock_mtd: mock for model_to_dict to return a dictionary
:param mock_wps: mock for WebProcessingService
:param mock_pretty_output: mock for pretty_output text
:param mock_print: mock for text written with print statements
:return:
"""
mock_mtd.return_value = self.my_dict
mock_args = mock.MagicMock()
mock_args.spatial = False
mock_args.persistent = False
mock_args.dataset = False
mock_args.wps = True
mock_wps.objects.order_by('id').all.return_value = [mock.MagicMock(), mock.MagicMock()]
services_list_command(mock_args)
# Check expected pretty_output
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(2, len(po_call_args))
self.assertIn('Web Processing Services:', po_call_args[0][0][0])
self.assertIn('ID', po_call_args[1][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('Endpoint', po_call_args[1][0][0])
self.assertIn('Public Endpoint', po_call_args[1][0][0])
self.assertNotIn('Host', po_call_args[1][0][0])
self.assertNotIn('Port', po_call_args[1][0][0])
self.assertNotIn('API Key', po_call_args[1][0][0])
# Check text written with Python's print
rts_call_args = mock_print.call_args_list
self.assertIn(self.my_dict['id'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['name'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['endpoint'], rts_call_args[1][0][0])
self.assertIn(self.my_dict['public_endpoint'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['host'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['port'], rts_call_args[1][0][0])
self.assertNotIn(self.my_dict['apikey'], rts_call_args[1][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_IndexError(self, mock_service, mock_pretty_output):
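"""
Test for services_create_dataset_command
Handles an IndexError exception
:param mock_service: mock for DatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""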
mock_args = mock.MagicMock()
mock_args.connection = 'IndexError:9876@IndexError' # No 'http' or '://'
services_create_dataset_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
self.assertIn('"<username>:<password>@<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_FormatError(self, mock_service, mock_pretty_output):
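"""
Test for services_create_dataset_command
Handles a FormatError exception
:param mock_service: mock for DatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""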
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'foo@foo:foo' # No 'http' or '://'
services_create_dataset_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The public_endpoint argument (-p) must be of the form ', po_call_args[0][0][0])
self.assertIn('"<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command_IntegrityError(self, mock_service, mock_pretty_output):
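"""
Test for services_create_dataset_command
Handles an IntegrityError exception
:param mock_service: mock for DatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""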
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_service.side_effect = IntegrityError
services_create_dataset_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Dataset Service with name ', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.DatasetService')
def test_services_create_dataset_command(self, mock_service, mock_pretty_output):
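"""
Test for services_create_dataset_command
Tests the success path where the service is created and saved
:param mock_service: mock for DatasetService
:param mock_pretty_output: mock for pretty_output text
:return:
"""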
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_service.return_value = mock.MagicMock()
services_create_dataset_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Dataset Service!', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.input')
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_cli.services_commands.exit')
@mock.patch('tethys_services.models.DatasetService')
def test_services_remove_dataset_command_proceed(self, mock_service, mock_exit, mock_pretty_output, mock_input):
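"""
Test for services_remove_dataset_command
For when the delete is not forced and the prompt is answered with 'y' to delete
:param mock_service: mock for DatasetService
:param mock_exit: mock for handling exit() code in function
:param mock_pretty_output: mock for pretty_output text
:param mock_input: mock for handling raw_input requests
:return:
"""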
mock_args = mock.MagicMock()
mock_service.__str__.return_value = 'Dataset'
mock_args.force = False
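# NOTE: to prevent our tests from exiting prematurely, we change the behavior of exit to raise an exception
# to break the code execution, which we catch below.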
mock_exit.side_effect = SystemExit
mock_input.side_effect = ['y']
self.assertRaises(SystemExit, services_remove_dataset_command, mock_args)
mock_service.objects.get().delete.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Successfully removed Dataset Service', po_call_args[0][0][0])
po_call_args = mock_input.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Are you sure you want to delete this Dataset Service? [y/n]: ', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_create_wps_command_IndexError(self, mock_service, mock_pretty_output):
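"""
Test for services_create_wps_command
Handles an IndexError exception
:param mock_service: mock for WebProcessingService
:param mock_pretty_output: mock for pretty_output text
:return:
"""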
mock_args = mock.MagicMock()
mock_args.connection = 'IndexError:9876@IndexError' # No 'http' or '://'
services_create_wps_command(mock_args)
mock_service.assert_not_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('The connection argument (-c) must be of the form', po_call_args[0][0][0])
self.assertIn('"<username>:<password>@<protocol>//<host>:<port>".', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_create_wps_command_IntegrityError(self, mock_service, mock_pretty_output):
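"""
Test for services_create_wps_command
Handles an IntegrityError exception
:param mock_service: mock for WebProcessingService
:param mock_pretty_output: mock for pretty_output text
:return:
"""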
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_args.public_endpoint = 'http://foo:1234'
mock_service.side_effect = IntegrityError
services_create_wps_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertIn('Web Processing Service with name ', po_call_args[0][0][0])
self.assertIn('already exists. Command aborted.', po_call_args[0][0][0])
@mock.patch('tethys_cli.services_commands.pretty_output')
@mock.patch('tethys_services.models.WebProcessingService')
def test_services_create_wps_command(self, mock_service, mock_pretty_output):
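"""
Test for services_create_wps_command
Tests the success path where the service is created and saved
:param mock_service: mock for WebProcessingService
:param mock_pretty_output: mock for pretty_output text
:return:
"""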
mock_args = mock.MagicMock()
mock_args.connection = 'foo:pass@http:://foo:1234'
mock_service.return_value = mock.MagicMock()
services_create_wps_command(mock_args)
mock_service.assert_called()
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertEqual(1, len(po_call_args))
self.assertEqual('Successfully created new Web Processing Service!', po_call_args[0][0][0])
Source file: mars/serialize/protos/indexvalue_pb2.py (repo: sighingnow/mars, commit: c7897fbd144d230fff5edabc1494fb3ff44aa0d2, license: Apache-2.0, language: Python, size: 93299 bytes)
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mars/serialize/protos/indexvalue.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mars.serialize.protos import value_pb2 as mars_dot_serialize_dot_protos_dot_value__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mars/serialize/protos/indexvalue.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n&mars/serialize/protos/indexvalue.proto\x1a!mars/serialize/protos/value.proto\"\xdb\"\n\nIndexValue\x12\"\n\x05index\x18\x01 \x01(\x0b\x32\x11.IndexValue.IndexH\x00\x12-\n\x0brange_index\x18\x02 \x01(\x0b\x32\x16.IndexValue.RangeIndexH\x00\x12\x39\n\x11\x63\x61tegorical_index\x18\x03 \x01(\x0b\x32\x1c.IndexValue.CategoricalIndexH\x00\x12\x33\n\x0einterval_index\x18\x04 \x01(\x0b\x32\x19.IndexValue.IntervalIndexH\x00\x12\x33\n\x0e\x64\x61tetime_index\x18\x05 \x01(\x0b\x32\x19.IndexValue.DatetimeIndexH\x00\x12\x35\n\x0ftimedelta_index\x18\x06 \x01(\x0b\x32\x1a.IndexValue.TimedeltaIndexH\x00\x12/\n\x0cperiod_index\x18\x07 \x01(\x0b\x32\x17.IndexValue.PeriodIndexH\x00\x12-\n\x0bint64_index\x18\x08 \x01(\x0b\x32\x16.IndexValue.Int64IndexH\x00\x12/\n\x0cuint64_index\x18\t \x01(\x0b\x32\x17.IndexValue.UInt64IndexH\x00\x12\x31\n\rfloat64_index\x18\n \x01(\x0b\x32\x18.IndexValue.Float64IndexH\x00\x12-\n\x0bmulti_index\x18\x0b \x01(\x0b\x32\x16.IndexValue.MultiIndexH\x00\x1a\xa9\x02\n\x05Index\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x15\n\x05\x64type\x18\x03 \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\x98\x02\n\nRangeIndex\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x15\n\x05slice\x18\x02 \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xca\x02\n\x10\x43\x61tegoricalIndex\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x1a\n\ncategories\x18\x03 \x01(\x0b\x32\x06.Value\x12\x0f\n\x07ordered\x18\x04 \x01(\x08\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xaa\x02\n\rIntervalIndex\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x0e\n\x06\x63losed\x18\x03 \x01(\x08\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xbe\x03\n\rDatetimeIndex\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 '
'\x01(\x0b\x32\x06.Value\x12\x14\n\x04\x66req\x18\x03 \x01(\x0b\x32\x06.Value\x12\x15\n\x05start\x18\x04 \x01(\x0b\x32\x06.Value\x12\x0f\n\x07periods\x18\x05 \x01(\x03\x12\x13\n\x03\x65nd\x18\x06 \x01(\x0b\x32\x06.Value\x12\x16\n\x06\x63losed\x18\x07 \x01(\x0b\x32\x06.Value\x12\x12\n\x02tz\x18\x08 \x01(\x0b\x32\x06.Value\x12\x10\n\x08\x64\x61yfirst\x18\t \x01(\x08\x12\x11\n\tyearfirst\x18\n \x01(\x08\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\x9c\x03\n\x0eTimedeltaIndex\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x14\n\x04unit\x18\x03 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x66req\x18\x04 \x01(\x0b\x32\x06.Value\x12\x15\n\x05start\x18\x05 \x01(\x0b\x32\x06.Value\x12\x0f\n\x07periods\x18\x06 \x01(\x03\x12\x13\n\x03\x65nd\x18\x07 \x01(\x0b\x32\x06.Value\x12\x16\n\x06\x63losed\x18\x08 \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xb6\x04\n\x0bPeriodIndex\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x66req\x18\x03 \x01(\x0b\x32\x06.Value\x12\x15\n\x05start\x18\x04 \x01(\x0b\x32\x06.Value\x12\x0f\n\x07periods\x18\x05 \x01(\x03\x12\x13\n\x03\x65nd\x18\x06 \x01(\x0b\x32\x06.Value\x12\x14\n\x04year\x18\x07 \x01(\x0b\x32\x06.Value\x12\x15\n\x05month\x18\x08 \x01(\x0b\x32\x06.Value\x12\x16\n\x06quater\x18\t \x01(\x0b\x32\x06.Value\x12\x13\n\x03\x64\x61y\x18\n \x01(\x0b\x32\x06.Value\x12\x14\n\x04hour\x18\x0b \x01(\x0b\x32\x06.Value\x12\x16\n\x06minute\x18\x0c \x01(\x0b\x32\x06.Value\x12\x16\n\x06second\x18\r \x01(\x0b\x32\x06.Value\x12\x12\n\x02tz\x18\x0e \x01(\x0b\x32\x06.Value\x12\x15\n\x05\x64type\x18\x0f \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xae\x02\n\nInt64Index\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x15\n\x05\x64type\x18\x03 \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xaf\x02\n\x0bUInt64Index\x12\x14\n\x04name\x18\x01 '
'\x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x15\n\x05\x64type\x18\x03 \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xb0\x02\n\x0c\x46loat64Index\x12\x14\n\x04name\x18\x01 \x01(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x15\n\x05\x64type\x18\x03 \x01(\x0b\x32\x06.Value\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x1a\xab\x02\n\nMultiIndex\x12\x15\n\x05names\x18\x01 \x03(\x0b\x32\x06.Value\x12\x14\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x06.Value\x12\x11\n\tsortorder\x18\x03 \x01(\x05\x12\x0b\n\x03key\x18\x33 \x01(\t\x12\x1f\n\x17is_monotonic_increasing\x18\x34 \x01(\x08\x12\x1f\n\x17is_monotonic_decreasing\x18\x35 \x01(\x08\x12\x11\n\tis_unique\x18\x36 \x01(\x08\x12\x1b\n\x13should_be_monotonic\x18\x37 \x01(\x08\x12\x17\n\x07max_val\x18\x38 \x01(\x0b\x32\x06.Value\x12\x17\n\x07min_val\x18\x39 \x01(\x0b\x32\x06.Value\x12\x15\n\rmax_val_close\x18: \x01(\x08\x12\x15\n\rmin_val_close\x18; \x01(\x08\x42\r\n\x0bindex_valueb\x06proto3')
,
dependencies=[mars_dot_serialize_dot_protos_dot_value__pb2.DESCRIPTOR,])
_INDEXVALUE_INDEX = _descriptor.Descriptor(
name='Index',
full_name='IndexValue.Index',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.Index.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.Index.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='IndexValue.Index.dtype', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.Index.key', index=3,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.Index.is_monotonic_increasing', index=4,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.Index.is_monotonic_decreasing', index=5,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.Index.is_unique', index=6,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.Index.should_be_monotonic', index=7,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.Index.max_val', index=8,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.Index.min_val', index=9,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.Index.max_val_close', index=10,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.Index.min_val_close', index=11,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=639,
serialized_end=936,
)
_INDEXVALUE_RANGEINDEX = _descriptor.Descriptor(
name='RangeIndex',
full_name='IndexValue.RangeIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.RangeIndex.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slice', full_name='IndexValue.RangeIndex.slice', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.RangeIndex.key', index=2,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.RangeIndex.is_monotonic_increasing', index=3,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.RangeIndex.is_monotonic_decreasing', index=4,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.RangeIndex.is_unique', index=5,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.RangeIndex.should_be_monotonic', index=6,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.RangeIndex.max_val', index=7,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.RangeIndex.min_val', index=8,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.RangeIndex.max_val_close', index=9,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.RangeIndex.min_val_close', index=10,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=939,
serialized_end=1219,
)
_INDEXVALUE_CATEGORICALINDEX = _descriptor.Descriptor(
name='CategoricalIndex',
full_name='IndexValue.CategoricalIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.CategoricalIndex.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.CategoricalIndex.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='categories', full_name='IndexValue.CategoricalIndex.categories', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ordered', full_name='IndexValue.CategoricalIndex.ordered', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.CategoricalIndex.key', index=4,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.CategoricalIndex.is_monotonic_increasing', index=5,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.CategoricalIndex.is_monotonic_decreasing', index=6,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.CategoricalIndex.is_unique', index=7,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.CategoricalIndex.should_be_monotonic', index=8,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.CategoricalIndex.max_val', index=9,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.CategoricalIndex.min_val', index=10,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.CategoricalIndex.max_val_close', index=11,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.CategoricalIndex.min_val_close', index=12,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1222,
serialized_end=1552,
)
_INDEXVALUE_INTERVALINDEX = _descriptor.Descriptor(
name='IntervalIndex',
full_name='IndexValue.IntervalIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.IntervalIndex.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.IntervalIndex.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='closed', full_name='IndexValue.IntervalIndex.closed', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.IntervalIndex.key', index=3,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.IntervalIndex.is_monotonic_increasing', index=4,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.IntervalIndex.is_monotonic_decreasing', index=5,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.IntervalIndex.is_unique', index=6,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.IntervalIndex.should_be_monotonic', index=7,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.IntervalIndex.max_val', index=8,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.IntervalIndex.min_val', index=9,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.IntervalIndex.max_val_close', index=10,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.IntervalIndex.min_val_close', index=11,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1555,
serialized_end=1853,
)
_INDEXVALUE_DATETIMEINDEX = _descriptor.Descriptor(
name='DatetimeIndex',
full_name='IndexValue.DatetimeIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.DatetimeIndex.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.DatetimeIndex.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freq', full_name='IndexValue.DatetimeIndex.freq', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='IndexValue.DatetimeIndex.start', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='periods', full_name='IndexValue.DatetimeIndex.periods', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='IndexValue.DatetimeIndex.end', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='closed', full_name='IndexValue.DatetimeIndex.closed', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tz', full_name='IndexValue.DatetimeIndex.tz', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dayfirst', full_name='IndexValue.DatetimeIndex.dayfirst', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='yearfirst', full_name='IndexValue.DatetimeIndex.yearfirst', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.DatetimeIndex.key', index=10,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.DatetimeIndex.is_monotonic_increasing', index=11,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.DatetimeIndex.is_monotonic_decreasing', index=12,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.DatetimeIndex.is_unique', index=13,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.DatetimeIndex.should_be_monotonic', index=14,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.DatetimeIndex.max_val', index=15,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.DatetimeIndex.min_val', index=16,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.DatetimeIndex.max_val_close', index=17,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.DatetimeIndex.min_val_close', index=18,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1856,
serialized_end=2302,
)
_INDEXVALUE_TIMEDELTAINDEX = _descriptor.Descriptor(
name='TimedeltaIndex',
full_name='IndexValue.TimedeltaIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.TimedeltaIndex.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.TimedeltaIndex.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unit', full_name='IndexValue.TimedeltaIndex.unit', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freq', full_name='IndexValue.TimedeltaIndex.freq', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='IndexValue.TimedeltaIndex.start', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='periods', full_name='IndexValue.TimedeltaIndex.periods', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='IndexValue.TimedeltaIndex.end', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='closed', full_name='IndexValue.TimedeltaIndex.closed', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.TimedeltaIndex.key', index=8,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.TimedeltaIndex.is_monotonic_increasing', index=9,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.TimedeltaIndex.is_monotonic_decreasing', index=10,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.TimedeltaIndex.is_unique', index=11,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.TimedeltaIndex.should_be_monotonic', index=12,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.TimedeltaIndex.max_val', index=13,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.TimedeltaIndex.min_val', index=14,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.TimedeltaIndex.max_val_close', index=15,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.TimedeltaIndex.min_val_close', index=16,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2305,
serialized_end=2717,
)
_INDEXVALUE_PERIODINDEX = _descriptor.Descriptor(
name='PeriodIndex',
full_name='IndexValue.PeriodIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.PeriodIndex.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.PeriodIndex.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freq', full_name='IndexValue.PeriodIndex.freq', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='IndexValue.PeriodIndex.start', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='periods', full_name='IndexValue.PeriodIndex.periods', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='IndexValue.PeriodIndex.end', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='year', full_name='IndexValue.PeriodIndex.year', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='month', full_name='IndexValue.PeriodIndex.month', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quater', full_name='IndexValue.PeriodIndex.quater', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='day', full_name='IndexValue.PeriodIndex.day', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hour', full_name='IndexValue.PeriodIndex.hour', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minute', full_name='IndexValue.PeriodIndex.minute', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='second', full_name='IndexValue.PeriodIndex.second', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tz', full_name='IndexValue.PeriodIndex.tz', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='IndexValue.PeriodIndex.dtype', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.PeriodIndex.key', index=15,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.PeriodIndex.is_monotonic_increasing', index=16,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.PeriodIndex.is_monotonic_decreasing', index=17,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.PeriodIndex.is_unique', index=18,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.PeriodIndex.should_be_monotonic', index=19,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.PeriodIndex.max_val', index=20,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.PeriodIndex.min_val', index=21,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.PeriodIndex.max_val_close', index=22,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.PeriodIndex.min_val_close', index=23,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2720,
serialized_end=3286,
)
_INDEXVALUE_INT64INDEX = _descriptor.Descriptor(
name='Int64Index',
full_name='IndexValue.Int64Index',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.Int64Index.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.Int64Index.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='IndexValue.Int64Index.dtype', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.Int64Index.key', index=3,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.Int64Index.is_monotonic_increasing', index=4,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.Int64Index.is_monotonic_decreasing', index=5,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.Int64Index.is_unique', index=6,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.Int64Index.should_be_monotonic', index=7,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.Int64Index.max_val', index=8,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.Int64Index.min_val', index=9,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.Int64Index.max_val_close', index=10,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.Int64Index.min_val_close', index=11,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3289,
serialized_end=3591,
)
_INDEXVALUE_UINT64INDEX = _descriptor.Descriptor(
name='UInt64Index',
full_name='IndexValue.UInt64Index',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.UInt64Index.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.UInt64Index.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='IndexValue.UInt64Index.dtype', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.UInt64Index.key', index=3,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.UInt64Index.is_monotonic_increasing', index=4,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.UInt64Index.is_monotonic_decreasing', index=5,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.UInt64Index.is_unique', index=6,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.UInt64Index.should_be_monotonic', index=7,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.UInt64Index.max_val', index=8,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.UInt64Index.min_val', index=9,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.UInt64Index.max_val_close', index=10,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.UInt64Index.min_val_close', index=11,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3594,
serialized_end=3897,
)
_INDEXVALUE_FLOAT64INDEX = _descriptor.Descriptor(
name='Float64Index',
full_name='IndexValue.Float64Index',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='IndexValue.Float64Index.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.Float64Index.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='IndexValue.Float64Index.dtype', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.Float64Index.key', index=3,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.Float64Index.is_monotonic_increasing', index=4,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.Float64Index.is_monotonic_decreasing', index=5,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.Float64Index.is_unique', index=6,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.Float64Index.should_be_monotonic', index=7,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.Float64Index.max_val', index=8,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.Float64Index.min_val', index=9,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.Float64Index.max_val_close', index=10,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.Float64Index.min_val_close', index=11,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3900,
serialized_end=4204,
)
_INDEXVALUE_MULTIINDEX = _descriptor.Descriptor(
name='MultiIndex',
full_name='IndexValue.MultiIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='IndexValue.MultiIndex.names', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='IndexValue.MultiIndex.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sortorder', full_name='IndexValue.MultiIndex.sortorder', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='IndexValue.MultiIndex.key', index=3,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_increasing', full_name='IndexValue.MultiIndex.is_monotonic_increasing', index=4,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monotonic_decreasing', full_name='IndexValue.MultiIndex.is_monotonic_decreasing', index=5,
number=53, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_unique', full_name='IndexValue.MultiIndex.is_unique', index=6,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='should_be_monotonic', full_name='IndexValue.MultiIndex.should_be_monotonic', index=7,
number=55, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val', full_name='IndexValue.MultiIndex.max_val', index=8,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val', full_name='IndexValue.MultiIndex.min_val', index=9,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_val_close', full_name='IndexValue.MultiIndex.max_val_close', index=10,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_val_close', full_name='IndexValue.MultiIndex.min_val_close', index=11,
number=59, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4207,
serialized_end=4506,
)
_INDEXVALUE = _descriptor.Descriptor(
name='IndexValue',
full_name='IndexValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='IndexValue.index', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='range_index', full_name='IndexValue.range_index', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='categorical_index', full_name='IndexValue.categorical_index', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interval_index', full_name='IndexValue.interval_index', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datetime_index', full_name='IndexValue.datetime_index', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timedelta_index', full_name='IndexValue.timedelta_index', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='period_index', full_name='IndexValue.period_index', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int64_index', full_name='IndexValue.int64_index', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uint64_index', full_name='IndexValue.uint64_index', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float64_index', full_name='IndexValue.float64_index', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='multi_index', full_name='IndexValue.multi_index', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_INDEXVALUE_INDEX, _INDEXVALUE_RANGEINDEX, _INDEXVALUE_CATEGORICALINDEX, _INDEXVALUE_INTERVALINDEX, _INDEXVALUE_DATETIMEINDEX, _INDEXVALUE_TIMEDELTAINDEX, _INDEXVALUE_PERIODINDEX, _INDEXVALUE_INT64INDEX, _INDEXVALUE_UINT64INDEX, _INDEXVALUE_FLOAT64INDEX, _INDEXVALUE_MULTIINDEX, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='index_value', full_name='IndexValue.index_value',
index=0, containing_type=None, fields=[]),
],
serialized_start=78,
serialized_end=4521,
)
_INDEXVALUE_INDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INDEX.fields_by_name['dtype'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INDEX.containing_type = _INDEXVALUE
_INDEXVALUE_RANGEINDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_RANGEINDEX.fields_by_name['slice'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_RANGEINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_RANGEINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_RANGEINDEX.containing_type = _INDEXVALUE
_INDEXVALUE_CATEGORICALINDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_CATEGORICALINDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_CATEGORICALINDEX.fields_by_name['categories'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_CATEGORICALINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_CATEGORICALINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_CATEGORICALINDEX.containing_type = _INDEXVALUE
_INDEXVALUE_INTERVALINDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INTERVALINDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INTERVALINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INTERVALINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INTERVALINDEX.containing_type = _INDEXVALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['freq'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['start'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['end'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['closed'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['tz'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_DATETIMEINDEX.containing_type = _INDEXVALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['unit'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['freq'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['start'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['end'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['closed'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_TIMEDELTAINDEX.containing_type = _INDEXVALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['freq'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['start'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['end'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['year'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['month'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['quater'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['day'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['hour'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['minute'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['second'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['tz'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['dtype'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_PERIODINDEX.containing_type = _INDEXVALUE
_INDEXVALUE_INT64INDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INT64INDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INT64INDEX.fields_by_name['dtype'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INT64INDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INT64INDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_INT64INDEX.containing_type = _INDEXVALUE
_INDEXVALUE_UINT64INDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_UINT64INDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_UINT64INDEX.fields_by_name['dtype'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_UINT64INDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_UINT64INDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_UINT64INDEX.containing_type = _INDEXVALUE
_INDEXVALUE_FLOAT64INDEX.fields_by_name['name'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_FLOAT64INDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_FLOAT64INDEX.fields_by_name['dtype'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_FLOAT64INDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_FLOAT64INDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_FLOAT64INDEX.containing_type = _INDEXVALUE
_INDEXVALUE_MULTIINDEX.fields_by_name['names'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_MULTIINDEX.fields_by_name['data'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_MULTIINDEX.fields_by_name['max_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_MULTIINDEX.fields_by_name['min_val'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_INDEXVALUE_MULTIINDEX.containing_type = _INDEXVALUE
_INDEXVALUE.fields_by_name['index'].message_type = _INDEXVALUE_INDEX
_INDEXVALUE.fields_by_name['range_index'].message_type = _INDEXVALUE_RANGEINDEX
_INDEXVALUE.fields_by_name['categorical_index'].message_type = _INDEXVALUE_CATEGORICALINDEX
_INDEXVALUE.fields_by_name['interval_index'].message_type = _INDEXVALUE_INTERVALINDEX
_INDEXVALUE.fields_by_name['datetime_index'].message_type = _INDEXVALUE_DATETIMEINDEX
_INDEXVALUE.fields_by_name['timedelta_index'].message_type = _INDEXVALUE_TIMEDELTAINDEX
_INDEXVALUE.fields_by_name['period_index'].message_type = _INDEXVALUE_PERIODINDEX
_INDEXVALUE.fields_by_name['int64_index'].message_type = _INDEXVALUE_INT64INDEX
_INDEXVALUE.fields_by_name['uint64_index'].message_type = _INDEXVALUE_UINT64INDEX
_INDEXVALUE.fields_by_name['float64_index'].message_type = _INDEXVALUE_FLOAT64INDEX
_INDEXVALUE.fields_by_name['multi_index'].message_type = _INDEXVALUE_MULTIINDEX
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['index'])
_INDEXVALUE.fields_by_name['index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['range_index'])
_INDEXVALUE.fields_by_name['range_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['categorical_index'])
_INDEXVALUE.fields_by_name['categorical_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['interval_index'])
_INDEXVALUE.fields_by_name['interval_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['datetime_index'])
_INDEXVALUE.fields_by_name['datetime_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['timedelta_index'])
_INDEXVALUE.fields_by_name['timedelta_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['period_index'])
_INDEXVALUE.fields_by_name['period_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['int64_index'])
_INDEXVALUE.fields_by_name['int64_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['uint64_index'])
_INDEXVALUE.fields_by_name['uint64_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['float64_index'])
_INDEXVALUE.fields_by_name['float64_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
_INDEXVALUE.oneofs_by_name['index_value'].fields.append(
_INDEXVALUE.fields_by_name['multi_index'])
_INDEXVALUE.fields_by_name['multi_index'].containing_oneof = _INDEXVALUE.oneofs_by_name['index_value']
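# Note: the statements above register each index variant field as a member of
# the 'index_value' oneof, so an IndexValue message carries at most one variant
# at a time; assigning to one variant automatically clears the others.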
DESCRIPTOR.message_types_by_name['IndexValue'] = _INDEXVALUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IndexValue = _reflection.GeneratedProtocolMessageType('IndexValue', (_message.Message,), dict(
Index = _reflection.GeneratedProtocolMessageType('Index', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_INDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.Index)
))
,
RangeIndex = _reflection.GeneratedProtocolMessageType('RangeIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_RANGEINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.RangeIndex)
))
,
CategoricalIndex = _reflection.GeneratedProtocolMessageType('CategoricalIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_CATEGORICALINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.CategoricalIndex)
))
,
IntervalIndex = _reflection.GeneratedProtocolMessageType('IntervalIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_INTERVALINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.IntervalIndex)
))
,
DatetimeIndex = _reflection.GeneratedProtocolMessageType('DatetimeIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_DATETIMEINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.DatetimeIndex)
))
,
TimedeltaIndex = _reflection.GeneratedProtocolMessageType('TimedeltaIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_TIMEDELTAINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.TimedeltaIndex)
))
,
PeriodIndex = _reflection.GeneratedProtocolMessageType('PeriodIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_PERIODINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.PeriodIndex)
))
,
Int64Index = _reflection.GeneratedProtocolMessageType('Int64Index', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_INT64INDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.Int64Index)
))
,
UInt64Index = _reflection.GeneratedProtocolMessageType('UInt64Index', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_UINT64INDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.UInt64Index)
))
,
Float64Index = _reflection.GeneratedProtocolMessageType('Float64Index', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_FLOAT64INDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.Float64Index)
))
,
MultiIndex = _reflection.GeneratedProtocolMessageType('MultiIndex', (_message.Message,), dict(
DESCRIPTOR = _INDEXVALUE_MULTIINDEX,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue.MultiIndex)
))
,
DESCRIPTOR = _INDEXVALUE,
__module__ = 'mars.serialize.protos.indexvalue_pb2'
# @@protoc_insertion_point(class_scope:IndexValue)
))
_sym_db.RegisterMessage(IndexValue)
_sym_db.RegisterMessage(IndexValue.Index)
_sym_db.RegisterMessage(IndexValue.RangeIndex)
_sym_db.RegisterMessage(IndexValue.CategoricalIndex)
_sym_db.RegisterMessage(IndexValue.IntervalIndex)
_sym_db.RegisterMessage(IndexValue.DatetimeIndex)
_sym_db.RegisterMessage(IndexValue.TimedeltaIndex)
_sym_db.RegisterMessage(IndexValue.PeriodIndex)
_sym_db.RegisterMessage(IndexValue.Int64Index)
_sym_db.RegisterMessage(IndexValue.UInt64Index)
_sym_db.RegisterMessage(IndexValue.Float64Index)
_sym_db.RegisterMessage(IndexValue.MultiIndex)
# @@protoc_insertion_point(module_scope)
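A minimal usage sketch for the generated module, assuming it is importable as mars.serialize.protos.indexvalue_pb2 (the class and field names below come from the descriptors above; everything else is the standard protobuf message API):

from mars.serialize.protos import indexvalue_pb2

iv = indexvalue_pb2.IndexValue()
iv.int64_index.key = 'abc123'      # touching a nested message selects its oneof variant
iv.int64_index.is_unique = True
assert iv.WhichOneof('index_value') == 'int64_index'

payload = iv.SerializeToString()   # round-trip through the wire format
parsed = indexvalue_pb2.IndexValue()
parsed.ParseFromString(payload)
assert parsed.int64_index.key == 'abc123'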
| 54.752934 | 8,847 | 0.760287 | 12,706 | 93,299 | 5.253975 | 0.025421 | 0.061597 | 0.060353 | 0.050032 | 0.884102 | 0.863056 | 0.836422 | 0.835388 | 0.831778 | 0.829621 | 0 | 0.050029 | 0.122038 | 93,299 | 1,703 | 8,848 | 54.785085 | 0.764946 | 0.009646 | 0 | 0.737735 | 1 | 0.000606 | 0.197062 | 0.166039 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.003634 | 0 | 0.003634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
f9145b5d3a4eeb1152ea34317aaf4e1154cd3bfe | 345 | wsgi | Python | Tools/english_word/application.wsgi | pynickle/awesome-python-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | ["BSD-2-Clause"] | 21 | 2019-06-02T01:55:14.000Z | 2022-01-08T22:35:31.000Z | Tools/english_word/application.wsgi | code-nick-python/daily-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | ["BSD-2-Clause"] | 3 | 2019-06-02T01:55:17.000Z | 2019-06-14T12:32:06.000Z | Tools/english_word/application.wsgi | code-nick-python/daily-tools | e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07 | ["BSD-2-Clause"] | 16 | 2019-06-23T13:00:04.000Z | 2021-09-18T06:09:58.000Z |
import sys
sys.path.insert(0, r"C:\Users\Nick\Desktop\my-github\amazing-python\Tools\english_word")
sys.path.insert(0, r"C:\Users\Nick\Desktop\my-github\amazing-python\Tools\english_word\templates")
sys.path.insert(0, r"C:\Users\Nick\Desktop\my-github\amazing-python\Tools\english_word\static")
from app import app
application = app
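A .wsgi script only has to expose a module-level application callable, so it can be smoke-tested locally with the standard library. The sketch below stubs the callable so it runs anywhere; in the script above, application is the imported Flask app instead.
from wsgiref.simple_server import make_server

def application(environ, start_response):
    # Stub WSGI app standing in for the Flask app imported above.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'wsgi entry point is alive']

if __name__ == '__main__':
    with make_server('127.0.0.1', 8000, application) as httpd:
        httpd.handle_request()  # serve one request, then exit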
| 38.333333
| 99
| 0.765217
| 58
| 345
| 4.5
| 0.37931
| 0.08046
| 0.149425
| 0.16092
| 0.793103
| 0.793103
| 0.793103
| 0.793103
| 0.793103
| 0.793103
| 0
| 0.009434
| 0.078261
| 345
| 8
| 100
| 43.125
| 0.811321
| 0
| 0
| 0
| 0
| 0.333333
| 0.62908
| 0.62908
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
00bc1f696f545abc7a0475b4a8f6ed7790f24f1c
| 182
|
py
|
Python
|
math_ops/pyGF2/__init__.py
|
popcornell/pyLEDAkem
|
af1b45aa71f4b6e6e4c60dfab10799a5dbf14853
|
[
"MIT"
] | null | null | null |
math_ops/pyGF2/__init__.py
|
popcornell/pyLEDAkem
|
af1b45aa71f4b6e6e4c60dfab10799a5dbf14853
|
[
"MIT"
] | null | null | null |
math_ops/pyGF2/__init__.py
|
popcornell/pyLEDAkem
|
af1b45aa71f4b6e6e4c60dfab10799a5dbf14853
|
[
"MIT"
] | null | null | null |
from math_ops.pyGF2.gf2_add import gf2_add
from math_ops.pyGF2.gf2_mul import gf2_mul
from math_ops.pyGF2.gf2_div import gf2_div
from math_ops.pyGF2.gf2_inv import gf2_inv, gf2_xgcd
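The re-exported package implements polynomial arithmetic over GF(2), where addition (and subtraction) reduces to XOR on the coefficient arrays. The sketch below illustrates that idea independently of pyGF2's own implementation, which is not shown here.
import numpy as np

def gf2_add_sketch(a, b):
    # Add GF(2) polynomials given as uint8 coefficient arrays (lowest
    # degree first); over GF(2), addition is coefficient-wise XOR.
    out = np.zeros(max(len(a), len(b)), dtype=np.uint8)
    out[:len(a)] ^= a
    out[:len(b)] ^= b
    return out

# (x^2 + 1) + (x + 1) = x^2 + x over GF(2)
assert gf2_add_sketch(np.array([1, 0, 1], dtype=np.uint8),
                      np.array([1, 1], dtype=np.uint8)).tolist() == [0, 1, 1]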
| 36.4
| 52
| 0.857143
| 38
| 182
| 3.763158
| 0.289474
| 0.223776
| 0.307692
| 0.447552
| 0.531469
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078788
| 0.093407
| 182
| 4
| 53
| 45.5
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
975f7cba5d63f58d295610866209e34580eb05a5
| 46
|
py
|
Python
|
tests/__init__.py
|
bevy/Opentok-Python-SDK
|
f7b821160752d2383120c08f34b4c7ad66ddb161
|
[
"MIT"
] | 39
|
2015-03-30T19:51:46.000Z
|
2021-07-26T03:59:37.000Z
|
tests/__init__.py
|
bevy/Opentok-Python-SDK
|
f7b821160752d2383120c08f34b4c7ad66ddb161
|
[
"MIT"
] | 108
|
2015-01-05T19:41:05.000Z
|
2021-10-29T19:14:14.000Z
|
tests/__init__.py
|
bevy/Opentok-Python-SDK
|
f7b821160752d2383120c08f34b4c7ad66ddb161
|
[
"MIT"
] | 73
|
2015-03-18T17:54:39.000Z
|
2022-03-23T06:51:06.000Z
|
from .validate_jwt import validate_jwt_header
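The imported helper itself is not shown, but the generic shape of a JWT header check is easy to sketch: the JOSE header is the base64url-encoded JSON segment before the first dot. The function below is hypothetical and inspects the header only, with no signature verification.
import base64
import json

def decode_jwt_header(token):
    # Hypothetical helper: decode the unverified JOSE header of a JWT.
    header_b64 = token.split('.', 1)[0]
    header_b64 += '=' * (-len(header_b64) % 4)  # restore stripped padding
    return json.loads(base64.urlsafe_b64decode(header_b64))

# {"alg":"HS256","typ":"JWT"} encoded as the canonical example header
hdr = decode_jwt_header('eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.payload.sig')
assert hdr['alg'] == 'HS256'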
| 23
| 45
| 0.891304
| 7
| 46
| 5.428571
| 0.714286
| 0.578947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9776125332bfacf5aa5768ec1212345299b501d6
| 2,982
|
py
|
Python
|
tests/testPopulateDatabase.py
|
Rojber/open_password_management_API
|
5d86a83248cb9e13bec9ff842d54abf814e42abe
|
[
"MIT"
] | 2
|
2021-02-10T17:46:57.000Z
|
2021-02-19T17:53:49.000Z
|
tests/testPopulateDatabase.py
|
Rojber/open_password_management_API
|
5d86a83248cb9e13bec9ff842d54abf814e42abe
|
[
"MIT"
] | null | null | null |
tests/testPopulateDatabase.py
|
Rojber/open_password_management_API
|
5d86a83248cb9e13bec9ff842d54abf814e42abe
|
[
"MIT"
] | null | null | null |
from random import randint
from bson.objectid import ObjectId
def populate(db, client_encryption, data_key_id):
account1 = {
'email': client_encryption.encrypt("test1@test.com", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'login': client_encryption.encrypt("test1", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'password': client_encryption.encrypt("test1", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'logindata': [
{
"_id": ObjectId(),
'site': "test_login_site1.com",
'login': client_encryption.encrypt("test_login1", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt("test_password1", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'passwordStrength': randint(1, 5),
'note': "note1"
},
{
"_id": ObjectId(),
'site': "test_login_site2.com",
'login': client_encryption.encrypt("test_login2", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt("test_password2", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'passwordStrength': randint(1, 5),
'note': "note2"
}
]
}
account2 = {
'email': client_encryption.encrypt("test2@test.com", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'login': client_encryption.encrypt("test2", "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'password': client_encryption.encrypt("test2", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'logindata': [
{
"_id": ObjectId(),
'site': "test_login_site1.com",
'login': client_encryption.encrypt("test_login1", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt("test_password1", "AEAD_AES_256_CBC_HMAC_SHA_512-Random",
data_key_id),
'passwordStrength': randint(1, 5),
'note': "note1"
},
{
"_id": ObjectId(),
'site': "test_login_site2.com",
'login': client_encryption.encrypt("test_login2", "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt("test_password2", "AEAD_AES_256_CBC_HMAC_SHA_512-Random",
data_key_id),
'passwordStrength': randint(1, 5),
'note': "note2"
}
]
}
# Insert users directly into MongoDB
db.passwordManager.accounts.insert_one(account1)
db.passwordManager.accounts.insert_one(account2)
print('Finished creating testing database')
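Because populate() only needs an object with an encrypt() method and a database handle whose collection exposes insert_one(), it can be exercised without a running MongoDB or key vault. A minimal sketch with unittest.mock, standing in for pymongo's client and ClientEncryption:
from unittest.mock import MagicMock

db = MagicMock()
client_encryption = MagicMock()
client_encryption.encrypt.side_effect = lambda value, alg, key_id: b'<ciphertext>'

populate(db, client_encryption, data_key_id=b'fake-key-id')
assert db.passwordManager.accounts.insert_one.call_count == 2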
| 48.885246
| 125
| 0.600939
| 331
| 2,982
| 4.957704
| 0.178248
| 0.146252
| 0.082267
| 0.110908
| 0.844607
| 0.803169
| 0.803169
| 0.803169
| 0.803169
| 0.803169
| 0
| 0.055166
| 0.282696
| 2,982
| 60
| 126
| 49.7
| 0.712015
| 0.011402
| 0
| 0.444444
| 0
| 0
| 0.348948
| 0.180584
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0.222222
| 0.037037
| 0
| 0.055556
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
97ad270ecc5f99396b41d54038b10fe75fa69760
| 9,333
|
py
|
Python
|
tests/unit/test_serialize_ess.py
|
tottoto/nifcloud-sdk-python
|
8f105b89ea813c5500d595193b718475867646b4
|
[
"Apache-2.0"
] | 17
|
2018-04-03T03:34:04.000Z
|
2020-07-10T11:14:52.000Z
|
tests/unit/test_serialize_ess.py
|
tottoto/nifcloud-sdk-python
|
8f105b89ea813c5500d595193b718475867646b4
|
[
"Apache-2.0"
] | 48
|
2018-04-05T02:00:09.000Z
|
2022-01-27T06:38:00.000Z
|
tests/unit/test_serialize_ess.py
|
tottoto/nifcloud-sdk-python
|
8f105b89ea813c5500d595193b718475867646b4
|
[
"Apache-2.0"
] | 9
|
2018-04-03T04:37:38.000Z
|
2020-06-12T07:31:34.000Z
|
from botocore.model import ServiceModel
from nifcloud import serialize
class TestEssSerializer(object):
ess_model_metadata = {
"apiVersion": "2010-12-01N2014-05-28",
"endpointPrefix": "ess",
"protocol": "ess",
"serviceAbbreviation": "ess",
"serviceFullName": "NIFCLOUD ESS",
"serviceId": "ess",
"signatureVersion": "v4",
"signingName": "email",
"uid": "ess-2010-12-01N2014-05-28"
}
def test_EssSerializer(self):
ess_model = {
"metadata": self.ess_model_metadata,
"operations": {
"EssOperation": {
"http": {
"method": "POST",
"requestUri": "/"
},
"input": {
"shape": "EssOperationRequest"
},
"name": "essOperation",
"output": {
"shape": "EssOperationResult"
}
}
},
"shapes": {
"EssOperationRequest": {
"members": {
"Parameter": {
"locationName": "Parameter",
"shape": "String"
}
},
"name": "EssOperationRequest",
"type": "structure"
},
"EssOperationResult": {
"members": {
"Response": {
"locationName": "Response",
"shape": "String"
}
},
"name": "EssOperationResult",
"type": "structure"
},
"String": {
"name": "String",
"type": "string"
},
}
}
ess_service_model = ServiceModel(ess_model)
params = {
"Parameter": "test"
}
ess_serializer = serialize.EssSerializer()
res = ess_serializer.serialize_to_request(
params, ess_service_model.operation_model("EssOperation"))
assert res["body"] == {"Action": "EssOperation", "Parameter": "test", "Version": "2010-12-01N2014-05-28"}
assert res["headers"] == {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}
assert res["method"] == "POST"
assert res["query_string"] == ""
assert res["url_path"] == "/"
def test_EssSerializer_GetDeliveryLog(self):
ess_model = {
"metadata": self.ess_model_metadata,
"operations": {
"GetDeliveryLog": {
"http": {
"method": "POST",
"requestUri": "/"
},
"input": {
"shape": "GetDeliveryLogRequest"
},
"name": "essOperation",
"output": {
"shape": "EssOperationResult"
}
}
},
"shapes": {
"GetDeliveryLogRequest": {
"members": {
"EndDate": {
"locationName": "EndDate",
"shape": "TStamp"
},
"MaxItems": {
"locationName": "MaxItems",
"shape": "Integer"
},
"NextToken": {
"locationName": "NextToken",
"shape": "String"
},
"StartDate": {
"locationName": "StartDate",
"shape": "TStamp"
},
"Status": {
"locationName": "Status",
"shape": "Integer"
}
},
"name": "GetDeliveryLogRequest",
"required": [
"EndDate",
"StartDate"
],
"type": "structure"
},
"EssOperationResult": {
"members": {
"Response": {
"locationName": "Response",
"shape": "String"
}
},
"name": "EssOperationResult",
"type": "structure"
},
"Integer": {
"name": "Integer",
"type": "integer"
},
"TStamp": {
"name": "TStamp",
"type": "timestamp"
},
"String": {
"name": "String",
"type": "string"
}
}
}
ess_service_model = ServiceModel(ess_model)
params = {}
ess_serializer = serialize.EssSerializer()
res = ess_serializer.serialize_to_request(
params, ess_service_model.operation_model("GetDeliveryLog"))
assert res["body"] == {"Action": "GetDeliveryLog", "Version": "2010-12-01N2014-05-28"}
assert res["headers"] == {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}
assert res["method"] == "POST"
assert res["query_string"] == ""
assert res["url_path"] == "/"
def test_EssSerializer_GetDeliveryLog_with_status(self):
ess_model = {
"metadata": self.ess_model_metadata,
"operations": {
"GetDeliveryLog": {
"http": {
"method": "POST",
"requestUri": "/"
},
"input": {
"shape": "GetDeliveryLogRequest"
},
"name": "essOperation",
"output": {
"shape": "EssOperationResult"
}
}
},
"shapes": {
"GetDeliveryLogRequest": {
"members": {
"EndDate": {
"locationName": "EndDate",
"shape": "TStamp"
},
"MaxItems": {
"locationName": "MaxItems",
"shape": "Integer"
},
"NextToken": {
"locationName": "NextToken",
"shape": "String"
},
"StartDate": {
"locationName": "StartDate",
"shape": "TStamp"
},
"Status": {
"locationName": "Status",
"shape": "Integer"
}
},
"name": "GetDeliveryLogRequest",
"required": [
"EndDate",
"StartDate"
],
"type": "structure"
},
"EssOperationResult": {
"members": {
"Response": {
"locationName": "Response",
"shape": "String"
}
},
"name": "EssOperationResult",
"type": "structure"
},
"Integer": {
"name": "Integer",
"type": "integer"
},
"TStamp": {
"name": "TStamp",
"type": "timestamp"
},
"String": {
"name": "String",
"type": "string"
}
}
}
ess_service_model = ServiceModel(ess_model)
params = {
"Status": "test_status",
"MaxItems": 1,
"NextToken": "test_token",
"StartDate": "2017-12-13T00:00:00Z",
"EndDate": "2017-12-13T23:59:00Z"
}
ess_serializer = serialize.EssSerializer()
res = ess_serializer.serialize_to_request(
params, ess_service_model.operation_model("GetDeliveryLog"))
assert res["body"] == {
"Action": "GetDeliveryLog",
"Version": "2010-12-01N2014-05-28",
"Status": "test_status",
"MaxItems": 1,
"NextToken": "test_token",
"StartDate": "2017-12-13T00:00",
"EndDate": "2017-12-13T23:59"
}
assert res["headers"] == {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}
assert res["method"] == "POST"
assert res["query_string"] == ""
assert res["url_path"] == "/"
| 35.758621
| 113
| 0.355298
| 494
| 9,333
| 6.595142
| 0.194332
| 0.041436
| 0.034377
| 0.036832
| 0.85574
| 0.833026
| 0.806937
| 0.806937
| 0.806937
| 0.79159
| 0
| 0.030963
| 0.522447
| 9,333
| 260
| 114
| 35.896154
| 0.700022
| 0
| 0
| 0.633466
| 0
| 0
| 0.264117
| 0.036108
| 0
| 0
| 0
| 0
| 0.059761
| 1
| 0.011952
| false
| 0
| 0.007968
| 0
| 0.027888
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8adda21d5415b6dbf2d7c8c8247bb38715422bdd
| 2,653
|
py
|
Python
|
shenfun/optimization/numba/legendre.py
|
spectralDNS/shenfun
|
956633aa0f1638db5ebdc497ff68a438aa22b932
|
[
"BSD-2-Clause"
] | 138
|
2017-06-17T13:30:27.000Z
|
2022-03-20T02:33:47.000Z
|
shenfun/optimization/numba/legendre.py
|
liqihao2000/shenfun
|
2164596ccf906242779d9ec361168246ee6214d8
|
[
"BSD-2-Clause"
] | 73
|
2017-05-16T06:53:04.000Z
|
2022-02-04T10:40:44.000Z
|
shenfun/optimization/numba/legendre.py
|
liqihao2000/shenfun
|
2164596ccf906242779d9ec361168246ee6214d8
|
[
"BSD-2-Clause"
] | 38
|
2018-01-31T14:37:01.000Z
|
2022-03-31T15:07:27.000Z
|
import numba as nb
import numpy as np
@nb.jit(nopython=True, fastmath=True, cache=True)
def legendre_shendirichlet_scalar_product(xj, wj, input_array, output_array, is_scaled=True):
N = xj.shape[0]
phi_i = np.zeros_like(xj)
Lnm = np.ones_like(xj)
Ln = xj.copy()
Lnp = ((2+1)*xj*Ln - 1*Lnm)/2
for i in range(N-2):
s = 0.0
s2 = (i+2)/(i+3)
s1 = (2*(i+2)+1)/(i+3)
ss = np.sqrt(4*i+6)
for j in range(N):
phi_i[j] = Lnm[j]-Lnp[j]
if is_scaled:
phi_i[j] /= ss
s += phi_i[j]*wj[j]*input_array[j]
Lnm[j] = Ln[j]
Ln[j] = Lnp[j]
Lnp[j] = s1*xj[j]*Ln[j] - s2*Lnm[j]
output_array[i] = s
@nb.jit(nopython=True, fastmath=True, cache=True)
def legendre_shenneumann_scalar_product(xj, wj, input_array, output_array):
N = xj.shape[0]
phi_i = np.zeros_like(xj)
Lnm = np.ones_like(xj)
Ln = xj.copy()
Lnp = ((2+1)*xj*Ln - 1*Lnm)/2
for i in range(N-2):
s = 0.0
s2 = (i+2)/(i+3)
s1 = (2*(i+2)+1)/(i+3)
for j in range(N):
phi_i[j] = Lnm[j]-Lnp[j]*(i*(i+1)/(i+2)/(i+3))
s += phi_i[j]*wj[j]*input_array[j]
Lnm[j] = Ln[j]
Ln[j] = Lnp[j]
Lnp[j] = s1*xj[j]*Ln[j] - s2*Lnm[j]
output_array[i] = s
@nb.jit(nopython=True, fastmath=True, cache=True)
def legendre_shendirichlet_evaluate_expansion_all(xj, input_array, output_array, is_scaled=True):
N = xj.shape[0]
phi_i = np.zeros_like(xj)
Lnm = np.ones_like(xj)
Ln = xj.copy()
Lnp = ((2+1)*xj*Ln - 1*Lnm)/2
output_array[:] = 0
for i in range(N-2):
s2 = (i+2)/(i+3)
s1 = (2*(i+2)+1)/(i+3)
ss = np.sqrt(4*i+6)
for j in range(N):
phi_i[j] = Lnm[j]-Lnp[j]
if is_scaled:
phi_i[j] /= ss
output_array[j] += phi_i[j]*input_array[i]
Lnm[j] = Ln[j]
Ln[j] = Lnp[j]
Lnp[j] = s1*xj[j]*Ln[j] - s2*Lnm[j]
@nb.jit(nopython=True, fastmath=True, cache=True)
def legendre_shenneumann_evaluate_expansion_all(xj, input_array, output_array):
N = xj.shape[0]
phi_i = np.zeros_like(xj)
Lnm = np.ones_like(xj)
Ln = xj.copy()
Lnp = ((2+1)*xj*Ln - 1*Lnm)/2
output_array[:] = 0
for i in range(N-2):
s2 = (i+2)/(i+3)
s1 = (2*(i+2)+1)/(i+3)
for j in range(N):
phi_i[j] = Lnm[j]-Lnp[j]*(i*(i+1)/(i+2)/(i+3))
output_array[j] += phi_i[j]*input_array[i]
Lnm[j] = Ln[j]
Ln[j] = Lnp[j]
Lnp[j] = s1*xj[j]*Ln[j] - s2*Lnm[j]
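All four kernels above consume Gauss-Legendre nodes and weights; a minimal sketch of driving the Shen-Dirichlet scalar product, assuming numba is installed and using numpy's leggauss for the quadrature rule:
import numpy as np

N = 16
xj, wj = np.polynomial.legendre.leggauss(N)  # quadrature nodes and weights
u = np.sin(np.pi * xj)                       # function sampled at the nodes
uh = np.zeros(N)
legendre_shendirichlet_scalar_product(xj, wj, u, uh)
# uh[:N-2] now holds the inner products against the N-2 basis functions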
| 31.583333
| 97
| 0.500565
| 482
| 2,653
| 2.634855
| 0.112033
| 0.044094
| 0.047244
| 0.018898
| 0.976378
| 0.976378
| 0.976378
| 0.976378
| 0.914961
| 0.914961
| 0
| 0.043526
| 0.307199
| 2,653
| 83
| 98
| 31.963855
| 0.647443
| 0
| 0
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.025641
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c13013a900f66134e9aec2248fef79c6657feaf6
| 8,477
|
py
|
Python
|
EvalData/migrations/0032_auto_20190228_0821.py
|
amalinovskiy/Appraise
|
03446dacebd91c556b29420fe917e2b0547047bd
|
[
"BSD-3-Clause"
] | 11
|
2021-02-08T08:40:23.000Z
|
2022-03-30T09:56:40.000Z
|
EvalData/migrations/0032_auto_20190228_0821.py
|
amalinovskiy/Appraise
|
03446dacebd91c556b29420fe917e2b0547047bd
|
[
"BSD-3-Clause"
] | 29
|
2021-01-23T16:50:47.000Z
|
2022-03-25T13:46:01.000Z
|
EvalData/migrations/0032_auto_20190228_0821.py
|
amalinovskiy/Appraise
|
03446dacebd91c556b29420fe917e2b0547047bd
|
[
"BSD-3-Clause"
] | 5
|
2021-05-22T14:34:47.000Z
|
2021-08-23T15:50:05.000Z
|
# Generated by Django 2.1.5 on 2019-02-28 16:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('EvalData', '0031_auto_20171024_2144'),
]
operations = [
migrations.AlterModelOptions(
name='directassessmentresult',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='directassessmenttask',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='market',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='metadata',
options={'ordering': ['_str_name'], 'verbose_name': 'Metadata record'},
),
migrations.AlterModelOptions(
name='multimodalassessmentresult',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='multimodalassessmenttask',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='textpair',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='textpairwithimage',
options={'ordering': ['_str_name']},
),
migrations.AlterModelOptions(
name='textsegment',
options={'ordering': ['_str_name']},
),
migrations.AlterField(
model_name='directassessmentresult',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='directassessmentresult',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='directassessmentresult',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='directassessmenttask',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='directassessmenttask',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='directassessmenttask',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='market',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='market',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='market',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='metadata',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='metadata',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='metadata',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='multimodalassessmentresult',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='multimodalassessmentresult',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='multimodalassessmentresult',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='multimodalassessmenttask',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='multimodalassessmenttask',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='multimodalassessmenttask',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='taskagenda',
name='campaign',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='Campaign.Campaign', verbose_name='Campaign'),
),
migrations.AlterField(
model_name='taskagenda',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='textpair',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='textpair',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='textpair',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='textpairwithimage',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='textpairwithimage',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='textpairwithimage',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='textsegment',
name='activated',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Activated?'),
),
migrations.AlterField(
model_name='textsegment',
name='completed',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Completed?'),
),
migrations.AlterField(
model_name='textsegment',
name='retired',
field=models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Retired?'),
),
migrations.AlterField(
model_name='workagenda',
name='campaign',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='Campaign.Campaign', verbose_name='Campaign'),
),
migrations.AlterField(
model_name='workagenda',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
]
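Auto-generated migrations like this one are applied with Django's migrate command; the equivalent from Python, assuming DJANGO_SETTINGS_MODULE points at this project's settings:
import django
from django.core.management import call_command

django.setup()
call_command('migrate', 'EvalData')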
| 40.951691
| 131
| 0.601274
| 751
| 8,477
| 6.628495
| 0.087883
| 0.070711
| 0.155685
| 0.180595
| 0.907995
| 0.903576
| 0.798313
| 0.723785
| 0.723785
| 0.723785
| 0
| 0.005038
| 0.274154
| 8,477
| 206
| 132
| 41.150485
| 0.803998
| 0.005308
| 0
| 0.905
| 1
| 0
| 0.163464
| 0.036892
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015
| 0
| 0.03
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c1e4d0bb443a364b02d021c5c6ac965d1975d8bf
| 3,940
|
py
|
Python
|
confusion_matrices.py
|
biolib/deepclip
|
06e0a3c431db76745b6674afabc4d171f19b3eb0
|
[
"MIT"
] | 7
|
2019-07-23T10:20:11.000Z
|
2022-03-14T14:46:13.000Z
|
confusion_matrices.py
|
biolib/deepclip
|
06e0a3c431db76745b6674afabc4d171f19b3eb0
|
[
"MIT"
] | 10
|
2019-09-05T22:45:04.000Z
|
2022-03-21T08:40:49.000Z
|
confusion_matrices.py
|
biolib/deepclip
|
06e0a3c431db76745b6674afabc4d171f19b3eb0
|
[
"MIT"
] | 5
|
2019-07-23T10:20:46.000Z
|
2021-11-14T07:18:05.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
def gradient_cm2(sr, a1):
"""
This function adds the probabilities of a sequence being a class.
    The probabilities are derived from the CNN. The top row is the percent-wise
    distribution of class 0 predictions and the bottom row is the same
for class 1.
"""
x = np.zeros((2, 2))
for i in sr:
x[0] += i
for i in a1:
x[1] += i
x1 = np.vstack(np.array([x[0], x[1]]))
x2 = np.vstack(np.array([x[0] / len(sr), x[1] / len(a1)]))
# x2 = np.array(x2).transpose()
# print '\n The added pure-predictions of the CNN:')
# print(str(x1))
# print('\n The percent-wise distribution of the model predictions:')
# print(str(x2))
return x1, x2
def bernoulli_cm2(sr, a1):
"""
    The predictions of the CNN are treated as Bernoulli trials
('success' or 'failure')
"""
x = np.zeros((2, 2))
for i in sr:
x[0] += np.round(i)
for i in a1:
x[1] += np.round(i)
x1 = np.vstack(np.array([x[0], x[1]]))
x2 = np.vstack(np.array([x[0] / len(sr), x[1] / len(a1)]))
# x2 = np.array(x2).transpose()
print('\n The Bernoulli distribution:')
print(str(x1))
print('\n The percent-wise Bernoulli distribution:')
print(str(x2))
return x1, x2
def gradient_cm3(sr, a1, p):
"""
This function adds the probabilities of a sequence being a class.
    The probabilities are derived from the CNN. The top row is the percent-wise
    distribution of SRSF5 predictions and the bottom row is the same
for hnRNPA1.
"""
x = np.zeros((3, 3))
for i in sr:
x[0] += i
for i in a1:
x[1] += i
for i in p:
x[2] += i
x2 = x / np.sum(x, 1).reshape((3, 1))
x2 = np.array(x2).transpose()
# print('\n The added pure-predictions of the CNN:')
# print(str(x1))
print('\n The percent-wise distribution of the model predictions:')
print(str(x2))
return x, x2
def bernoulli_cm3(sr, a1, p):
"""
    The predictions of the CNN are treated as Bernoulli trials
    ('success' or 'failure')
"""
x = np.zeros((3, 3))
for i in sr:
x[0][np.argmax(i)] += 1
for i in a1:
x[1][np.argmax(i)] += 1
for i in p:
x[2][np.argmax(i)] += 1
x2 = x / np.sum(x, 1).reshape((3, 1))
x2 = np.array(x2).transpose()
# print('\n pure Bernoulli distribution:')
# print(str(x1))
print('\n The percent-wise Bernoulli distribution:')
print(str(x2))
return x, x2
def gradient_cm2_binary(sr, a1):
"""
This function adds the probabilities of a sequence being a class.
    The probabilities are derived from the CNN. The top row is the percent-wise
    distribution of SRSF5 predictions and the bottom row is the same
for hnRNPA1.
"""
x = np.zeros((2, 2))
for i in sr:
x[0] += np.array([1 - i[0], 1 - (1 - i[0])])
for i in a1:
x[1] += np.array([1 - i[0], 1 - (1 - i[0])])
x1 = np.vstack(np.array([x[0], x[1]]))
x2 = np.vstack(np.array([x[0] / len(sr), x[1] / len(a1)]))
# x2 = np.array(x2).transpose()
# print('\n The added pure-predictions of the CNN:')
# print(str(x1))
print('\n The percent-wise distribution of the model predictions:')
print(str(x2))
return x1, x2
def bernoulli_cm2_binary(sr, a1):
"""
    The predictions of the CNN are treated as Bernoulli trials
    ('success' or 'failure')
"""
x = np.zeros((2, 2))
for i in sr:
x[0] += np.round(np.array([1 - i[0], 1 - (1 - i[0])]))
for i in a1:
x[1] += np.round(np.array([1 - i[0], 1 - (1 - i[0])]))
x1 = np.vstack(np.array([x[0], x[1]]))
x2 = np.vstack(np.array([x[0] / len(sr), x[1] / len(a1)]))
# x2 = np.array(x2).transpose()
print('\n Validation confusion matrix:')
print(str(x1))
print('\n Validation accuracy in percent:')
print(str(x2))
return x1, x2
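A quick synthetic check of gradient_cm2: feed it per-sample class-probability pairs for two groups and inspect the summed and row-normalized matrices. Dirichlet draws are used here only to produce plausible probability vectors.
import numpy as np

rng = np.random.default_rng(0)
sr = rng.dirichlet([5.0, 1.0], size=100)  # group mostly predicted class 0
a1 = rng.dirichlet([1.0, 5.0], size=100)  # group mostly predicted class 1

totals, fractions = gradient_cm2(sr, a1)
assert totals.shape == (2, 2)
assert np.allclose(fractions.sum(axis=1), 1.0)  # each row is a distribution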
| 25.751634
| 79
| 0.563959
| 653
| 3,940
| 3.390505
| 0.122511
| 0.056911
| 0.03794
| 0.054201
| 0.918248
| 0.899729
| 0.885276
| 0.859982
| 0.852304
| 0.835592
| 0
| 0.050209
| 0.272081
| 3,940
| 152
| 80
| 25.921053
| 0.721757
| 0.351269
| 0
| 0.726027
| 0
| 0
| 0.123596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.013699
| 0
| 0.178082
| 0.191781
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e79d52bbbc6663485835a24f9f4b0ce12f953499
| 9,490
|
py
|
Python
|
MCEq/geometry/myfunc_py3.py
|
dunedyn/MCEq_AIRS
|
dee4aff5611f5884be245fba15a449c2c676059a
|
[
"BSD-3-Clause"
] | null | null | null |
MCEq/geometry/myfunc_py3.py
|
dunedyn/MCEq_AIRS
|
dee4aff5611f5884be245fba15a449c2c676059a
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T02:36:45.000Z
|
2021-06-02T02:36:45.000Z
|
MCEq/geometry/myfunc_py3.py
|
dunedyn/MCEq_AIRS
|
dee4aff5611f5884be245fba15a449c2c676059a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 12:30:46 2017
@author: Jakob
"""
#!/usr/bin/env python
import os
import tarfile
import numpy
#from pyhdf import HDF, SD, VS
#import warnings
#warnings.simplefilter('ignore', DeprecationWarning)
#--- function to read vdata
def read_vdata(vs, attribute):
vd = vs.attach(attribute)
vdata = vd.read()
vd.detach()
return vdata
#--- function to read sd data
def read_sd(sd, attribute):
sds = sd.select(attribute)
sddata = sds.get()
sds.endaccess()
return sddata
#--- get mean value over the south pole
def get_mean_2d(data_2d):
    # mean over the 360 longitudinal bins, plus standard deviation and count
    # numpy[180,360] -> numpy[180]
    data_2d_ma = numpy.ma.masked_equal(data_2d, -9999.) # mask no-data bins
data_mean = data_2d_ma.mean(axis=1)
data_mean_std = data_2d_ma.std(axis=1)
data_mean_num = data_2d_ma.count(axis=1)
data_mean = numpy.ma.filled(data_mean, fill_value=-1.)
data_mean_std = numpy.ma.filled(data_mean_std, fill_value=-1.)
return data_mean, data_mean_std, data_mean_num
def get_mean_3d(data_3d):
    # mean over the 360 longitudinal bins, plus standard deviation and count
    # numpy[24,180,360] -> numpy[24,180]
    data_3d_ma = numpy.ma.masked_equal(data_3d, -9999.) # mask no-data bins
data_mean = data_3d_ma.mean(axis=2)
data_mean_std = data_3d_ma.std(axis=2)
data_mean_num = data_3d_ma.count(axis=2)
data_mean = numpy.ma.filled(data_mean, fill_value=-1.)
data_mean_std = numpy.ma.filled(data_mean_std, fill_value=-1.)
return data_mean, data_mean_std, data_mean_num
#--- write all data
def write_all(
txtfile, grid_plvl, grid_lat, sttime_a, entime_a, sttime_d, entime_d,
surft_a, surft_a_std, surft_a_num, surft_d, surft_d_std, surft_d_num,
surfp_a, surfp_a_std, surfp_a_num, surfp_d, surfp_d_std, surfp_d_num,
tprof_a, tprof_a_std, tprof_a_num, tprof_d, tprof_d_std, tprof_d_num,
geoph_a, geoph_a_std, geoph_a_num, geoph_d, geoph_d_std, geoph_d_num):
fout = open(txtfile, "w")
# ascending node
fout.write( "#------ ---- ascending node\n" )
fout.write( "# start %s\n" % sttime_a )
fout.write( "# end %s\n" % entime_a )
buff = "".join([" ----------------------------- %6.1f hPa" % elem for elem in grid_plvl])
fout.write( "".join(["#------ -------------------- surface observation", '%s' % buff, "\n"]) )
buff = " geoph std nave tprof std nave"
fout.write( "".join(["# lat surfp std nave surft std nave", buff * 24, "\n"]) )
for ilat in range(0,180):
surf_lat_a = '%9.1f%7.1f%5i%9.2f%7.2f%5i' % (
surfp_a[ilat], surfp_a_std[ilat], surfp_a_num[ilat],
surft_a[ilat], surft_a_std[ilat], surft_a_num[ilat])
tprof_lat_a = [ "%.2f" % elem for elem in tprof_a[0:,ilat] ]
tprof_lat_a_std = [ "%.2f" % elem for elem in tprof_a_std[0:,ilat] ]
tprof_lat_a_num = [ "%i" % elem for elem in tprof_a_num[0:,ilat] ]
geoph_lat_a = [ "%.1f" % elem for elem in geoph_a[0:,ilat] ]
geoph_lat_a_std = [ "%.1f" % elem for elem in geoph_a_std[0:,ilat] ]
geoph_lat_a_num = [ "%i" % elem for elem in geoph_a_num[0:,ilat] ]
prof_lat_a = tuple(
['%9s%7s%5s' * 2 % (geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num)
for geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num
in zip(geoph_lat_a, geoph_lat_a_std, geoph_lat_a_num,
tprof_lat_a, tprof_lat_a_std, tprof_lat_a_num)])
fout.write( "".join(['%7.1f' % grid_lat[ilat,0], '%42s' % surf_lat_a, '%42s' * 24 % prof_lat_a, "\n"]) )
# descending node
fout.write( "#------ --- descending node\n" )
fout.write( "# start %s\n" % sttime_d )
fout.write( "# end %s\n" % entime_d )
buff = "".join([" ----------------------------- %6.1f hPa" % elem for elem in grid_plvl])
fout.write( "".join(["#------ -------------------- surface observation", '%s' % buff, "\n"]) )
buff = " geoph std nave tprof std nave"
fout.write( "".join(["# lat surfp std nave surft std nave", buff * 24, "\n"]) )
for ilat in range(0,180):
surf_lat_d = '%9.1f%7.1f%5i%9.2f%7.2f%5i' % (
surfp_d[ilat], surfp_d_std[ilat], surfp_d_num[ilat],
surft_d[ilat], surft_d_std[ilat], surft_d_num[ilat])
tprof_lat_d = [ "%.2f" % elem for elem in tprof_d[0:,ilat] ]
tprof_lat_d_std = [ "%.2f" % elem for elem in tprof_d_std[0:,ilat] ]
tprof_lat_d_num = [ "%i" % elem for elem in tprof_d_num[0:,ilat] ]
geoph_lat_d = [ "%.1f" % elem for elem in geoph_d[0:,ilat] ]
geoph_lat_d_std = [ "%.1f" % elem for elem in geoph_d_std[0:,ilat] ]
geoph_lat_d_num = [ "%i" % elem for elem in geoph_d_num[0:,ilat] ]
prof_lat_d = tuple(
['%9s%7s%5s' * 2 % (geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num)
for geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num
in zip(geoph_lat_d, geoph_lat_d_std, geoph_lat_d_num,
tprof_lat_d, tprof_lat_d_std, tprof_lat_d_num)])
fout.write( "".join(['%7.1f' % grid_lat[ilat,0], '%42s' % surf_lat_d, '%42s' * 24 % prof_lat_d, "\n"]) )
fout.close()
#--- compress to tar.gz
# dirname = os.path.dirname(txtfile)
# os.chdir(dirname)
# basename = os.path.basename(txtfile)
# tar = os.path.splitext(basename)[0]+".tar.gz"
# fout = tarfile.open(tar, "w:gz")
# fout.add(basename)
# fout.close()
def write_all2(
txtfile, grid_plvl, grid_lat,grid_lon, sttime_a, entime_a,
        sttime_d, entime_d, tprof_all_a, geoph_all_a, tprof_all_d, geoph_all_d):
    # NOTE: as written, the body below reuses write_all's per-latitude arrays
    # (surfp_a, tprof_a, geoph_a, ...) without deriving them from the *_all
    # inputs, so those names must already be in scope for this to run.
    fout = open(txtfile, "w")
# ascending node
fout.write( "#------ ---- ascending node\n" )
fout.write( "# start %s\n" % sttime_a )
fout.write( "# end %s\n" % entime_a )
buff = "".join([" ----------------------------- %6.1f hPa" % elem for elem in grid_plvl])
fout.write( "".join(["#------ -------------------- surface observation", '%s' % buff, "\n"]) )
buff = " geoph std nave tprof std nave"
fout.write( "".join(["# lat surfp std nave surft std nave", buff * 24, "\n"]) )
for ilat in range(0,180):
surf_lat_a = '%9.1f%7.1f%5i%9.2f%7.2f%5i' % (
surfp_a[ilat], surfp_a_std[ilat], surfp_a_num[ilat],
surft_a[ilat], surft_a_std[ilat], surft_a_num[ilat])
tprof_lat_a = [ "%.2f" % elem for elem in tprof_a[0:,ilat] ]
tprof_lat_a_std = [ "%.2f" % elem for elem in tprof_a_std[0:,ilat] ]
tprof_lat_a_num = [ "%i" % elem for elem in tprof_a_num[0:,ilat] ]
geoph_lat_a = [ "%.1f" % elem for elem in geoph_a[0:,ilat] ]
geoph_lat_a_std = [ "%.1f" % elem for elem in geoph_a_std[0:,ilat] ]
geoph_lat_a_num = [ "%i" % elem for elem in geoph_a_num[0:,ilat] ]
prof_lat_a = tuple(
['%9s%7s%5s' * 2 % (geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num)
for geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num
in zip(geoph_lat_a, geoph_lat_a_std, geoph_lat_a_num,
tprof_lat_a, tprof_lat_a_std, tprof_lat_a_num)])
fout.write( "".join(['%7.1f' % grid_lat[ilat,0], '%42s' % surf_lat_a, '%42s' * 24 % prof_lat_a, "\n"]) )
# descending node
fout.write( "#------ --- descending node\n" )
fout.write( "# start %s\n" % sttime_d )
fout.write( "# end %s\n" % entime_d )
buff = "".join([" ----------------------------- %6.1f hPa" % elem for elem in grid_plvl])
fout.write( "".join(["#------ -------------------- surface observation", '%s' % buff, "\n"]) )
buff = " geoph std nave tprof std nave"
fout.write( "".join(["# lat surfp std nave surft std nave", buff * 24, "\n"]) )
for ilat in range(0,180):
surf_lat_d = '%9.1f%7.1f%5i%9.2f%7.2f%5i' % (
surfp_d[ilat], surfp_d_std[ilat], surfp_d_num[ilat],
surft_d[ilat], surft_d_std[ilat], surft_d_num[ilat])
tprof_lat_d = [ "%.2f" % elem for elem in tprof_d[0:,ilat] ]
tprof_lat_d_std = [ "%.2f" % elem for elem in tprof_d_std[0:,ilat] ]
tprof_lat_d_num = [ "%i" % elem for elem in tprof_d_num[0:,ilat] ]
geoph_lat_d = [ "%.1f" % elem for elem in geoph_d[0:,ilat] ]
geoph_lat_d_std = [ "%.1f" % elem for elem in geoph_d_std[0:,ilat] ]
geoph_lat_d_num = [ "%i" % elem for elem in geoph_d_num[0:,ilat] ]
prof_lat_d = tuple(
['%9s%7s%5s' * 2 % (geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num)
for geoph, geoph_std, geoph_num, tprof, tprof_std, tprof_num
in zip(geoph_lat_d, geoph_lat_d_std, geoph_lat_d_num,
tprof_lat_d, tprof_lat_d_std, tprof_lat_d_num)])
fout.write( "".join(['%7.1f' % grid_lat[ilat,0], '%42s' % surf_lat_d, '%42s' * 24 % prof_lat_d, "\n"]) )
fout.close()
#--- compress to tar.gz
# dirname = os.path.dirname(txtfile)
# os.chdir(dirname)
# basename = os.path.basename(txtfile)
# tar = os.path.splitext(basename)[0]+".tar.gz"
# fout = tarfile.open(tar, "w:gz")
# fout.add(basename)
# fout.close()
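The -9999. sentinel convention in get_mean_2d/get_mean_3d is easy to exercise on a synthetic grid; fully masked latitude rows come back with the -1 fill value and a zero count:
import numpy as np

grid = np.full((180, 360), -9999.)  # start from an all-missing grid
grid[0, :10] = 250.0                # ten valid longitude bins in row 0

mean, std, num = get_mean_2d(grid)
assert mean[0] == 250.0 and num[0] == 10  # averaged over valid bins only
assert mean[1] == -1.0 and num[1] == 0    # fully masked row -> fill value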
| 52.722222
| 113
| 0.572181
| 1,474
| 9,490
| 3.402307
| 0.093623
| 0.025523
| 0.061416
| 0.072582
| 0.824726
| 0.815952
| 0.806381
| 0.806381
| 0.783649
| 0.764108
| 0
| 0.033961
| 0.255321
| 9,490
| 180
| 114
| 52.722222
| 0.675676
| 0.107376
| 0
| 0.761194
| 0
| 0
| 0.151982
| 0.026663
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.022388
| 0
| 0.097015
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7a86438c195ab423e93693b7fb988cc18f666a4
| 31
|
py
|
Python
|
keyboards/inline/__init__.py
|
Abdumaleek/SDK
|
3be23ec6e73a141ecafc57e1c131b215eec9f438
|
[
"MIT"
] | 1
|
2021-08-24T12:58:50.000Z
|
2021-08-24T12:58:50.000Z
|
keyboards/inline/__init__.py
|
Abdumaleek/SDK
|
3be23ec6e73a141ecafc57e1c131b215eec9f438
|
[
"MIT"
] | null | null | null |
keyboards/inline/__init__.py
|
Abdumaleek/SDK
|
3be23ec6e73a141ecafc57e1c131b215eec9f438
|
[
"MIT"
] | null | null | null |
from .set_lang import set_lang
| 15.5
| 30
| 0.83871
| 6
| 31
| 4
| 0.666667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e7c804518b986426c64d1b2742101899f3ccc8f2
| 530
|
py
|
Python
|
temporalio/api/failure/v1/__init__.py
|
cretz/temporal-sdk-python
|
431ca1967d365556a9cf5aa9aac00243b71059f8
|
[
"MIT"
] | 55
|
2022-01-31T22:02:22.000Z
|
2022-03-30T11:17:21.000Z
|
temporalio/api/failure/v1/__init__.py
|
cretz/temporal-sdk-python
|
431ca1967d365556a9cf5aa9aac00243b71059f8
|
[
"MIT"
] | 7
|
2022-02-04T14:08:46.000Z
|
2022-03-22T13:27:30.000Z
|
temporalio/api/failure/v1/__init__.py
|
cretz/temporal-sdk-python
|
431ca1967d365556a9cf5aa9aac00243b71059f8
|
[
"MIT"
] | 4
|
2022-01-31T17:31:49.000Z
|
2022-03-29T01:04:46.000Z
|
from .message_pb2 import (
ActivityFailureInfo,
ApplicationFailureInfo,
CanceledFailureInfo,
ChildWorkflowExecutionFailureInfo,
Failure,
ResetWorkflowFailureInfo,
ServerFailureInfo,
TerminatedFailureInfo,
TimeoutFailureInfo,
)
__all__ = [
"ActivityFailureInfo",
"ApplicationFailureInfo",
"CanceledFailureInfo",
"ChildWorkflowExecutionFailureInfo",
"Failure",
"ResetWorkflowFailureInfo",
"ServerFailureInfo",
"TerminatedFailureInfo",
"TimeoutFailureInfo",
]
| 22.083333
| 40
| 0.732075
| 23
| 530
| 16.652174
| 0.608696
| 0.214099
| 0.313316
| 0.48564
| 0.939948
| 0.939948
| 0.939948
| 0.939948
| 0.939948
| 0
| 0
| 0.002326
| 0.188679
| 530
| 23
| 41
| 23.043478
| 0.888372
| 0
| 0
| 0
| 0
| 0
| 0.339623
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
99b3a3a2df4739b83e76396293b666f2c4a90320
| 30,094
|
py
|
Python
|
tests/test_mlxtend.py
|
a-wozniakowski/scikit-physlearn
|
3d4530fca1a7c997d4d6fc463fd8082d4ddc0e73
|
[
"MIT"
] | 8
|
2020-10-20T08:25:32.000Z
|
2022-02-17T10:27:20.000Z
|
tests/test_mlxtend.py
|
tzislam/scikit-physlearn
|
1241bbc4e3cedd581a1753b660a4d23d2e4f0ef4
|
[
"MIT"
] | 2
|
2021-07-14T16:25:08.000Z
|
2021-07-20T03:05:14.000Z
|
tests/test_mlxtend.py
|
tzislam/scikit-physlearn
|
1241bbc4e3cedd581a1753b660a4d23d2e4f0ef4
|
[
"MIT"
] | 3
|
2020-07-16T04:20:51.000Z
|
2021-06-23T21:22:43.000Z
|
"""
Unit tests for Mlxtend compatibility.
"""
# Author: Alex Wozniakowski
# License: MIT
import unittest
import pandas as pd
from scipy.stats import randint
from sklearn import __version__ as sk_version
from sklearn.base import clone
from sklearn.datasets import load_boston, load_linnerud
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.pipeline import FeatureUnion
from physlearn import Regressor
from physlearn.datasets import load_benchmark
from physlearn.supervised import ShapInterpret
class TestMlxtend(unittest.TestCase):
def test_stacking_regressor_without_cv_gridsearchcv(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 3.0)
self.assertIn(reg.best_params_['reg__kneighborsregressor__n_neighbors'], [2, 4, 5])
self.assertIn(reg.best_params_['reg__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__meta_regressor__alpha'], [1.0])
def test_stacking_regressor_with_cv_gridsearchcv(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 2.8)
self.assertIn(reg.best_params_['reg__kneighborsregressor__n_neighbors'], [2, 4, 5])
self.assertIn(reg.best_params_['reg__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__meta_regressor__alpha'], [1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressor_without_cv_gridsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 10.0)
self.assertIn(reg.best_params_['reg__estimator__kneighborsregressor__n_neighbors'],
[2, 4, 5])
self.assertIn(reg.best_params_['reg__estimator__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__estimator__meta_regressor__alpha'], [1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressor_with_cv_gridsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 10.0)
self.assertIn(reg.best_params_['reg__estimator__kneighborsregressor__n_neighbors'],
[2, 4, 5])
self.assertIn(reg.best_params_['reg__estimator__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__estimator__meta_regressor__alpha'], [1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressorchain_without_cv_gridsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
chain_order=[2, 0, 1])
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 12.0)
self.assertIn(reg.best_params_['reg__base_estimator__kneighborsregressor__n_neighbors'],
[2, 4, 5])
self.assertIn(reg.best_params_['reg__base_estimator__bayesianridge__alpha_1'],
[1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__base_estimator__meta_regressor__alpha'],
[1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressorchain_with_cv_gridsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
chain_order=[2, 0, 1])
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 12.0)
self.assertIn(reg.best_params_['reg__base_estimator__kneighborsregressor__n_neighbors'],
[2, 4, 5])
self.assertIn(reg.best_params_['reg__base_estimator__bayesianridge__alpha_1'],
[1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__base_estimator__meta_regressor__alpha'],
[1.0])
def test_stacking_regressor_without_cv_randomizedsearchcv(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
randomizedcv_n_iter=6)
search_params = dict(reg__kneighborsregressor__n_neighbors=randint(low=2, high=5),
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params,
search_method='randomizedsearchcv')
self.assertLess(reg.best_score_.values, 3.0)
self.assertLessEqual(reg.best_params_['reg__kneighborsregressor__n_neighbors'], 5)
self.assertGreaterEqual(reg.best_params_['reg__kneighborsregressor__n_neighbors'], 2)
self.assertIn(reg.best_params_['reg__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__meta_regressor__alpha'], [1.0])
def test_stacking_regressor_with_cv_randomizedsearchcv(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
randomizedcv_n_iter=6)
search_params = dict(reg__kneighborsregressor__n_neighbors=randint(low=2, high=5),
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params,
search_method='randomizedsearchcv')
self.assertLess(reg.best_score_.values, 3.0)
self.assertLessEqual(reg.best_params_['reg__kneighborsregressor__n_neighbors'], 5)
self.assertGreaterEqual(reg.best_params_['reg__kneighborsregressor__n_neighbors'], 2)
self.assertIn(reg.best_params_['reg__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__meta_regressor__alpha'], [1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressor_without_cv_randomizedsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
randomizedcv_n_iter=6)
search_params = dict(reg__kneighborsregressor__n_neighbors=randint(low=2, high=5),
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params,
search_method='randomizedsearchcv')
self.assertLess(reg.best_score_.values, 12.8)
self.assertLessEqual(reg.best_params_['reg__estimator__kneighborsregressor__n_neighbors'],
5)
self.assertGreaterEqual(reg.best_params_['reg__estimator__kneighborsregressor__n_neighbors'],
2)
self.assertIn(reg.best_params_['reg__estimator__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__estimator__meta_regressor__alpha'], [1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressor_with_cv_randomizedsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
randomizedcv_n_iter=6)
search_params = dict(reg__kneighborsregressor__n_neighbors=randint(low=2, high=5),
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params,
search_method='randomizedsearchcv')
self.assertLess(reg.best_score_.values, 12.8)
self.assertLessEqual(reg.best_params_['reg__estimator__kneighborsregressor__n_neighbors'],
5)
self.assertGreaterEqual(reg.best_params_['reg__estimator__kneighborsregressor__n_neighbors'],
2)
self.assertIn(reg.best_params_['reg__estimator__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__estimator__meta_regressor__alpha'], [1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressorchain_without_cv_randomizedsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
randomizedcv_n_iter=6,
chain_order=[2, 0, 1])
search_params = dict(reg__kneighborsregressor__n_neighbors=randint(low=2, high=5),
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params,
search_method='randomizedsearchcv')
self.assertLess(reg.best_score_.values, 12.8)
self.assertLessEqual(reg.best_params_['reg__base_estimator__kneighborsregressor__n_neighbors'],
5)
self.assertGreaterEqual(reg.best_params_['reg__base_estimator__kneighborsregressor__n_neighbors'],
2)
self.assertIn(reg.best_params_['reg__base_estimator__bayesianridge__alpha_1'],
[1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__base_estimator__meta_regressor__alpha'],
[1.0])
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressorchain_with_cv_randomizedsearchcv(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
randomizedcv_n_iter=6,
chain_order=[2, 0, 1])
search_params = dict(reg__kneighborsregressor__n_neighbors=randint(low=2, high=5),
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params,
search_method='randomizedsearchcv')
self.assertLess(reg.best_score_.values, 12.8)
self.assertLessEqual(reg.best_params_['reg__base_estimator__kneighborsregressor__n_neighbors'],
5)
self.assertGreaterEqual(reg.best_params_['reg__base_estimator__kneighborsregressor__n_neighbors'],
2)
self.assertIn(reg.best_params_['reg__base_estimator__bayesianridge__alpha_1'],
[1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__base_estimator__meta_regressor__alpha'],
[1.0])
def test_stacking_regressor_without_cv_fit_score(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
reg.fit(X_train, y_train)
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred)
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'].values, 0.0)
self.assertGreaterEqual(score['mse'].values, 0.0)
self.assertLess(score['mae'].values, 2.7)
self.assertLess(score['mse'].values, 19.0)
def test_stacking_regressor_with_cv_fit_score(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
reg.fit(X_train, y_train)
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred)
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'].values, 0.0)
self.assertGreaterEqual(score['mse'].values, 0.0)
self.assertLess(score['mae'].values, 2.7)
self.assertLess(score['mse'].values, 19.0)
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressor_without_cv_fit_score(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred).mean()
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'], 0.0)
self.assertGreaterEqual(score['mse'], 0.0)
self.assertLess(score['mae'], 11.0)
self.assertLess(score['mse'], 260.0)
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressor_with_cv_fit_score(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred).mean()
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'], 0.0)
self.assertGreaterEqual(score['mse'], 0.0)
self.assertLess(score['mae'], 9.0)
self.assertLess(score['mse'], 190.0)
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressorchain_without_cv_fit_score(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
chain_order=[2, 0, 1])
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred).mean()
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'], 0.0)
self.assertGreaterEqual(score['mse'], 0.0)
self.assertLess(score['mae'], 10.0)
self.assertLess(score['mse'], 180.0)
# sklearn < 0.23 does not have as_frame parameter
@unittest.skipIf(sk_version < '0.23.0', 'scikit-learn version is less than 0.23')
def test_multioutput_regressorchain_with_cv_fit_score(self):
bunch = load_linnerud(as_frame=True) # returns a Bunch instance
X, y = bunch['data'], bunch['target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack),
chain_order=[2, 0, 1])
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred).mean()
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'], 0.0)
self.assertGreaterEqual(score['mse'], 0.0)
self.assertLess(score['mae'], 7.0)
self.assertLess(score['mse'], 110.0)
def test_without_cv_pipeline_clone_fit_score(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
transformer_list = [('pca', PCA(n_components=1)),
('svd', TruncatedSVD(n_components=2))]
union = FeatureUnion(transformer_list=transformer_list, n_jobs=-1)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform=('tr', union),
stacking_options=dict(layers=stack))
reg.get_pipeline(y=y_train)
_class_before_clone = reg.pipe.__class__
reg.pipe = clone(reg.pipe)
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred)
self.assertEqual(_class_before_clone, reg.pipe.__class__)
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'].values, 0.0)
self.assertGreaterEqual(score['mse'].values, 0.0)
self.assertLess(score['mae'].values, 11.0)
self.assertLess(score['mse'].values, 232.0)
def test_with_cv_pipeline_clone_fit_score(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
transformer_list = [('pca', PCA(n_components=1)),
('svd', TruncatedSVD(n_components=2))]
union = FeatureUnion(transformer_list=transformer_list, n_jobs=-1)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingcvregressor',
pipeline_transform=('tr', union),
stacking_options=dict(layers=stack))
reg.get_pipeline(y=y_train)
_class_before_clone = reg.pipe.__class__
reg.pipe = clone(reg.pipe)
y_pred = reg.fit(X_train, y_train).predict(X_test)
score = reg.score(y_test, y_pred)
self.assertEqual(_class_before_clone, reg.pipe.__class__)
self.assertCountEqual(y_pred.index, y_test.index)
self.assertGreaterEqual(score['mae'].values, 0.0)
self.assertGreaterEqual(score['mse'].values, 0.0)
self.assertLess(score['mae'].values, 11.0)
self.assertLess(score['mse'].values, 232.0)
def test_without_cv_shap_explainer(self):
X_train, _, y_train, _ = load_benchmark(return_split=True)
index = 3
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
interpret = ShapInterpret(regressor_choice='mlxtendstackingregressor',
target_index=index,
stacking_options=dict(layers=stack))
interpret.fit(X=X_train, y=y_train, index=index)
explainer, shap_values = interpret.explainer(X=X_train)
self.assertEqual(X_train.shape, shap_values.shape)
def test_with_cv_shap_explainer(self):
X_train, _, y_train, _ = load_benchmark(return_split=True)
index = 3
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
interpret = ShapInterpret(regressor_choice='mlxtendstackingcvregressor',
target_index=index,
stacking_options=dict(layers=stack))
interpret.fit(X=X_train, y=y_train, index=index)
explainer, shap_values = interpret.explainer(X=X_train)
self.assertEqual(X_train.shape, shap_values.shape)
if __name__ == '__main__':
unittest.main()
| 54.41953
| 106
| 0.611085
| 3,426
| 30,094
| 5.000292
| 0.048745
| 0.006538
| 0.031872
| 0.039227
| 0.964801
| 0.959489
| 0.95622
| 0.954585
| 0.954585
| 0.954469
| 0
| 0.024845
| 0.285804
| 30,094
| 552
| 107
| 54.518116
| 0.772205
| 0.031667
| 0
| 0.89852
| 0
| 0
| 0.143328
| 0.076611
| 0
| 0
| 0
| 0
| 0.207188
| 1
| 0.046512
| false
| 0
| 0.02537
| 0
| 0.073996
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
99f2add62929f3817b2a7fa080acff1e010d37f1
| 708
|
py
|
Python
|
ss5/4-1.py
|
DuongVu39/C4E10_Duong
|
60ec59bddbb3397b5a1804930d5bdfd81107dcae
|
[
"MIT"
] | null | null | null |
ss5/4-1.py
|
DuongVu39/C4E10_Duong
|
60ec59bddbb3397b5a1804930d5bdfd81107dcae
|
[
"MIT"
] | null | null | null |
ss5/4-1.py
|
DuongVu39/C4E10_Duong
|
60ec59bddbb3397b5a1804930d5bdfd81107dcae
|
[
"MIT"
] | null | null | null |
from turtle import *
bgcolor("lightgreen")  # Set the window background color
color('pink', 'pink')
def draw_square():
    for i in range(4):
        fd(20)
        left(90)
def ex1(n):
    for i in range(n):
        draw_square()
        penup()
        fd(40)
        pendown()
begin_fill()
ex1(5)
end_fill()
| 15.391304
| 55
| 0.564972
| 92
| 708
| 4.26087
| 0.369565
| 0.102041
| 0.061224
| 0.112245
| 0.887755
| 0.887755
| 0.887755
| 0.887755
| 0.887755
| 0.887755
| 0
| 0.092702
| 0.283898
| 708
| 45
| 56
| 15.733333
| 0.680473
| 0.090395
| 0
| 0.914286
| 0
| 0
| 0.056075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.057143
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
99f76d6d78286d4fa9e046e2fb618198b86d36ef
| 219,404
|
py
|
Python
|
Mybase/yale_utils/make_image.py
|
czyczyyzc/MyForElise
|
dcbf5924d3d63f441d3247741828804f74a29345
|
[
"MIT"
] | null | null | null |
Mybase/yale_utils/make_image.py
|
czyczyyzc/MyForElise
|
dcbf5924d3d63f441d3247741828804f74a29345
|
[
"MIT"
] | null | null | null |
Mybase/yale_utils/make_image.py
|
czyczyyzc/MyForElise
|
dcbf5924d3d63f441d3247741828804f74a29345
|
[
"MIT"
] | null | null | null |
import os
import cv2
import glob
import pickle
#import scipy.misc
import math
#import lmdb
import struct
import random
import itertools
import numpy as np
import tensorflow as tf
import xml.etree.ElementTree as ET
import scipy.io as sio
#from captcha.image import ImageCaptcha
from matplotlib.patches import Polygon
from skimage.measure import find_contours
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.lib.io.tf_record import TFRecordCompressionType
import colorsys
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.patches as patches
#from shapely.geometry import Polygon
from .bbox import *
from Mybase.comp_utils import tensor_update
def _int64_feature(value):
return tf.train.Feature(int64_list = tf.train.Int64List(value = [value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list = tf.train.BytesList(value = [value]))
def _float_feature(value):
return tf.train.Feature(float_list = tf.train.FloatList(value = [value]))
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random.uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
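# Usage sketch (mirrors the call in preprocessing1() of GeneratorForImageNet below):
#   image = apply_with_random_selector(image, lambda x, order: distort_color(x, order), num_cases=4)
# This draws one of the four color-distortion orderings at graph-execution time.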
def distort_color(image=None, color_order=0):
if color_order == 0:
image = tf.image.random_brightness(image, max_delta=32.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_order == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_order == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_order == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32.)
else:
raise ValueError("color_order must be in [0, 3]")
return tf.clip_by_value(image, 0.0, 255.0)
def distort_bbox(boxs=None, order=0):
img_shp = tf.cast(boxs[-1, :-1], dtype=tf.int32)
boxs = boxs[0:-1]
if order == 0:
box_beg = tf.zeros(shape=[3], dtype=tf.int32)
box_siz = tf.stack([img_shp[0], img_shp[1], -1], axis=-1)
elif order == 1:
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(img_shp, bounding_boxes=tf.expand_dims(boxs, 0), \
min_object_covered=0.1, aspect_ratio_range=[0.5, 2.0], \
area_range=[0.3, 1.0], max_attempts=50, \
use_image_if_no_bounding_boxes=True)
elif order == 2:
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(img_shp, bounding_boxes=tf.expand_dims(boxs, 0), \
min_object_covered=0.3, aspect_ratio_range=[0.5, 2.0], \
area_range=[0.3, 1.0], max_attempts=50, \
use_image_if_no_bounding_boxes=True)
elif order == 3:
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(img_shp, bounding_boxes=tf.expand_dims(boxs, 0), \
min_object_covered=0.5, aspect_ratio_range=[0.5, 2.0], \
area_range=[0.3, 1.0], max_attempts=50, \
use_image_if_no_bounding_boxes=True)
elif order == 4:
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(img_shp, bounding_boxes=tf.expand_dims(boxs, 0), \
min_object_covered=0.7, aspect_ratio_range=[0.5, 2.0], \
area_range=[0.3, 1.0], max_attempts=50, \
use_image_if_no_bounding_boxes=True)
elif order == 5:
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(img_shp, bounding_boxes=tf.expand_dims(boxs, 0), \
min_object_covered=0.9, aspect_ratio_range=[0.5, 2.0], \
area_range=[0.3, 1.0], max_attempts=50, \
use_image_if_no_bounding_boxes=True)
elif order == 6:
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(img_shp, bounding_boxes=tf.expand_dims(boxs, 0), \
min_object_covered=0.0, aspect_ratio_range=[0.5, 2.0], \
area_range=[0.3, 1.0], max_attempts=50, \
use_image_if_no_bounding_boxes=True)
else:
raise ValueError("order must be in [0, 6]")
return tf.stack([box_beg, box_siz], axis=0)
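# Note: orders 1-6 differ only in min_object_covered (0.1, 0.3, 0.5, 0.7, 0.9, 0.0),
# while order 0 keeps the whole image. The stacked [box_beg, box_siz] result can be
# unpacked and fed to tf.slice to perform the actual crop (compare the tf.slice call
# in distort_crop below).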
class GeneratorForMNIST(object):
def __init__(self, mod_tra=True, dat_dir='Mybase/datasets/train', bat_siz=3, epc_num=20, \
min_after_dequeue=30, gpu_lst='0', fil_num=32):
self.mod_tra = mod_tra
self.use_pad = False
self.use_exp = False
self.exp_rat = 2.0
self.img_avg = np.array([0.0], dtype=np.float32)
self.img_siz_min = 32
self.img_siz_max = 32
############for crop###########
self.min_object_covered = 0.70
self.aspect_ratio_range = [0.75, 1.33] #(0.5, 2.0) #(3/4, 4/3)
self.area_range = [0.70, 1.00]
self.max_attempts = 200
self.dat_dir = dat_dir
self.bat_siz = bat_siz
self.epc_num = epc_num
self.min_after_dequeue = min_after_dequeue
self.gpu_lst = gpu_lst
self.gpu_num = len(self.gpu_lst.split(','))
self.mdl_dev = '/cpu:%d' if self.gpu_num == 0 else '/gpu:%d'
self.gpu_num = 1 if self.gpu_num == 0 else self.gpu_num
self.fil_num = fil_num
self.num_readers = 16
self.num_threads = 16
self.capacity = self.min_after_dequeue + 3 * self.bat_siz
self.max_num = 100
self.cls_nams = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
self.cls_num = len(self.cls_nams)
self.cls_idx_to_cls_nam = dict(zip(range(self.cls_num), self.cls_nams))
self.cls_nam_to_cls_idx = dict(zip(self.cls_nams, range(self.cls_num)))
############for show###########
self.title = ''
self.figsize = (3, 3)
def make_input(self, num_per_sha=1000000, fil_nam='t10k'):
fils_dir = 'Mybase/datasets/raw/mnist'
rcds_dir = "Mybase/datasets/" + fil_nam
lbls_dir = os.path.join(fils_dir, '%s-labels-idx1-ubyte' % fil_nam)
imgs_dir = os.path.join(fils_dir, '%s-images-idx3-ubyte' % fil_nam)
with open(lbls_dir, 'rb') as f:
magic, n = struct.unpack('>II', f.read(8))
lbls = np.fromfile(f, dtype=np.uint8)
with open(imgs_dir, 'rb') as f:
magic, num, rows, cols = struct.unpack('>IIII', f.read(16))
imgs = np.fromfile(f, dtype=np.uint8).reshape(len(lbls), 28, 28, 1)
img_num = imgs.shape[0]
print("The amount of images is %d!" %(img_num))
idxs = np.arange(0, img_num)
np.random.shuffle(idxs)
imgs = [imgs[idx] for idx in idxs]
lbls = [lbls[idx] for idx in idxs]
with tf.Graph().as_default(), tf.device('/cpu:0'):
sha_num = int(img_num/num_per_sha)
if sha_num == 0:
sha_num = 1
num_per_sha = img_num
else:
num_per_sha = int(math.ceil(img_num/sha_num))
for sha_idx in range(sha_num):
out_nam = 'mnist_%s.tfrecord' % (fil_nam)
rcd_nam = os.path.join(rcds_dir, out_nam)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
with tf.python_io.TFRecordWriter(rcd_nam, options=options) as writer:
sta_idx = sha_idx * num_per_sha
end_idx = min((sha_idx + 1) * num_per_sha, img_num)
for i in range(sta_idx, end_idx):
if i % 100 == 0:
print("Converting image %d/%d shard %d" % (i + 1, img_num, sha_idx))
img = imgs[i]
img_hgt = img.shape[0]
img_wdh = img.shape[1]
#img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
#img_tmp[:, :, :] = img
#img = img_tmp
lbl = lbls[i]
# write the TFRecord example
img_raw = img.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(img_hgt),
'image/width': _int64_feature(img_wdh),
'label/label': _int64_feature(lbl),
}))
writer.write(example.SerializeToString())
def resize_image_with_pad(self, img=None):
##################### scale proportionally along the shorter side ######################
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
if self.use_pad:
leh_min = tf.minimum(img_hgt, img_wdh)
leh_max = tf.maximum(img_hgt, img_wdh)
leh_rat = tf.minimum(self.img_siz_min/leh_min, self.img_siz_max/leh_max)
img_hgt = tf.cast(img_hgt*leh_rat, dtype=tf.int32)
img_wdh = tf.cast(img_wdh*leh_rat, dtype=tf.int32)
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_images(img, [img_hgt, img_wdh], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
################ if the longest side is still too long, crop symmetrically about the center ################
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_image_with_crop_or_pad(img, self.img_siz_max, self.img_siz_max)
else:
hgt_rat = self.img_siz_max / img_hgt
wdh_rat = self.img_siz_max / img_wdh
leh_rat = tf.stack([hgt_rat, wdh_rat], axis=0)
leh_rat = tf.tile(leh_rat, [2])
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_images(img, [self.img_siz_max, self.img_siz_max], method=tf.image.ResizeMethod.BILINEAR, \
align_corners=False)
return img
def distort_crop(self, img=None):
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = tf.stack([[0.0, 0.0, img_hgt-1.0, img_wdh-1.0]], axis=0)
if self.use_exp:
exp_rat = tf.random.uniform(shape=[], minval=1.1, maxval=self.exp_rat, dtype=tf.float32)
#exp_rat = self.exp_rat
pad_hgt_all = tf.cast(img_hgt*(exp_rat-1.0), dtype=tf.int32)
pad_wdh_all = tf.cast(img_wdh*(exp_rat-1.0), dtype=tf.int32)
pad_hgt_fnt = tf.random.uniform(shape=[], minval=0, maxval=pad_hgt_all, dtype=tf.int32)
pad_wdh_fnt = tf.random.uniform(shape=[], minval=0, maxval=pad_wdh_all, dtype=tf.int32)
pad_hgt_bak = pad_hgt_all - pad_hgt_fnt
pad_wdh_bak = pad_wdh_all - pad_wdh_fnt
paddings = [[pad_hgt_fnt, pad_hgt_bak], [pad_wdh_fnt, pad_wdh_bak], [0, 0]]
img = tf.pad(img, paddings, "CONSTANT", constant_values=0)
pad_hgt_fnt = tf.cast(pad_hgt_fnt, dtype=tf.float32)
pad_wdh_fnt = tf.cast(pad_wdh_fnt, dtype=tf.float32)
beg = tf.stack([pad_hgt_fnt, pad_wdh_fnt], axis=0)
beg = tf.tile(beg, [2])
boxs = boxs + beg  # after padding, boxes cannot go out of bounds, so no clipping is needed
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
########################crop the image randomly########################
boxs_tmp = boxs / tf.stack([img_hgt-1.0, img_wdh-1.0, img_hgt-1.0, img_wdh-1.0], axis=0)
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(tf.shape(img), bounding_boxes=tf.expand_dims(boxs_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
img = tf.slice(img, box_beg, box_siz)
###########resize image to the expected size with paddings############
img = self.resize_image_with_pad(img)
return img
def preprocessing(self, img=None):
img = tf.cast(img, dtype=tf.float32)
#################### normalize to the range [0, 1] ######################
#if img.dtype != tf.float32:
# img = tf.image.convert_image_dtype(img, dtype=tf.float32)
if self.mod_tra == True:
####################### photometric (color) distortion #########################
# Randomly distort the colors. There are 4 ways to do it.
#img= apply_with_random_selector(img, lambda x, order: distort_color(x, order), num_cases=4)
img = img - self.img_avg
img = img / 255.0
####################### random crop #########################
#img= self.distort_crop(img)
##################### random horizontal flip #######################
img = tf.image.random_flip_left_right(img)
img = self.resize_image_with_pad(img)
####################### subtract the mean ########################
#img= tf.image.per_image_standardization(img)
else:
img = img - self.img_avg
img = img / 255.0
img = self.resize_image_with_pad(img)
return img
def get_input(self):
def parse_function(serialized_example):
'''
Fixed-length feature parsing: tf.FixedLenFeature(shape, dtype, default_value)
shape: can double as a reshape, e.g. a vector's shape can be changed from (3,) to (1, 3).
Note: if the feature was written with .tostring(), its shape is ().
dtype: must be one of tf.float32, tf.int64, tf.string.
default_value: the value to use when the feature is missing.
Variable-length feature parsing: tf.VarLenFeature(dtype)
Note: the shape need not be given explicitly, but the resulting tensor is a SparseTensor.
Once a variable-length tensor has been converted to a string its shape is fixed (shape=[]),
so it can be parsed with tf.FixedLenFeature.
'''
parsed_example = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'image/height': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'image/width': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/label': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
#'matrix': tf.VarLenFeature(dtype=dtype('float32')),
#'matrix_shape':tf.FixedLenFeature(shape=(2,), dtype=tf.int64),
}
)
img_hgt = tf.cast(parsed_example['image/height'], tf.int32)
img_wdh = tf.cast(parsed_example['image/width'], tf.int32)
lbl = tf.cast(parsed_example['label/label'], tf.int32)
img = tf.decode_raw(parsed_example['image/image'], tf.uint8)
img = tf.reshape(img, [img_hgt, img_wdh, 1])
img = self.preprocessing(img)
img = tf.reshape(img, [self.img_siz_max, self.img_siz_max, 1])
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'label/label': lbl,
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
imgs_lst = []
lbls_lst = []
if self.fil_num >= self.gpu_num:
fil_pat = os.path.join(self.dat_dir, 'mnist', '*.tfrecord')
dataset = tf.data.Dataset.list_files(file_pattern=fil_pat, shuffle=True, seed=None)
else:
fil_nam = glob.glob(os.path.join(self.dat_dir, 'mnist', '*.tfrecord'))
dataset = tf.data.TFRecordDataset(fil_nam, compression_type='ZLIB', num_parallel_reads=self.num_readers)
for i in range(self.gpu_num):
dat_sha = dataset.shard(num_shards=self.gpu_num, index=i)
if self.fil_num >= self.gpu_num:
#dat_sha= dat_sha.interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
# cycle_length=self.num_readers, block_length=1, num_parallel_calls=1)
dat_sha = dat_sha.apply(tf.data.experimental.\
parallel_interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
cycle_length=self.num_readers, block_length=1, sloppy=True, \
buffer_output_elements=None, \
prefetch_input_elements=None))
#dat_sha = dat_sha.repeat(count=self.epc_num)
#dat_sha = dat_sha.shuffle(buffer_size=self.capacity, seed=None, reshuffle_each_iteration=True)
dat_sha = dat_sha.apply(tf.data.experimental.\
shuffle_and_repeat(buffer_size=self.capacity, count=self.epc_num, seed=None))
#dat_sha = dat_sha.map(parse_function, num_parallel_calls=self.num_threads)
#dat_sha = dat_sha.batch(batch_size=self.bat_siz, drop_remainder=False)
dat_sha = dat_sha.apply(tf.data.experimental.\
map_and_batch(parse_function, batch_size=self.bat_siz, num_parallel_batches=None, \
drop_remainder=False, num_parallel_calls=self.num_threads))
#dat_sha = dat_sha.cache(filename=os.path.join(self.dat_dir, 'cache'))
#dat_sha = dat_sha.prefetch(buffer_size=self.bat_siz)
dat_sha = dat_sha.apply(tf.data.experimental.prefetch_to_device(self.mdl_dev%i, buffer_size=None))
iterator = dat_sha.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst.append(example['image/image'])
lbls_lst.append(example['label/label'])
return imgs_lst, lbls_lst
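# Per-GPU input pipeline: parallel_interleave (read) -> shuffle_and_repeat
# -> map_and_batch (decode + augment) -> prefetch_to_device, with one one-shot
# iterator per device feeding the per-GPU image/label lists.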
def display_instances(self, img=None, lbl=None, img_hgt=None, img_wdh=None):
_, ax = plt.subplots(1, figsize=self.figsize)
#img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)
#Show area outside image boundaries.
#ax.set_ylim(img_hgt + 10, -10)
#ax.set_xlim(-10, img_wdh + 10)
ax.axis('off')
ax.set_title(str(lbl))
img = img * 255.0
img = img + self.img_avg
img = np.clip(img, 0.0, 255.0)
img = img.astype(dtype=np.uint8, copy=False)
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img
img = img_tmp
ax.imshow(img)
plt.show()
plt.close()
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, lbls_lst = self.get_input()
imgs = tf.concat(imgs_lst, axis=0)
lbls = tf.concat(lbls_lst, axis=0)
with tf.Session() as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, lbls_kep = sess.run([imgs, lbls])
for i in range(self.bat_siz*self.gpu_num):
img_tmp = imgs_kep[i]
lbl_tmp = lbls_kep[i]
self.display_instances(img_tmp, lbl_tmp)
class GeneratorForCIFAR(object):
def __init__(self, mod_tra=True, dat_dir='Mybase/datasets/train', bat_siz=3, epc_num=20, \
min_after_dequeue=30, gpu_lst='0', fil_num=32):
self.mod_tra = mod_tra
self.use_pad = False
self.use_exp = False
self.exp_rat = 2.0
self.img_avg = np.array([125.30, 122.95, 113.87], dtype=np.float32)
self.img_siz_min = 32
self.img_siz_max = 32
############for crop###########
self.min_object_covered = 0.50
self.aspect_ratio_range = [0.75, 1.33] #(0.5, 2.0) #(3/4, 4/3)
self.area_range = [0.50, 1.00]
self.max_attempts = 200
self.dat_dir = dat_dir
self.bat_siz = bat_siz
self.epc_num = epc_num
self.min_after_dequeue = min_after_dequeue
self.gpu_lst = gpu_lst
self.gpu_num = len(self.gpu_lst.split(','))
self.mdl_dev = '/cpu:%d' if self.gpu_num == 0 else '/gpu:%d'
self.gpu_num = 1 if self.gpu_num == 0 else self.gpu_num
self.fil_num = fil_num
self.num_readers = 16
self.num_threads = 16
self.capacity = self.min_after_dequeue + 3 * self.bat_siz
self.max_num = 100
self.cls_nams = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
self.cls_num = len(self.cls_nams)
self.cls_idx_to_cls_nam = dict(zip(range(self.cls_num), self.cls_nams))
self.cls_nam_to_cls_idx = dict(zip(self.cls_nams, range(self.cls_num)))
############for show###########
self.title = ''
self.figsize = (3, 3)
def ZCA(self, X):
X = X.reshape(X.shape[0], -1)
sigma = np.dot(X.T, X)/X.shape[0]
U,S,V = np.linalg.svd(sigma)
eps = 0.001
Z = np.dot(np.dot(U, np.diag(1.0/np.sqrt(S+eps))), U.T)
X = np.dot(X, Z)
X = X.reshape(-1, 32, 32, 3)
return X
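# ZCA whitening: decorrelate pixels via the SVD of the data covariance, then map
# back with U.T so the result stays image-shaped. The commented-out block in
# make_input() below shows the intended call site: normalize, ZCA, de-normalize,
# then write the TFRecords.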
def make_input(self, num_per_sha=1000000, fil_nam='data_batch'):
fils_dir = 'Mybase/datasets/raw/cifar10/cifar-10-batches-py'
#fils_dir = 'Mybase/datasets/raw/cifar100/cifar-100-python'
rcds_dir = "Mybase/datasets/" + fil_nam
fils_lst = glob.glob(os.path.join(fils_dir, fil_nam+'*'))
imgs_lst = []
lbls_lst = []
for fil in fils_lst:
with open(fil, 'rb') as f:
dats = pickle.load(f, encoding='iso-8859-1')
imgs = dats['data']
lbls = dats['labels']
#lbls = dats['fine_labels']
imgs = imgs.reshape(-1, 3, 32, 32).transpose(0,2,3,1).astype(dtype=np.uint8, copy=False)
lbls = np.asarray(lbls, dtype=np.int64)
imgs_lst.append(imgs)
lbls_lst.append(lbls)
imgs = np.concatenate(imgs_lst)
lbls = np.concatenate(lbls_lst)
'''
imgs = imgs - self.img_avg
imgs = imgs / 255.0
imgs = self.ZCA(imgs)
imgs = imgs * 255.0 + self.img_avg
'''
img_num = imgs.shape[0]
print("The amount of images is %d!" %(img_num))
idxs = np.arange(0, img_num)
np.random.shuffle(idxs)
imgs = imgs[idxs]
lbls = lbls[idxs]
with tf.Graph().as_default(), tf.device('/cpu:0'):
sha_num = int(img_num/num_per_sha)
if sha_num == 0:
sha_num = 1
num_per_sha = img_num
else:
num_per_sha = int(math.ceil(img_num/sha_num))
for sha_idx in range(sha_num):
out_nam = 'cifar_%s.tfrecord' % (fil_nam)
rcd_nam = os.path.join(rcds_dir, out_nam)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
with tf.python_io.TFRecordWriter(rcd_nam, options=options) as writer:
sta_idx = sha_idx * num_per_sha
end_idx = min((sha_idx + 1) * num_per_sha, img_num)
for i in range(sta_idx, end_idx):
if i % 100 == 0:
print("Converting image %d/%d shard %d" % (i + 1, img_num, sha_idx))
img = imgs[i]
img_hgt, img_wdh = img.shape[0], img.shape[1]
if img.size == img_hgt * img_wdh:
print('Gray image at index %d' % i)
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
#img = img.astype(np.uint8)
img = img.astype(np.float32)
assert img.size == img_wdh * img_hgt * 3, '%s' % str(i)
lbl = lbls[i]
# write the TFRecord example
img_raw = img.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(img_hgt),
'image/width': _int64_feature(img_wdh),
'label/label': _int64_feature(lbl),
}))
writer.write(example.SerializeToString())
def resize_image_with_pad(self, img=None):
##################### scale proportionally along the shorter side ######################
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
if self.use_pad:
leh_min = tf.minimum(img_hgt, img_wdh)
leh_max = tf.maximum(img_hgt, img_wdh)
leh_rat = tf.minimum(self.img_siz_min/leh_min, self.img_siz_max/leh_max)
img_hgt = tf.cast(img_hgt*leh_rat, dtype=tf.int32)
img_wdh = tf.cast(img_wdh*leh_rat, dtype=tf.int32)
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_images(img, [img_hgt, img_wdh], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
################ if the longest side is still too long, crop symmetrically about the center ################
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_image_with_crop_or_pad(img, self.img_siz_max, self.img_siz_max)
else:
hgt_rat = self.img_siz_max / img_hgt
wdh_rat = self.img_siz_max / img_wdh
leh_rat = tf.stack([hgt_rat, wdh_rat], axis=0)
leh_rat = tf.tile(leh_rat, [2])
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_images(img, [self.img_siz_max, self.img_siz_max], method=tf.image.ResizeMethod.BILINEAR, \
align_corners=False)
return img
'''
def distort_crop(self, img=None):
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = tf.stack([[0.0, 0.0, img_hgt-1.0, img_wdh-1.0]], axis=0)
if self.use_exp:
exp_rat = tf.random.uniform(shape=[], minval=1.1, maxval=self.exp_rat, dtype=tf.float32)
#exp_rat = self.exp_rat
pad_hgt_all = tf.cast(img_hgt*(exp_rat-1.0), dtype=tf.int32)
pad_wdh_all = tf.cast(img_wdh*(exp_rat-1.0), dtype=tf.int32)
pad_hgt_fnt = tf.random.uniform(shape=[], minval=0, maxval=pad_hgt_all, dtype=tf.int32)
pad_wdh_fnt = tf.random.uniform(shape=[], minval=0, maxval=pad_wdh_all, dtype=tf.int32)
pad_hgt_bak = pad_hgt_all - pad_hgt_fnt
pad_wdh_bak = pad_wdh_all - pad_wdh_fnt
paddings = [[pad_hgt_fnt, pad_hgt_bak], [pad_wdh_fnt, pad_wdh_bak], [0, 0]]
img = tf.pad(img, paddings, "CONSTANT", constant_values=0)
pad_hgt_fnt = tf.cast(pad_hgt_fnt, dtype=tf.float32)
pad_wdh_fnt = tf.cast(pad_wdh_fnt, dtype=tf.float32)
beg = tf.stack([pad_hgt_fnt, pad_wdh_fnt], axis=0)
beg = tf.tile(beg, [2])
boxs = boxs + beg  # after padding, boxes cannot go out of bounds, so no clipping is needed
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
########################crop the image randomly########################
boxs_tmp = boxs / tf.stack([img_hgt-1.0, img_wdh-1.0, img_hgt-1.0, img_wdh-1.0], axis=0)
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(tf.shape(img), bounding_boxes=tf.expand_dims(boxs_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
img = tf.slice(img, box_beg, box_siz)
###########resize image to the expected size with paddings############
img = self.resize_image_with_pad(img)
return img
'''
def distort_crop(self, img=None):
paddings = [[4, 4], [4, 4], [0, 0]]
img = tf.pad(img, paddings, mode='CONSTANT', constant_values=0)
ymn = tf.random.uniform(shape=[], minval=0, maxval=8, dtype=tf.int32)
xmn = tf.random.uniform(shape=[], minval=0, maxval=8, dtype=tf.int32)
ymx = ymn + 32
xmx = xmn + 32
img = img[ymn:ymx, xmn:xmx, :]
return img
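# This is the standard CIFAR augmentation: pad 4 pixels on each side, then take
# a random 32x32 crop (offsets drawn uniformly from [0, 8)).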
def preprocessing(self, img=None):
img = tf.cast(img, dtype=tf.float32)
#################### normalize to the range [0, 1] ######################
#if img.dtype != tf.float32:
# img = tf.image.convert_image_dtype(img, dtype=tf.float32)
if self.mod_tra == True:
####################### photometric (color) distortion #########################
# Randomly distort the colors. There are 4 ways to do it.
# img = apply_with_random_selector(img, lambda x, order: distort_color(x, order), num_cases=4)
img = img - self.img_avg
img = img / 255.0
####################### random crop #########################
img = self.distort_crop(img)
#img= self.resize_image_with_pad(img)
##################### random horizontal flip #######################
img = tf.image.random_flip_left_right(img)
####################### subtract the mean ########################
#img= tf.image.per_image_standardization(img)
else:
img = img - self.img_avg
img = img / 255.0
#img = self.resize_image_with_pad(img)
return img
def get_input(self):
def parse_function(serialized_example):
'''
Fixed-length feature parsing: tf.FixedLenFeature(shape, dtype, default_value)
shape: can double as a reshape, e.g. a vector's shape can be changed from (3,) to (1, 3).
Note: if the feature was written with .tostring(), its shape is ().
dtype: must be one of tf.float32, tf.int64, tf.string.
default_value: the value to use when the feature is missing.
Variable-length feature parsing: tf.VarLenFeature(dtype)
Note: the shape need not be given explicitly, but the resulting tensor is a SparseTensor.
Once a variable-length tensor has been converted to a string its shape is fixed (shape=[]),
so it can be parsed with tf.FixedLenFeature.
'''
parsed_example = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'image/height': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'image/width': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/label': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
#'matrix': tf.VarLenFeature(dtype=dtype('float32')),
#'matrix_shape':tf.FixedLenFeature(shape=(2,), dtype=tf.int64),
}
)
img_hgt = tf.cast(parsed_example['image/height'], tf.int32)
img_wdh = tf.cast(parsed_example['image/width'], tf.int32)
lbl = tf.cast(parsed_example['label/label'], tf.int32)
#img = tf.decode_raw(parsed_example['image/image'], tf.uint8)
img = tf.decode_raw(parsed_example['image/image'], tf.float32)
img = tf.reshape(img, [img_hgt, img_wdh, 3])
img = self.preprocessing(img)
img = tf.reshape(img, [self.img_siz_max, self.img_siz_max, 3])
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'label/label': lbl,
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
imgs_lst = []
lbls_lst = []
if self.fil_num >= self.gpu_num:
fil_pat = os.path.join(self.dat_dir, 'cifar', '*.tfrecord')
dataset = tf.data.Dataset.list_files(file_pattern=fil_pat, shuffle=True, seed=None)
else:
fil_nam = glob.glob(os.path.join(self.dat_dir, 'cifar', '*.tfrecord'))
dataset = tf.data.TFRecordDataset(fil_nam, compression_type='ZLIB', num_parallel_reads=self.num_readers)
for i in range(self.gpu_num):
dat_sha = dataset.shard(num_shards=self.gpu_num, index=i)
if self.fil_num >= self.gpu_num:
#dat_sha= dat_sha.interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
# cycle_length=self.num_readers//self.gpu_num, block_length=1, num_parallel_calls=1)
dat_sha = dat_sha.apply(tf.data.experimental.\
parallel_interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
cycle_length=self.num_readers//self.gpu_num, \
block_length=1, sloppy=True, \
buffer_output_elements=None, \
prefetch_input_elements=None))
#dat_sha = dat_sha.repeat(count=self.epc_num)
#dat_sha = dat_sha.shuffle(buffer_size=self.capacity, seed=None, reshuffle_each_iteration=True)
dat_sha = dat_sha.apply(tf.data.experimental.\
shuffle_and_repeat(buffer_size=self.capacity, count=self.epc_num, seed=None))
#dat_sha = dat_sha.map(parse_function, num_parallel_calls=self.num_threads)
#dat_sha = dat_sha.batch(batch_size=self.bat_siz, drop_remainder=False)
dat_sha = dat_sha.apply(tf.data.experimental.\
map_and_batch(parse_function, batch_size=self.bat_siz, num_parallel_batches=None, \
drop_remainder=False, num_parallel_calls=self.num_threads//self.gpu_num))
#dat_sha = dat_sha.cache(filename=os.path.join(self.dat_dir, 'cache'))
#dat_sha = dat_sha.prefetch(buffer_size=self.bat_siz)
dat_sha = dat_sha.apply(tf.data.experimental.prefetch_to_device(self.mdl_dev%i, buffer_size=None))
iterator = dat_sha.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst.append(example['image/image'])
lbls_lst.append(example['label/label'])
return imgs_lst, lbls_lst
def display_instances(self, img=None, lbl=None, img_hgt=None, img_wdh=None):
_, ax = plt.subplots(1, figsize=self.figsize)
#img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)
#Show area outside image boundaries.
#ax.set_ylim(img_hgt + 10, -10)
#ax.set_xlim(-10, img_wdh + 10)
ax.axis('off')
ax.set_title(str(lbl))
img = img * 255.0
img = img + self.img_avg
img = np.clip(img, 0.0, 255.0)
img = img.astype(dtype=np.uint8, copy=False)
ax.imshow(img)
plt.show()
plt.close()
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, lbls_lst = self.get_input()
imgs = tf.concat(imgs_lst, axis=0)
lbls = tf.concat(lbls_lst, axis=0)
with tf.Session() as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, lbls_kep = sess.run([imgs, lbls])
for i in range(self.bat_siz*self.gpu_num):
img_tmp = imgs_kep[i]
lbl_tmp = lbls_kep[i]
self.display_instances(img_tmp, lbl_tmp)
class GeneratorForImageNet(object):
def __init__(self, mod_tra=True, dat_dir='Mybase/datasets/train', bat_siz=3, epc_num=20, \
min_after_dequeue=30, gpu_lst='0', fil_num=32):
self.mod_tra = mod_tra
self.use_pad = True
self.use_exp = False
self.exp_rat = 2.0
self.img_avg = np.array([123.7, 116.8, 103.9], dtype=np.float32)
self.img_siz_min = 224
self.img_siz_max = 224
self.box_siz_min = 5
self.box_isc_min = 0.5
############for crop###########
self.min_object_covered = 0.30
self.aspect_ratio_range = [0.75, 1.33] #(0.5, 2.0) #(3/4, 4/3)
self.area_range = [0.30, 1.00]
self.max_attempts = 200
self.dat_dir = dat_dir
self.bat_siz = bat_siz
self.epc_num = epc_num
self.min_after_dequeue = min_after_dequeue
self.gpu_lst = gpu_lst
self.gpu_num = len(self.gpu_lst.split(','))
self.mdl_dev = '/cpu:%d' if self.gpu_num == 0 else '/gpu:%d'
self.gpu_num = 1 if self.gpu_num == 0 else self.gpu_num
self.fil_num = fil_num
self.num_readers = 2
self.num_threads = 20
self.bat_siz_all = self.bat_siz * self.gpu_num
self.capacity = self.min_after_dequeue + 3 * self.bat_siz_all
mets_dir = 'Mybase/datasets/raw/ILSVRC/devkit/data'
wnds = self.load_imagenet_meta(os.path.join(mets_dir, 'meta_clsloc.mat'))
self.cls_nams = [wnds[i] for i in range(1000)]
self.cls_num = len(self.cls_nams)
self.cls_idx_to_cls_nam = dict(zip(range(self.cls_num), self.cls_nams))
self.cls_nam_to_cls_idx = dict(zip(self.cls_nams, range(self.cls_num)))
############for show###########
self.title = ''
self.figsize = (12, 12)
def load_imagenet_meta(self, met_dir):
metadata = sio.loadmat(met_dir, struct_as_record=False)
synsets = np.squeeze(metadata['synsets'])
wnids = np.squeeze(np.array([s.WNID for s in synsets]))
return wnids
def make_input(self, num_per_sha=40000, fil_nam='train'):
################# set the image file path here ##################
imgs_dir = "Mybase/datasets/raw/ILSVRC/Data/CLS-LOC/" + fil_nam
############### set the annotation file path here ###############
anns_dir = "Mybase/datasets/raw/ILSVRC/Annotations/CLS-LOC/" + fil_nam
############## set the TFRecord output path here ##############
rcds_dir = "Mybase/datasets/" + fil_nam
imgs_lst = []
lbls_lst = []
if fil_nam == "train":
for key, value in self.cls_nam_to_cls_idx.items():
#for ext in ['jpg', 'png', 'jpeg', 'JPG', 'JPEG']:
#imgs_cls = glob.glob(os.path.join(imgs_dir, key, '*.{}'.format(ext)))
imgs_cls = glob.glob(os.path.join(imgs_dir, key, '*'))
imgs_lst.extend(imgs_cls)
lbls_lst.extend([value]*len(imgs_cls))
else:
#for ext in ['jpg', 'png', 'jpeg', 'JPG', 'JPEG']:
#imgs_lst = glob.glob(os.path.join(imgs_dir, '*.{}'.format(ext)))
imgs_lst = glob.glob(os.path.join(imgs_dir, '*'))
lbls_lst = [-1] * len(imgs_lst)
img_num = len(imgs_lst)
print("The amount of images is %d!" %(img_num))
print("The amount of files is %d!" %(img_num//num_per_sha))
idxs = np.arange(0, img_num)
np.random.shuffle(idxs)
imgs_lst = [imgs_lst[idx] for idx in idxs]
lbls_lst = [lbls_lst[idx] for idx in idxs]
#/data/ziyechen/ILSVRC/Data/CLS-LOC/train/n01440764/n01440764_18.JPEG
with tf.Graph().as_default(), tf.device('/cpu:0'):
sha_num = int(img_num/num_per_sha)
if sha_num == 0:
sha_num = 1
num_per_sha = img_num
else:
num_per_sha = int(math.ceil(img_num/sha_num))
for sha_idx in range(sha_num):
'''
#out_nam = 'imagenet_%05d-of-%05d.tfrecord' % (sha_idx, sha_num)
out_nam_cls = 'imagenet_cls_%s_%d.tfrecord' % (fil_nam, sha_idx)
out_nam_det = 'imagenet_det_%s_%d.tfrecord' % (fil_nam, sha_idx)
rcd_nam_cls = os.path.join(rcds_dir, out_nam_cls)
rcd_nam_det = os.path.join(rcds_dir, out_nam_det)
'''
out_nam = 'imagenet_%s_%d.tfrecord' % (fil_nam, sha_idx)
rcd_nam = os.path.join(rcds_dir, out_nam)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
'''
with tf.python_io.TFRecordWriter(rcd_nam_cls, options=options) as writer_cls, \
tf.python_io.TFRecordWriter(rcd_nam_det, options=options) as writer_det:
'''
with tf.python_io.TFRecordWriter(rcd_nam, options=options) as writer:
sta_idx = sha_idx * num_per_sha
end_idx = min((sha_idx + 1) * num_per_sha, img_num)
for i in range(sta_idx, end_idx):
if i % 100 == 0:
print("Converting image %d/%d shard %d" % (i + 1, img_num, sha_idx))
# read the image
img_nam = imgs_lst[i]
img = cv2.imread(img_nam)
if type(img) != np.ndarray:
print("Failed to find image %s" %(imgs_lst[i]))
continue
img_hgt, img_wdh = img.shape[0], img.shape[1]
if img.size == img_hgt * img_wdh:
print ('Gray Image %s' %(imgs_lst[i]))
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
img = img.astype(np.uint8)
assert img.size == img_wdh * img_hgt * 3, '%s' % str(i)
img = img[:, :, ::-1]
# read the label
lbl = lbls_lst[i]
ann = img_nam.replace('Data', 'Annotations')
ann = ann.split('.')
ann[-1] = 'xml'
ann = '.'.join(ann)
if not os.path.exists(ann):
gbxs = np.array([[0.0, 0.0, img_hgt-1.0, img_wdh-1.0, lbl]], dtype=np.float32)
det = False
else:
tree = ET.parse(ann)
objs = tree.findall('object') #list
#img_siz = tree.find('size')
#img_hgt = float(img_siz.find('height').text)
#img_wdh = float(img_siz.find('width' ).text)
boxs = []
clss = []
for idx, obj in enumerate(objs):
box = obj.find('bndbox')
box_ymn = float(box.find('ymin').text)
box_xmn = float(box.find('xmin').text)
box_ymx = float(box.find('ymax').text)
box_xmx = float(box.find('xmax').text)
cls = self.cls_nam_to_cls_idx[obj.find('name').text.lower().strip()]
if lbl != -1:
assert lbl == cls, "label is wrong!"
else:
lbl = cls
dif = obj.find('difficult')
dif = 0 if dif is None else int(dif.text)
if dif: cls *= -1
boxs.append([box_ymn, box_xmn, box_ymx, box_xmx])
clss.append([cls])
boxs = np.asarray(boxs, dtype=np.float32)
clss = np.asarray(clss, dtype=np.float32)
boxs = bbox_clip_py(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
gbxs = np.concatenate([boxs, clss], axis=-1)
if len(gbxs) == 0:
print("No gt_boxes in this image!")
continue
det = True
# write the TFRecord example
img_raw = img.tostring()
gbxs_raw = gbxs.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(img_hgt),
'image/width': _int64_feature(img_wdh),
'label/label': _int64_feature(lbl),
'label/num_instances': _int64_feature(gbxs.shape[0]), # N
'label/gt_boxes': _bytes_feature(gbxs_raw), # of shape (N, 5), (ymin, xmin, ymax, xmax, classid)
}))
writer.write(example.SerializeToString())
'''
if det:
writer_det.write(example.SerializeToString())
else:
writer_cls.write(example.SerializeToString())
'''
def resize_image_with_pad(self, img=None):
##################### scale proportionally along the shorter side ######################
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
if self.use_pad:
leh_min = tf.minimum(img_hgt, img_wdh)
leh_max = tf.maximum(img_hgt, img_wdh)
leh_rat = tf.minimum(self.img_siz_min/leh_min, self.img_siz_max/leh_max)
img_hgt = tf.cast(img_hgt*leh_rat, dtype=tf.int32)
img_wdh = tf.cast(img_wdh*leh_rat, dtype=tf.int32)
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_images(img, [img_hgt, img_wdh], \
method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
################ if the longest side is still too long, crop symmetrically about the center ################
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_image_with_crop_or_pad(img, self.img_siz_max, self.img_siz_max)
else:
hgt_rat = self.img_siz_max / img_hgt
wdh_rat = self.img_siz_max / img_wdh
leh_rat = tf.stack([hgt_rat, wdh_rat], axis=0)
leh_rat = tf.tile(leh_rat, [2])
# after transforming the image, transform the boxes accordingly
img = tf.image.resize_images(img, [self.img_siz_max, self.img_siz_max], \
method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
return img
'''
def distort_crop(self, img=None, gbxs=None):
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
#boxs = gbxs[:, :-1]
#boxs = bbox_clip(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
boxs = tf.stack([0.0, 0.0, img_hgt-1.0, img_wdh-1.0], axis=0)
if self.use_exp:
exp_rat = tf.random.uniform(shape=[], minval=1.1, maxval=self.exp_rat, dtype=tf.float32)
#exp_rat = self.exp_rat
pad_hgt_all = tf.cast(img_hgt*(exp_rat-1.0), dtype=tf.int32)
pad_wdh_all = tf.cast(img_wdh*(exp_rat-1.0), dtype=tf.int32)
pad_hgt_fnt = tf.random.uniform(shape=[], minval=0, maxval=pad_hgt_all, dtype=tf.int32)
pad_wdh_fnt = tf.random.uniform(shape=[], minval=0, maxval=pad_wdh_all, dtype=tf.int32)
pad_hgt_bak = pad_hgt_all - pad_hgt_fnt
pad_wdh_bak = pad_wdh_all - pad_wdh_fnt
paddings = [[pad_hgt_fnt, pad_hgt_bak], [pad_wdh_fnt, pad_wdh_bak], [0, 0]]
img = tf.pad(img, paddings, "CONSTANT", constant_values=0)
pad_hgt_fnt = tf.cast(pad_hgt_fnt, dtype=tf.float32)
pad_wdh_fnt = tf.cast(pad_wdh_fnt, dtype=tf.float32)
beg = tf.stack([pad_hgt_fnt, pad_wdh_fnt], axis=0)
beg = tf.tile(beg, [2])
boxs = boxs + beg  # after padding, boxes cannot go out of bounds, so no clipping is needed
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
########################crop the image randomly########################
boxs_tmp = boxs / tf.stack([[img_hgt-1.0, img_wdh-1.0, img_hgt-1.0, img_wdh-1.0]], axis=0)
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(tf.shape(img), bounding_boxes=tf.expand_dims(boxs_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
img = tf.slice(img, box_beg, box_siz)
###########resize image to the expected size with paddings############
img = self.resize_image_with_pad(img)
return img
'''
def distort_crop(self, img=None, img_siz_min=[256, 480]):
img_siz_min = tf.random.uniform(shape=[], minval=img_siz_min[0], maxval=img_siz_min[1], dtype=tf.float32)
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
leh_min = tf.minimum(img_hgt, img_wdh)
leh_rat = img_siz_min / leh_min
img_hgt = tf.cast(img_hgt*leh_rat, dtype=tf.int32)
img_wdh = tf.cast(img_wdh*leh_rat, dtype=tf.int32)
img = tf.image.resize_images(img, [img_hgt, img_wdh], \
method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
img = tf.image.random_crop(img, [self.img_siz_max, self.img_siz_max, 3])
return img
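# Scale augmentation: resize so the shorter side is a random length in
# [img_siz_min[0], img_siz_min[1]), then take a random img_siz_max crop.
# preprocessing() below passes [256, 480] for training and [256, 257] for eval.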
def preprocessing(self, img=None, gbxs=None):
if self.mod_tra:
#img= self.distort_crop(img, gbxs)
#img= self.resize_image_with_pad(img)
img = self.distort_crop(img, [256, 480])
else:
img = self.distort_crop(img, [256, 257])
#img= self.resize_image_with_pad(img)
return img
def preprocessing1(self, imgs=None):
imgs = tf.cast(imgs, dtype=tf.float32)
if self.mod_tra:
imgs = tf.image.random_flip_left_right(imgs)
imgs = apply_with_random_selector(imgs, lambda x, order: distort_color(x, order), num_cases=4)
imgs = imgs - self.img_avg
imgs = imgs / 255.0
else:
imgs = imgs - self.img_avg
imgs = imgs / 255.0
return imgs
def get_input(self):
def parse_function(serialized_example):
'''
Fixed-length feature parsing: tf.FixedLenFeature(shape, dtype, default_value)
shape: can double as a reshape, e.g. a vector's shape can be changed from (3,) to (1, 3).
Note: if the feature was written with .tostring(), its shape is ().
dtype: must be one of tf.float32, tf.int64, tf.string.
default_value: the value to use when the feature is missing.
Variable-length feature parsing: tf.VarLenFeature(dtype)
Note: the shape need not be given explicitly, but the resulting tensor is a SparseTensor.
Once a variable-length tensor has been converted to a string its shape is fixed (shape=[]),
so it can be parsed with tf.FixedLenFeature.
'''
parsed_example = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'image/height': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'image/width': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/label': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/num_instances': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/gt_boxes': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
#'matrix': tf.VarLenFeature(dtype=dtype('float32')),
#'matrix_shape':tf.FixedLenFeature(shape=(2,), dtype=tf.int64),
}
)
img_hgt = tf.cast(parsed_example['image/height'], tf.int32)
img_wdh = tf.cast(parsed_example['image/width'], tf.int32)
lbl = tf.cast(parsed_example['label/label'], tf.int32)
gbx_num = tf.cast(parsed_example['label/num_instances'],tf.int32)
img = tf.decode_raw(parsed_example['image/image'], tf.uint8 )
gbxs = tf.decode_raw(parsed_example['label/gt_boxes'], tf.float32)
img = tf.reshape(img, [img_hgt, img_wdh, 3])
gbxs = tf.reshape(gbxs, [gbx_num, 5])
img = self.preprocessing(img, gbxs)
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'label/label': lbl,
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
fil_pat = os.path.join(self.dat_dir, 'imagenet', '*.tfrecord')
dataset = tf.data.Dataset.list_files(file_pattern=fil_pat, shuffle=True, seed=None)
dataset = dataset.prefetch(buffer_size=self.num_readers)
dataset = dataset.shuffle(buffer_size=self.fil_num+self.num_readers, seed=None, reshuffle_each_iteration=True)
dataset = dataset.apply(tf.data.experimental.\
parallel_interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
cycle_length=self.num_readers, \
block_length=10, sloppy=True, \
buffer_output_elements=10, \
prefetch_input_elements=10))
dataset = dataset.prefetch(buffer_size=self.bat_siz_all)
dataset = dataset.map(parse_function, num_parallel_calls=self.num_threads)
dataset = dataset.apply(tf.data.experimental.\
shuffle_and_repeat(buffer_size=self.capacity, count=self.epc_num, seed=None))
dataset = dataset.batch(batch_size=self.bat_siz_all, drop_remainder=True)
#dataset = dataset.apply(tf.data.experimental.\
# map_and_batch(parse_function, batch_size=self.bat_siz, num_parallel_batches=None, \
# drop_remainder=False, num_parallel_calls=self.num_threads))
dataset = dataset.prefetch(buffer_size=1)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst = tf.split(example['image/image'], self.gpu_num, axis=0)
lbls_lst = tf.split(example['label/label'], self.gpu_num, axis=0)
return imgs_lst, lbls_lst
"""
def get_input(self):
# Create the file list, and build the input file queue from it.
# Before invoking the input pipeline, all raw data must be converted to a uniform format and stored in TFRecord files.
# The file list should contain every TFRecord file that provides training data.
filename = os.path.join(self.dat_dir, 'imagenet', '*.tfrecord')
files = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(files, shuffle=True, capacity=1000)
# parse the data stored in the TFRecord files
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
reader = tf.TFRecordReader(options=options)
_, serialized_example = reader.read(filename_queue)
parsed_example = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'image/height': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'image/width': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/label': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/num_instances': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/gt_boxes': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
}
)
img_hgt = tf.cast(parsed_example['image/height'], tf.int32)
img_wdh = tf.cast(parsed_example['image/width'], tf.int32)
lbl = tf.cast(parsed_example['label/label'], tf.int32)
gbx_num = tf.cast(parsed_example['label/num_instances'],tf.int32)
img = tf.decode_raw(parsed_example['image/image'], tf.uint8 )
gbxs = tf.decode_raw(parsed_example['label/gt_boxes'], tf.float32)
img = tf.reshape(img, [img_hgt, img_wdh, 3])
gbxs = tf.reshape(gbxs, [gbx_num, 5])
img = self.preprocessing(img, gbxs)
img = tf.reshape(img, [self.img_siz_max, self.img_siz_max, 3])
#tf.train.shuffle_batch_join
imgs, lbls = tf.train.shuffle_batch(tensors=[img, lbl], batch_size=self.bat_siz_all, num_threads=self.num_threads, \
capacity=self.capacity, min_after_dequeue=self.min_after_dequeue)
imgs_lst = tf.split(imgs, self.gpu_num, axis=0)
lbls_lst = tf.split(lbls, self.gpu_num, axis=0)
return imgs_lst, lbls_lst
"""
def display_instances(self, img=None, lbl=None, img_hgt=None, img_wdh=None):
_, ax = plt.subplots(1, figsize=self.figsize)
#img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)
#Show area outside image boundaries.
#ax.set_ylim(img_hgt + 10, -10)
#ax.set_xlim(-10, img_wdh + 10)
ax.axis('off')
ax.set_title(str(lbl))
img = img * 255.0
img = img + self.img_avg
img = np.clip(img, 0.0, 255.0)
img = img.astype(dtype=np.uint8, copy=False)
ax.imshow(img)
plt.show()
plt.close()
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, lbls_lst = self.get_input()
imgs = tf.concat(imgs_lst, axis=0)
lbls = tf.concat(lbls_lst, axis=0)
with tf.Session() as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, lbls_kep = sess.run([imgs, lbls])
for i in range(self.bat_siz*self.gpu_num):
img_tmp = imgs_kep[i]
lbl_tmp = lbls_kep[i]
self.display_instances(img_tmp, lbl_tmp)
from Mybase.datasets.raw.coco.PythonAPI.pycocotools.coco import *
class GeneratorForCOCO(object):
def __init__(self, mod_tra=True, dat_dir=None, bat_siz=3, epc_num=20, min_after_dequeue=20, \
gpu_lst='0', fil_num=32, tst_shw=True, tst_sav=False, tst_dir=None):
self.mod_tra = mod_tra
self.use_pad = True
self.use_exp = False
self.exp_rat = 2.0
self.img_avg = np.array([123.7, 116.8, 103.9], dtype=np.float32)
self.img_siz_min = 800 #800 #700 #400
self.img_siz_max = 1025 #1025 #897 #513
self.box_siz_min = 5
self.box_isc_min = 0.5
self.box_msk_siz = [126, 126]
self.msk_min = 0.5
############for crop###########
self.min_object_covered = 0.5
self.aspect_ratio_range = (0.5, 2.0)
self.area_range = (0.1, 1.0)
self.max_attempts = 200
self.dat_dir = dat_dir
self.bat_siz = bat_siz
self.epc_num = epc_num
self.min_after_dequeue = min_after_dequeue
self.gpu_lst = gpu_lst
self.gpu_num = len(self.gpu_lst.split(','))
self.mdl_dev = '/cpu:%d' if self.gpu_num == 0 else '/gpu:%d'
self.gpu_num = 1 if self.gpu_num == 0 else self.gpu_num
self.fil_num = fil_num
self.num_readers = 2
self.num_threads = 20
self.bat_siz_all = self.bat_siz * self.gpu_num
self.capacity = self.min_after_dequeue + 3 * self.bat_siz_all
self.max_num = 100
self.tst_shw = tst_shw
self.tst_sav = tst_sav
self.tst_dir = tst_dir
self.cls_nams = ['background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
self.cls_num = len(self.cls_nams)
self.cls_idx_to_rel_idx = \
{1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16,
18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30,
35: 31, 36: 32, 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40, 46: 41, 47: 42, 48: 43, 49: 44,
50: 45, 51: 46, 52: 47, 53: 48, 54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56, 62: 57, 63: 58,
64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64, 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72,
82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}
self.rel_idx_to_cls_idx = \
{1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 13, 13: 14, 14: 15, 15: 16, 16: 17,
17: 18, 18: 19, 19: 20, 20: 21, 21: 22, 22: 23, 23: 24, 24: 25, 25: 27, 26: 28, 27: 31, 28: 32, 29: 33, 30: 34,
31: 35, 32: 36, 33: 37, 34: 38, 35: 39, 36: 40, 37: 41, 38: 42, 39: 43, 40: 44, 41: 46, 42: 47, 43: 48, 44: 49,
45: 50, 46: 51, 47: 52, 48: 53, 49: 54, 50: 55, 51: 56, 52: 57, 53: 58, 54: 59, 55: 60, 56: 61, 57: 62, 58: 63,
59: 64, 60: 65, 61: 67, 62: 70, 63: 72, 64: 73, 65: 74, 66: 75, 67: 76, 68: 77, 69: 78, 70: 79, 71: 80, 72: 81,
73: 82, 74: 84, 75: 85, 76: 86, 77: 87, 78: 88, 79: 89, 80: 90}
self.cls_idx_to_cls_nam = dict(zip(range(self.cls_num), self.cls_nams))
self.cls_nam_to_cls_idx = dict(zip(self.cls_nams, range(self.cls_num)))
########for test######
        if self.tst_dir is not None:
            self.imgs_lst_tst = []
            for ext in ['jpg', 'png', 'jpeg', 'JPG']:
                self.imgs_lst_tst.extend(glob.glob(os.path.join(self.tst_dir, '*.{}'.format(ext))))
            self.anns_lst_tst = []
            self.gbxs_lst_tst = [] # use_gbx==True is not supported yet
self.img_num_tst = len(self.imgs_lst_tst)
self.get_idx = 0
########for show######
self.title = ''
self.figsize = (15, 15)
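    # get_coco_masks converts the COCO annotations of one image into training
    # targets: gbxs is (N, 5) boxes as (ymin, xmin, ymax, xmax, class), inss is
    # an (N, H, W) stack of binary instance masks, and sems is an (H, W)
    # semantic map painted with class ids. Crowd regions keep their mask but
    # get a negated class id so later stages can ignore them. A minimal usage
    # sketch (gen is an assumed GeneratorForCOCO instance; the annotation path
    # is illustrative):
    #   coco = COCO('annotations/instances_minival2014.json')
    #   img_idx = list(coco.imgs.keys())[0]
    #   info = coco.imgs[img_idx]
    #   gbxs, inss, sems = gen.get_coco_masks(coco, img_idx, info['height'],
    #                                         info['width'], info['file_name'])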
def get_coco_masks(self, coco, img_idx, img_hgt, img_wdh, img_nam):
ann_idxs = coco.getAnnIds(imgIds=[img_idx], iscrowd=None)
if len(ann_idxs) == 0:
            print('There are no annotations for %s' % img_nam)
return None, None, None
anns = coco.loadAnns(ann_idxs)
boxs = []
clss = []
inss = []
sems = np.zeros((img_hgt, img_wdh), dtype=np.uint8)
for ann in anns:
cls = self.cls_idx_to_rel_idx[ann['category_id']]
            ins = coco.annToMask(ann) # zero-one mask; ann here is a single annotation dict
ins = ins.astype(dtype=np.uint8, copy=False)
if ins.max() < 1:
continue
if ann['iscrowd']:
cls *= -1
if ins.shape[0]!=img_hgt or ins.shape[1]!=img_wdh:
ins = np.ones([img_hgt, img_wdh], dtype=np.uint8)
assert ins.shape[0]==img_hgt and ins.shape[1]==img_wdh, 'image %s and ann %s do not match' % (img_idx, ann)
boxs.append(ann['bbox'])
clss.append(cls)
inss.append(ins)
sem = ins * cls
sems[sem>0] = sem[sem>0]
boxs = np.asarray(boxs, dtype=np.float32)
clss = np.asarray(clss, dtype=np.float32)
inss = np.asarray(inss, dtype=np.uint8 )
        if boxs.shape[0] <= 0:
            print('There are no annotations for %s' % img_nam)
            return None, None, None
boxs[:, 2] = boxs[:, 0] + boxs[:, 2]
boxs[:, 3] = boxs[:, 1] + boxs[:, 3]
boxs = np.stack([boxs[:, 1], boxs[:, 0], boxs[:, 3], boxs[:, 2]], axis=-1)#ymin, xmin, ymax, xmax
gbxs = np.concatenate([boxs, clss[:, np.newaxis]], axis=-1)
        if inss.shape[0] != gbxs.shape[0]:
            print('Shape Error for %s' % img_nam)
            return None, None, None
return gbxs, inss, sems
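    # make_input serializes the dataset into ZLIB-compressed TFRecord shards:
    # with num_per_sha=500000 and fewer images than that, everything lands in
    # a single shard, otherwise images are split across ceil(img_num/sha_num)
    # records per shard. Boxes are converted from COCO's (x, y, w, h) format
    # to absolute (ymin, xmin, ymax, xmax, class) rows before writing.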
def make_input(self, num_per_sha=500000):
        ############## set the image directory here ##############
        imgs_dir = "Mybase/datasets/raw/coco"
        ############## set the annotation directory here ##############
        anns_dir = "Mybase/datasets/raw/coco/annotations"
        ############## set the tfrecords output directory here ##############
        rcds_dir = "Mybase/datasets"
spl_nams = ['minival2014']
#spl_nams = ['train2014', 'valminusminival2014', 'train2017', 'val2017']
with tf.Graph().as_default(), tf.device('/cpu:0'):
imgs = []
cocs = []
for coc_idx, spl_nam in enumerate(spl_nams):
ann_fil = os.path.join(anns_dir, 'instances_%s.json' % (spl_nam))
cocs.append(COCO(ann_fil))
imgs.extend([(coc_idx, img_idx, spl_nam, cocs[coc_idx].imgs[img_idx]) for img_idx in cocs[coc_idx].imgs])
img_num = int(len(imgs))
sha_num = int(img_num/num_per_sha)
print('The dataset has %d images' %(img_num))
np.random.shuffle(imgs)
if sha_num == 0:
sha_num = 1
num_per_sha = img_num
else:
num_per_sha = int(math.ceil(img_num/sha_num))
for sha_idx in range(sha_num):
out_nam = 'coco_%05d-of-%05d.tfrecord' % (sha_idx, sha_num)
rcd_nam = os.path.join(rcds_dir, out_nam)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
with tf.python_io.TFRecordWriter(rcd_nam, options=options) as writer:
sta_idx = sha_idx * num_per_sha
end_idx = min((sha_idx+1)*num_per_sha, img_num)
for i in range(sta_idx, end_idx):
if i % 50 == 0:
print("Converting image %d shard %d" % (i+1, sha_idx))
coc_idx = imgs[i][0]
img_idx = imgs[i][1]
spl_nam = imgs[i][2]
img_nam = imgs[i][3]['file_name']
#split = img_name.split('_')[1]
if spl_nam == 'valminusminival2014' or spl_nam == 'minival2014':
spl_nam = 'val2014'
img_nam = os.path.join(imgs_dir, spl_nam, img_nam)
img_hgt, img_wdh = imgs[i][3]['height'], imgs[i][3]['width']
gbxs, gmk_inss, gmk_sems = self.get_coco_masks(cocs[coc_idx], img_idx, img_hgt, img_wdh, img_nam)
if not isinstance(gbxs, np.ndarray):
continue
img = cv2.imread(img_nam)
if not isinstance(img, np.ndarray):
print("Failed to find image %s" %(img_nam))
continue
img_hgt, img_wdh = img.shape[0], img.shape[1]
if img.size == img_hgt * img_wdh:
print ('Gray Image %s' %(img_nam))
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
img = img.astype(np.uint8)
assert img.size == img_wdh * img_hgt * 3, '%s' % str(i)
img = img[:, :, ::-1]
img_raw = img.tostring()
gbxs_raw = gbxs.tostring()
gmk_inss_raw = gmk_inss.tostring()
gmk_sems_raw = gmk_sems.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/img_id': _int64_feature(img_idx),
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(img_hgt),
'image/width': _int64_feature(img_wdh),
'label/num_instances': _int64_feature(gbxs.shape[0]), # N
'label/gt_boxes': _bytes_feature(gbxs_raw), # (N, ymin, xmin, ymax, xmax, classid)
'label/gt_mask_inss': _bytes_feature(gmk_inss_raw), # (N, H, W)
'label/gt_mask_sems': _bytes_feature(gmk_sems_raw) # (H, W)
}))
writer.write(example.SerializeToString())
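    # resize_image_with_pad letterboxes the image into a self.img_siz square:
    # the image is scaled by img_siz / max(H, W), centered, and zero-padded;
    # boxes are scaled by the same ratio and shifted by the padding offset,
    # and img_wdw records where the real image sits inside the square.
    # Worked example (assuming img_siz = 512): a 400x800 image scales by
    # 512/800 = 0.64 to 256x512, leaving 512-256 = 256 rows of padding split
    # as 128 on top, so a box corner (y, x) maps to (0.64*y + 128, 0.64*x).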
def resize_image_with_pad(self, img=None, gbxs=None, gmk_inss=None, gmk_sems=None):
        # no cropping happens here, so gmk_inss needs no extra handling
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = gbxs[:, :-1]
clss = gbxs[:, -1]
boxs = bbox_clip(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
gmk_sems = tf.expand_dims(gmk_sems, axis=-1)
if self.use_pad:
img = tf.image.resize_image_with_pad(img, self.img_siz, self.img_siz, \
method=tf.image.ResizeMethod.BILINEAR)
gmk_sems = tf.image.resize_image_with_pad(gmk_sems, self.img_siz, self.img_siz, \
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            # transform the boxes to match the image transform
leh_min = tf.minimum(img_hgt, img_wdh)
leh_max = tf.maximum(img_hgt, img_wdh)
leh_rat = tf.minimum(self.img_siz/leh_min, self.img_siz/leh_max)
boxs = boxs * leh_rat
            # add the padding offset to the absolute coordinates
img_hgt = img_hgt * leh_rat
img_wdh = img_wdh * leh_rat
pad_hgt = self.img_siz - img_hgt
pad_wdh = self.img_siz - img_wdh
pad_top = tf.round(pad_hgt/2.0)
pad_lft = tf.round(pad_wdh/2.0)
beg = tf.stack([pad_top, pad_lft], axis=0)
beg = tf.tile(beg, [2])
            boxs = boxs + beg # these boxes stay inside the real image region, no clipping needed
img_wdw = tf.stack([pad_top, pad_lft, pad_top+img_hgt-1, pad_lft+img_wdh-1], axis=0)
else:
img = tf.image.resize_images(img, [self.img_siz, self.img_siz], \
method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
gmk_sems = tf.image.resize_images(gmk_sems, [self.img_siz, self.img_siz], \
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True)
            # transform the boxes to match the image transform
hgt_rat = self.img_siz / img_hgt
wdh_rat = self.img_siz / img_wdh
leh_rat = tf.stack([hgt_rat, wdh_rat], axis=0)
leh_rat = tf.tile(leh_rat, [2])
boxs = boxs * leh_rat
img_wdw = tf.constant([0, 0, self.img_siz-1, self.img_siz-1], dtype=tf.int32)
        # assemble gt_boxes
        clss = tf.expand_dims(clss, axis=-1)
        gbxs = tf.concat([boxs, clss], axis=-1)
        gbx_tmp = tf.zeros(shape=[1, 5], dtype=tf.float32) # guard against having no gt_box at all
gbxs = tf.concat([gbxs, gbx_tmp], axis=0)
ins_tmp = tf.zeros(shape=[1]+self.box_msk_siz, dtype=tf.float32)
gmk_inss = tf.concat([gmk_inss, ins_tmp], axis=0)
gmk_sems = tf.squeeze(gmk_sems, axis=[-1])
return img, gbxs, gmk_inss, gmk_sems, img_wdw
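    # distort_crop is the training-time augmentation: optionally expand the
    # canvas by a random ratio (use_exp), take a random crop via
    # tf.image.sample_distorted_bounding_box constrained by min_object_covered,
    # aspect_ratio_range and area_range, clip the boxes to the crop, flag boxes
    # whose surviving intersection ratio falls below box_isc_min with class -1,
    # re-crop the instance masks to box_msk_siz, and finally letterbox the
    # result to img_siz via resize_image_with_pad.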
def distort_crop(self, img=None, gbxs=None, gmk_inss=None, gmk_sems=None):
#gmk_inss --> (M, h, w)
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = gbxs[:, :-1]
clss = gbxs[:, -1]
boxs = bbox_clip(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
if self.use_exp:
exp_rat = tf.random.uniform(shape=[], minval=1.1, maxval=self.exp_rat, dtype=tf.float32)
#exp_rat = self.exp_rat
pad_hgt = tf.cast(img_hgt*(exp_rat-1.0), dtype=tf.int32)
pad_wdh = tf.cast(img_wdh*(exp_rat-1.0), dtype=tf.int32)
            pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_hgt, dtype=tf.int32)
            pad_lft = tf.random.uniform(shape=[], minval=0, maxval=pad_wdh, dtype=tf.int32)
pad_btm = pad_hgt - pad_top
pad_rgt = pad_wdh - pad_lft
paddings = [[pad_top, pad_btm], [pad_lft, pad_rgt], [0, 0]]
img = tf.pad(img, paddings, "CONSTANT", constant_values=0)
paddings = [[pad_top, pad_btm], [pad_lft, pad_rgt]]
gmk_sems = tf.pad(gmk_sems, paddings, "CONSTANT", constant_values=0)
            # padding cannot push boxes out of bounds, so boxs and gmk_inss need no clipping
pad_top = tf.cast(pad_top, dtype=tf.float32)
pad_lft = tf.cast(pad_lft, dtype=tf.float32)
beg = tf.stack([pad_top, pad_lft], axis=0)
beg = tf.tile(beg, [2])
boxs = boxs + beg
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
########################crop the image randomly########################
ncw_idxs = tf.where(clss>0)
boxs_tmp = tf.gather_nd(boxs, ncw_idxs)
boxs_tmp = boxs_tmp / tf.stack([img_hgt-1.0, img_wdh-1.0, img_hgt-1.0, img_wdh-1.0], axis=0)
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(tf.shape(img), bounding_boxes=tf.expand_dims(boxs_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
        # transform the boxes to match the crop
img = tf.slice(img, box_beg, box_siz)
gmk_sems = tf.slice(gmk_sems, box_beg[:-1], box_siz[:-1])
img_hgt = tf.cast(box_siz[0], dtype=tf.float32)
img_wdh = tf.cast(box_siz[1], dtype=tf.float32)
        # shift the absolute coordinates by the crop offset
beg = tf.cast(box_beg[0:2], dtype=tf.float32)
beg = tf.tile(beg, [2])
boxs_tmp = boxs - beg
        # keep boxes from crossing the crop boundary
        boxs = bbox_clip(boxs_tmp, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
        # flag boxes and masks that are mostly cropped away (class set to -1 below)
box_iscs = bbox_intersects1(boxs_tmp, boxs)
idxs = tf.where(box_iscs<self.box_isc_min)
clss = tensor_update(clss, idxs, -1)
#idxs = tf.where(box_iscs>=self.box_isc_min)
#boxs = tf.gather_nd(boxs, idxs)
#clss = tf.gather_nd(clss, idxs)
#gmk_inss= tf.gather_nd(gmk_inss, idxs)
clss = tf.expand_dims(clss, axis=-1)
gbxs = tf.concat([boxs, clss], axis=-1)
        # recompute gmk_inss for the cropped boxes
begs = tf.stack([boxs_tmp[:, 0], boxs_tmp[:, 1], boxs_tmp[:, 0], boxs_tmp[:, 1]], axis=-1)
lehs = tf.stack([boxs_tmp[:, 2]-boxs_tmp[:, 0], boxs_tmp[:, 3]-boxs_tmp[:, 1],
boxs_tmp[:, 2]-boxs_tmp[:, 0], boxs_tmp[:, 3]-boxs_tmp[:, 1]], axis=-1)
boxs_tmp = (boxs - begs) / lehs
idxs = tf.range(tf.shape(boxs_tmp)[0])
gmk_inss = tf.expand_dims(gmk_inss, axis=-1) #(M, H, W, 1)
        gmk_inss = tf.image.crop_and_resize(gmk_inss, boxs_tmp, idxs, self.box_msk_siz, method='bilinear') #(M, 126, 126, 1)
        gmk_inss = tf.squeeze(gmk_inss, axis=[-1]) #(M, 126, 126)
###########resize image to the expected size with paddings############
img, gbxs, gmk_inss, gmk_sems, img_wdw = self.resize_image_with_pad(img, gbxs, gmk_inss, gmk_sems)
return img, gbxs, gmk_inss, gmk_sems, img_wdw
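    # preprocessing turns full-image instance masks into fixed-size box-local
    # targets: each (H, W) mask is cropped to its own box (normalized to
    # [0, 1] image coordinates) and resampled to box_msk_siz, which is the
    # mask resolution used everywhere else in the pipeline.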
def preprocessing(self, img=None, gbxs=None, gmk_inss=None):
img_shp = tf.shape(img) #[H, W, C]
box_leh = tf.cast(tf.tile(img_shp[:2], [2]), dtype=tf.float32) #[H, W, H, W]
boxs_tmp = gbxs[:,:-1] / box_leh #(M, 4)
idxs = tf.range(tf.shape(boxs_tmp)[0])
gmk_inss = tf.expand_dims(gmk_inss, axis=-1) #(M, H, W, 1)
        gmk_inss = tf.image.crop_and_resize(gmk_inss, boxs_tmp, idxs, self.box_msk_siz, method='bilinear') #(M, 126, 126, 1)
        gmk_inss = tf.squeeze(gmk_inss, axis=[-1]) #(M, 126, 126)
return gmk_inss
def preprocessing0(self, elms=None):
img, gbxs, gmk_inss, gmk_sems, gbx_num, img_hgt, img_wdh = elms
img = img[:img_hgt, :img_wdh, :]
gbxs = gbxs[:gbx_num, :]
gmk_inss = gmk_inss[:gbx_num, :, :]
gmk_sems = gmk_sems[:img_hgt, :img_wdh]
if self.mod_tra:
img, gbxs, gmk_inss, gmk_sems, img_wdw = self.distort_crop(img, gbxs, gmk_inss, gmk_sems)
else:
img, gbxs, gmk_inss, gmk_sems, img_wdw = self.resize_image_with_pad(img, gbxs, gmk_inss, gmk_sems)
gbx_num = tf.shape(gbxs)[0]
paddings = [[0, self.max_num-gbx_num], [0, 0]]
gbxs = tf.pad(gbxs, paddings, "CONSTANT", constant_values=0)
paddings = [[0, self.max_num-gbx_num], [0, 0], [0, 0]]
gmk_inss = tf.pad(gmk_inss, paddings, "CONSTANT", constant_values=0)
return img, gbxs, gmk_inss, gmk_sems, gbx_num, img_wdw
def preprocessing1(self, imgs=None, gbxs=None, gmk_inss=None, gmk_sems=None, gbx_nums=None, img_hgts=None, img_wdhs=None):
imgs = tf.cast(imgs, dtype=tf.float32)
gmk_sems = tf.cast(gmk_sems, dtype=tf.int32 )
elms = [imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_hgts, img_wdhs]
if self.mod_tra:
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
tf.map_fn(self.preprocessing0, elms, \
dtype=(tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.float32),
parallel_iterations=self.bat_siz, back_prop=False, swap_memory=False, infer_shape=True)
            ###################### random horizontal flip ##########################
sig = tf.random.uniform(shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
#imgs = tf.image.random_flip_left_right(imgs) #(N, H, W, C)
imgs_flr = tf.image.flip_left_right(imgs) #(N, H, W, C)
gmk_sems_flr = tf.expand_dims(gmk_sems, axis=-1) #(N, H, W, 1)
gmk_sems_flr = tf.image.flip_left_right(gmk_sems_flr) #(N, H, W, 1)
gmk_sems_flr = tf.squeeze(gmk_sems_flr, axis=[-1]) #(N, H, W)
gmk_inss_flr = tf.transpose(gmk_inss, [0,2,3,1]) #(N, H, W, M)
gmk_inss_flr = tf.image.flip_left_right(gmk_inss_flr) #(N, H, W, M)
gmk_inss_flr = tf.transpose(gmk_inss_flr, [0,3,1,2]) #(N, M, H, W)
gbxs_flr = tf.stack([gbxs[:,:,0], self.img_siz-1.0-gbxs[:,:,3], \
gbxs[:,:,2], self.img_siz-1.0-gbxs[:,:,1], gbxs[:,:,4]], axis=-1)
img_wdws_flr = tf.stack([img_wdws[:,0], self.img_siz-1.0-img_wdws[:,3], \
img_wdws[:,2], self.img_siz-1.0-img_wdws[:,1]], axis=-1)
imgs = tf.cond(sig<0.5, lambda: imgs_flr, lambda: imgs )
gmk_sems = tf.cond(sig<0.5, lambda: gmk_sems_flr, lambda: gmk_sems)
gmk_inss = tf.cond(sig<0.5, lambda: gmk_inss_flr, lambda: gmk_inss)
gbxs = tf.cond(sig<0.5, lambda: gbxs_flr, lambda: gbxs )
img_wdws = tf.cond(sig<0.5, lambda: img_wdws_flr, lambda: img_wdws)
            ##################### photometric (color) distortion ###########################
imgs = apply_with_random_selector(imgs, lambda x, order: distort_color(x, order), num_cases=4)
imgs = imgs - self.img_avg
imgs = imgs / 255.0
else:
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
tf.map_fn(self.preprocessing0, elms, \
dtype=(tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.float32),
parallel_iterations=self.bat_siz, back_prop=False, swap_memory=False, infer_shape=True)
imgs = imgs - self.img_avg
imgs = imgs / 255.0
return imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws
def get_input(self):
def parse_function(serialized_example):
            '''
            Fixed-length feature parsing: tf.FixedLenFeature(shape, dtype, default_value)
                shape: doubles as a reshape, e.g. a vector's shape can change from (3,) to (1, 3).
                    Note: if the feature was written via .tostring(), its shape is ().
                dtype: must be one of tf.float32, tf.int64, tf.string.
                default_value: the value used when the feature is missing.
            Variable-length feature parsing: tf.VarLenFeature(dtype)
                Note: the shape need not be given, but the parsed tensor is a SparseTensor.
            A variable-length tensor serialized with .tostring() has the fixed shape=[],
            so it can still be parsed with tf.FixedLenFeature.
            '''
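            # A sketch of the two styles (illustrative, not part of this
            # pipeline): a float vector written via .tostring() parses back
            # as a scalar string and is recovered with tf.decode_raw, e.g.
            #   feat = tf.FixedLenFeature(shape=[], dtype=tf.string)
            #   vec  = tf.decode_raw(parsed['my/feature'], tf.float32)
            # whereas tf.VarLenFeature(tf.float32) would yield a SparseTensor.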
parsed_example = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'image/height': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'image/width': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/num_instances': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/gt_boxes': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'label/gt_mask_inss': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'label/gt_mask_sems': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
#'matrix': tf.VarLenFeature(dtype=dtype('float32')),
#'matrix_shape': tf.FixedLenFeature(shape=(2,), dtype=tf.int64),
}
)
img_hgt = tf.cast(parsed_example['image/height'], tf.int32)
img_wdh = tf.cast(parsed_example['image/width'], tf.int32)
gbx_num = tf.cast(parsed_example['label/num_instances'], tf.int32)
img = tf.decode_raw(parsed_example['image/image'], tf.uint8 )
gbxs = tf.decode_raw(parsed_example['label/gt_boxes'], tf.float32)
gmk_inss = tf.decode_raw(parsed_example['label/gt_mask_inss'], tf.uint8 )
gmk_sems = tf.decode_raw(parsed_example['label/gt_mask_sems'], tf.uint8 )
img = tf.reshape(img, [img_hgt, img_wdh, 3])
gbxs = tf.reshape(gbxs, [gbx_num, 5])
gmk_inss = tf.reshape(gmk_inss, [gbx_num, img_hgt, img_wdh])
gmk_sems = tf.reshape(gmk_sems, [img_hgt, img_wdh])
gmk_inss = self.preprocessing(img, gbxs, gmk_inss)
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'label/num_instances': gbx_num,
'label/gt_boxes': gbxs,
'label/gt_mask_inss': gmk_inss,
'label/gt_mask_sems': gmk_sems
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
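        # The tf.data pipeline below: list and shuffle the shard files, read
        # them in parallel with ZLIB decompression, parse the examples,
        # shuffle-and-repeat for epc_num epochs, pad-batch to the largest
        # image in the batch (boxes and masks padded along the instance
        # axis), prefetch, and split each batch across the GPUs.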
fil_pat = os.path.join(self.dat_dir, 'coco', '*.tfrecord')
dataset = tf.data.Dataset.list_files(file_pattern=fil_pat, shuffle=True, seed=None)
dataset = dataset.prefetch(buffer_size=self.num_readers)
dataset = dataset.shuffle(buffer_size=self.fil_num+self.num_readers, seed=None, reshuffle_each_iteration=True)
dataset = dataset.apply(tf.data.experimental.\
parallel_interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
cycle_length=self.num_readers, \
block_length=10, sloppy=True, \
buffer_output_elements=10, \
prefetch_input_elements=10))
dataset = dataset.prefetch(buffer_size=self.bat_siz_all)
dataset = dataset.map(parse_function, num_parallel_calls=self.num_threads)
dataset = dataset.apply(tf.data.experimental.\
shuffle_and_repeat(buffer_size=self.capacity, count=self.epc_num, seed=None))
dataset = dataset.padded_batch(batch_size=self.bat_siz_all, \
padded_shapes={'image/image': [-1, -1, 3],
'image/height': [],
'image/width': [],
'label/num_instances': [],
'label/gt_boxes': [-1, 5],
'label/gt_mask_inss': [-1]+self.box_msk_siz,
'label/gt_mask_sems': [-1, -1]},
padding_values=None, drop_remainder=True)
#dataset = dataset.batch(batch_size=self.bat_siz_all, drop_remainder=True)
#dataset = dataset.apply(tf.data.experimental.\
# map_and_batch(parse_function, batch_size=self.bat_siz_all, num_parallel_batches=None, \
# drop_remainder=True, num_parallel_calls=self.num_threads))
dataset = dataset.prefetch(buffer_size=1)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst = tf.split(example['image/image'], self.gpu_num, axis=0)
gbxs_lst = tf.split(example['label/gt_boxes'], self.gpu_num, axis=0)
gmk_inss_lst = tf.split(example['label/gt_mask_inss'], self.gpu_num, axis=0)
gmk_sems_lst = tf.split(example['label/gt_mask_sems'], self.gpu_num, axis=0)
gbx_nums_lst = tf.split(example['label/num_instances'], self.gpu_num, axis=0)
img_hgts_lst = tf.split(example['image/height'], self.gpu_num, axis=0)
img_wdhs_lst = tf.split(example['image/width'], self.gpu_num, axis=0)
return imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, img_hgts_lst, img_wdhs_lst
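    # get_input2 is the inference-time counterpart of get_input: a Python
    # generator feeds raw image files into Dataset.from_generator,
    # parse_function attaches dummy labels so the batch structure matches
    # training, and batches are padded and split across GPUs the same way.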
def get_input2(self):
imgs_lst_tst = []
for ext in ['jpg', 'png', 'jpeg', 'JPG']:
imgs_lst_tst.extend(glob.glob(os.path.join(self.dat_dir, '*.{}'.format(ext))))
print('There are {:d} images to test!'.format(len(imgs_lst_tst)))
def data_generator():
for img_fil in imgs_lst_tst:
assert os.path.exists(img_fil), 'The image file does not exist: {:s}'.format(img_fil)
img = cv2.imread(img_fil)
img_nam = img_fil.split('/')[-1]
img_hgt = img.shape[0]
img_wdh = img.shape[1]
if img.size == img_hgt * img_wdh:
                    print('Gray Image %s' % (img_fil))
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
img = img.astype(np.uint8)
                assert img.size == img_wdh * img_hgt * 3, '%s' % img_fil
img = img[:, :, ::-1]
generated_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'image/name': img_nam
}
yield generated_example
def parse_function(generated_example):
img = generated_example['image/image']
img_hgt = generated_example['image/height']
img_wdh = generated_example['image/width']
img_nam = generated_example['image/name']
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'image/name': img_nam,
'label/num_instances': 1,
'label/gt_boxes': tf.zeros(shape=[1, 5], dtype=tf.float32),
'label/gt_mask_inss': tf.zeros(shape=[1]+self.box_msk_siz, dtype=tf.float32),
'label/gt_mask_sems': tf.zeros(shape=[img_hgt, img_wdh], dtype=tf.uint8)
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
dataset = tf.data.Dataset.from_generator(data_generator,
output_types ={'image/image': tf.uint8, 'image/height': tf.int32,
'image/width': tf.int32, 'image/name': tf.string}, \
output_shapes={'image/image': [None, None, 3], 'image/height': [],
'image/width': [], 'image/name': []}, args=None)
dataset = dataset.repeat(count=1)
dataset = dataset.prefetch(buffer_size=self.bat_siz_all)
dataset = dataset.map(parse_function, num_parallel_calls=self.num_threads)
#dataset = dataset.batch(batch_size=self.bat_siz_all, drop_remainder=False)
#dataset = dataset.apply(tf.data.experimental.\
# map_and_batch(parse_function, batch_size=self.bat_siz_all, num_parallel_batches=None, \
# drop_remainder=False, num_parallel_calls=self.num_threads))
dataset = dataset.padded_batch(batch_size=self.bat_siz_all, \
padded_shapes={'image/image': [-1, -1, 3],
'image/height': [],
'image/width': [],
'image/name': [],
'label/num_instances': [],
'label/gt_boxes': [-1, 5],
'label/gt_mask_inss': [-1]+self.box_msk_siz,
'label/gt_mask_sems': [-1, -1]},
padding_values=None, drop_remainder=True)
#dataset = dataset.cache(filename=os.path.join(self.dat_dir, 'cache'))
dataset = dataset.prefetch(buffer_size=1)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst = tf.split(example['image/image'], self.gpu_num, axis=0)
gbxs_lst = tf.split(example['label/gt_boxes'], self.gpu_num, axis=0)
gmk_inss_lst = tf.split(example['label/gt_mask_inss'], self.gpu_num, axis=0)
gmk_sems_lst = tf.split(example['label/gt_mask_sems'], self.gpu_num, axis=0)
gbx_nums_lst = tf.split(example['label/num_instances'], self.gpu_num, axis=0)
img_hgts_lst = tf.split(example['image/height'], self.gpu_num, axis=0)
img_wdhs_lst = tf.split(example['image/width'], self.gpu_num, axis=0)
img_nams_lst = tf.split(example['image/name'], self.gpu_num, axis=0)
return imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, img_hgts_lst, img_wdhs_lst, img_nams_lst
def random_colors(self, N, bright=True):
'''
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
'''
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(self, image, mask, color, alpha=0.5):
'''
Apply the given mask to the image.
'''
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] * (1 - alpha) + alpha * color[c] * 255.0,
image[:, :, c])
return image
def recover_instances(self, img=None, boxs=None, msk_inss=None, msk_sems=None, img_wdw=None, img_hgt=None, img_wdh=None):
img_wdw_tmp = img_wdw.astype(dtype=np.int32, copy=False)
if isinstance(img, np.ndarray):
            img = img[img_wdw_tmp[0]:img_wdw_tmp[2]+1, img_wdw_tmp[1]:img_wdw_tmp[3]+1, :] # the window lies inside the real image region
img = img * 255.0
img = img + self.img_avg
img = np.clip(img, 0.0, 255.0)
img = cv2.resize(img, (img_wdh, img_hgt), interpolation=cv2.INTER_LINEAR)
img = img.astype(dtype=np.uint8, copy=False)
if isinstance(msk_sems, np.ndarray):
msk_sems = msk_sems[img_wdw_tmp[0]:img_wdw_tmp[2]+1, img_wdw_tmp[1]:img_wdw_tmp[3]+1]
msk_sems = cv2.resize(msk_sems, (img_wdh, img_hgt), interpolation=cv2.INTER_NEAREST)
msk_sems = np.eye(self.cls_num, dtype=np.uint8)[msk_sems]
if isinstance(boxs, np.ndarray):
box_num = np.shape(boxs)[0]
boxs_tmp = boxs.astype(dtype=np.int32, copy=False)
if isinstance(msk_inss, np.ndarray):
msk_inss_lst = []
for i in range(box_num):
box_tmp = boxs_tmp[i]
msk_ins = msk_inss[i]
y1, x1, y2, x2 = box_tmp
msk_ins = cv2.resize(msk_ins, (x2-x1+1, y2-y1+1), interpolation=cv2.INTER_LINEAR)
paddings = [[y1, self.img_siz-y2-1], [x1, self.img_siz-x2-1]]
msk_ins = np.pad(msk_ins, paddings, mode='constant')
msk_inss_lst.append(msk_ins)
msk_inss = np.asarray(msk_inss_lst, dtype=np.float32)
msk_inss = np.transpose(msk_inss, [1, 2, 0])
msk_inss = msk_inss[img_wdw_tmp[0]:img_wdw_tmp[2]+1, img_wdw_tmp[1]:img_wdw_tmp[3]+1, :]
msk_inss = cv2.resize(msk_inss, (img_wdh, img_hgt), interpolation=cv2.INTER_LINEAR)
msk_inss = np.reshape(msk_inss, [img_hgt, img_wdh, box_num])
msk_inss = np.transpose(msk_inss, [2, 0, 1]) #(N, h, w)
msk_inss = msk_inss >= self.msk_min
msk_inss = msk_inss.astype(dtype=np.uint8, copy=False)
img_hgt_ = img_wdw[2] - img_wdw[0] + 1.0
img_wdh_ = img_wdw[3] - img_wdw[1] + 1.0
beg = np.array([img_wdw[0], img_wdw[1]], dtype=np.float32)
beg = np.tile(beg, [2])
rat = np.array([img_hgt/img_hgt_, img_wdh/img_wdh_], dtype=np.float32)
rat = np.tile(rat, [2])
boxs = boxs - beg
boxs = boxs * rat
boxs = bbox_clip_py(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
return img, boxs, msk_inss, msk_sems
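    # recover_instances inverts the letterbox transform for visualization:
    # crop the valid window out of the padded square, undo the normalization
    # (img * 255 + img_avg), resize back to the original (img_hgt, img_wdh),
    # paste each box-local instance mask back at its box before thresholding
    # at msk_min, and rescale the boxes into original-image coordinates.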
def display_instances(self, img=None, boxs=None, box_clss=None, box_prbs=None, \
msk_inss=None, msk_sems=None, img_nam=None):
_, ax = plt.subplots(1, figsize=self.figsize)
random.seed(520)
img_hgt, img_wdh = np.shape(img)[0:2]
# Show area outside image boundaries.
#ax.set_ylim(img_hgt + 5, -5)
#ax.set_xlim(-5, img_wdh + 5)
        ax.set_ylim(img_hgt, 0)
ax.set_xlim(0, img_wdh)
ax.axis('off')
ax.set_title(self.title)
if isinstance(msk_sems, np.ndarray):
colors = self.random_colors(self.cls_num)
for i in range(1, self.cls_num):
img = self.apply_mask(img, msk_sems[:,:,i], colors[i], 0.5)
if isinstance(boxs, np.ndarray):
box_num = np.shape(boxs)[0]
if not box_num:
print("No instances to display!")
return
color = self.random_colors(1)[0]
colors = self.random_colors(box_num)
#colors = self.random_colors(self.cls_num)
boxs = boxs.astype(np.int32, copy=False)
#boxs = boxs.reshape([-1, 4, 2])[:, :, ::-1]
for i in range(box_num):
y1, x1, y2, x2 = boxs[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="solid",
edgecolor=color, facecolor='none')
ax.add_patch(p)
'''
x1 = boxs[i, 0, 0]
y1 = boxs[i, 0, 1]
p = patches.Polygon(boxs[i], facecolor='none', edgecolor=color, linewidth=2, linestyle='-', fill=True)
ax.add_patch(p)
'''
box_cls = box_clss[i]
box_cls = int(box_cls)
if box_cls < 0:
box_cls = 0
if isinstance(msk_inss, np.ndarray):
img = self.apply_mask(img, msk_inss[i], colors[i], 0.5)
#img = self.apply_mask(img, msk_inss[i], colors[box_cls], 0.5)
cons = find_contours(msk_inss[i], 0.5)
for con in cons:
#Subtract the padding and flip (y, x) to (x, y)
con = np.fliplr(con) - 1
p = Polygon(con, facecolor="none", edgecolor=colors[i])
#p = Polygon(con, facecolor="none", edgecolor=colors[box_cls])
ax.add_patch(p)
box_prb = box_prbs[i] if box_prbs is not None else None
box_cls = self.cls_idx_to_cls_nam[box_cls]
caption = "{} {:.3f}".format(box_cls, box_prb) if box_prb else box_cls
xx = max(min(x1, img_wdh-100), 0)
yy = max(min(y1+8, img_hgt-20 ), 0)
ax.text(xx, yy, caption, color='k', bbox=dict(facecolor='w', alpha=0.5), size=11, backgroundcolor="none")
img = img.astype(dtype=np.uint8, copy=False)
ax.imshow(img)
plt.gca().set_axis_off()
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
if self.tst_sav:
img_fil = os.path.join(self.dat_dir, 'result', img_nam)
plt.savefig(img_fil, format='jpg', bbox_inches='tight', pad_inches=0)
if self.tst_shw: plt.show()
plt.close()
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, img_hgts_lst, img_wdhs_lst = self.get_input()
imgs = tf.concat(imgs_lst, axis=0)
gbxs = tf.concat(gbxs_lst, axis=0)
gmk_inss = tf.concat(gmk_inss_lst, axis=0)
gmk_sems = tf.concat(gmk_sems_lst, axis=0)
gbx_nums = tf.concat(gbx_nums_lst, axis=0)
img_hgts = tf.concat(img_hgts_lst, axis=0)
img_wdhs = tf.concat(img_wdhs_lst, axis=0)
with tf.device("/gpu:0"):
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
self.preprocessing1(imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_hgts, img_wdhs)
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
with tf.Session(config=config) as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, gbxs_kep, gmk_inss_kep, gmk_sems_kep, gbx_nums_kep, img_wdws_kep, img_hgts_kep, img_wdhs_kep = \
sess.run([imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws, img_hgts, img_wdhs])
for i in range(self.bat_siz*self.gpu_num):
img_tmp = imgs_kep[i]
gbx_num_tmp = gbx_nums_kep[i]
gbxs_tmp = gbxs_kep[i][:gbx_num_tmp]
gmk_inss_tmp = gmk_inss_kep[i][:gbx_num_tmp]
gmk_sems_tmp = gmk_sems_kep[i]
img_wdw_tmp = img_wdws_kep[i]
img_hgt_tmp = img_hgts_kep[i]
img_wdh_tmp = img_wdhs_kep[i]
boxs_tmp = gbxs_tmp[:, :-1]
box_clss_tmp = gbxs_tmp[:, -1]
img_tmp, boxs_tmp, msk_inss_tmp, msk_sems_tmp = \
self.recover_instances(img_tmp, boxs_tmp, gmk_inss_tmp, gmk_sems_tmp, \
img_wdw_tmp, img_hgt_tmp, img_wdh_tmp)
img_nam = str(i) + ".jpg"
self.display_instances(img_tmp, boxs_tmp, box_clss_tmp, None, None, msk_sems_tmp, img_nam)
def get_input_test2(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, \
img_hgts_lst, img_wdhs_lst, img_nams_lst = self.get_input2()
imgs = tf.concat(imgs_lst, axis=0)
gbxs = tf.concat(gbxs_lst, axis=0)
gmk_inss = tf.concat(gmk_inss_lst, axis=0)
gmk_sems = tf.concat(gmk_sems_lst, axis=0)
gbx_nums = tf.concat(gbx_nums_lst, axis=0)
img_hgts = tf.concat(img_hgts_lst, axis=0)
img_wdhs = tf.concat(img_wdhs_lst, axis=0)
img_nams = tf.concat(img_nams_lst, axis=0)
with tf.device("/gpu:0"):
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
self.preprocessing1(imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_hgts, img_wdhs)
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
with tf.Session(config=config) as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, gbxs_kep, gmk_inss_kep, gmk_sems_kep, gbx_nums_kep, img_wdws_kep, img_hgts_kep, img_wdhs_kep, img_nams_kep = \
sess.run([imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws, img_hgts, img_wdhs, img_nams])
for i in range(self.bat_siz_all):
img_tmp = imgs_kep[i]
gbx_num_tmp = gbx_nums_kep[i]
gbxs_tmp = gbxs_kep[i][:gbx_num_tmp]
gmk_inss_tmp = gmk_inss_kep[i][:gbx_num_tmp]
gmk_sems_tmp = gmk_sems_kep[i]
img_wdw_tmp = img_wdws_kep[i]
img_hgt_tmp = img_hgts_kep[i]
img_wdh_tmp = img_wdhs_kep[i]
img_nam_tmp = img_nams_kep[i]
boxs_tmp = gbxs_tmp[:, :-1]
box_clss_tmp = gbxs_tmp[:, -1]
                img_tmp, boxs_tmp, msk_inss_tmp, msk_sems_tmp = \
                    self.recover_instances(img_tmp, boxs_tmp, None, gmk_sems_tmp, \
                                           img_wdw_tmp, img_hgt_tmp, img_wdh_tmp)
img_nam = str(i) + ".jpg"
self.display_instances(img_tmp, boxs_tmp, box_clss_tmp, None, None, msk_sems_tmp, img_nam_tmp)
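# GeneratorForVOC mirrors GeneratorForCOCO with a fixed img_siz, VOC/SBD class
# names, and a make_input that reads per-image instance ('GTinst') and semantic
# ('GTcls') segmentations from SBD .mat files instead of COCO JSON; the
# preprocessing, input pipeline, and visualization methods are otherwise the
# same as above.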
class GeneratorForVOC(object):
def __init__(self, mod_tra=True, dat_dir='Mybase/datasets/train', bat_siz=6, epc_num=20, min_after_dequeue=20, \
gpu_lst='0', fil_num=32, tst_shw=True, tst_sav=False):
self.mod_tra = mod_tra
self.use_pad = True
self.use_exp = False
self.exp_rat = 2.0
self.img_avg = np.array([123.7, 116.8, 103.9], dtype=np.float32)
self.img_siz = 512 #1025 #897 #513
self.box_siz_min = 5
self.box_isc_min = 0.5
self.box_msk_siz = [126, 126]
self.msk_min = 0.5
############for crop###########
self.min_object_covered = 0.3
self.aspect_ratio_range = (0.5, 2.0)
self.area_range = (0.1, 1.0)
self.max_attempts = 200
self.dat_dir = dat_dir
self.bat_siz = bat_siz
self.epc_num = epc_num
self.min_after_dequeue = min_after_dequeue
self.gpu_lst = gpu_lst
self.gpu_num = len(self.gpu_lst.split(','))
self.mdl_dev = '/cpu:%d' if self.gpu_num == 0 else '/gpu:%d'
self.gpu_num = 1 if self.gpu_num == 0 else self.gpu_num
self.fil_num = fil_num
self.num_readers = 2
self.num_threads = 16
self.bat_siz_all = self.bat_siz * self.gpu_num
self.capacity = self.min_after_dequeue + 3 * self.bat_siz_all
self.max_num = 100
self.tst_shw = tst_shw
self.tst_sav = tst_sav
self.cls_nams = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv/monitor']
'''
self.cls_nams = ['background', "dachicun", "daodixian", "ganta", "jueyuanzi"]
'''
self.cls_num = len(self.cls_nams)
self.cls_idx_to_cls_nam = dict(zip(range(self.cls_num), self.cls_nams))
self.cls_nam_to_cls_idx = dict(zip(self.cls_nams, range(self.cls_num)))
########for show######
self.title = ""
self.figsize = (15, 15)
def make_input(self, train=True):
imgs_dir = 'Mybase/datasets/raw/voc/VOCdevkit/VOCSDS/img'
inss_dir = 'Mybase/datasets/raw/voc/VOCdevkit/VOCSDS/inst'
sems_dir = 'Mybase/datasets/raw/voc/VOCdevkit/VOCSDS/cls'
sets_dir = 'Mybase/datasets/raw/voc/VOCdevkit/VOCSDS/ImageSets/Main'
sets_fil = 'train.txt' if train else 'val.txt'
sets_fil = os.path.join(sets_dir, sets_fil)
rcds_dir = 'Mybase/datasets/train' if train else 'Mybase/datasets/val'
assert os.path.exists(sets_fil), 'The sets file does not exist: {:s}'.format(sets_fil)
with open(sets_fil) as f:
sets_idx = [x.strip() for x in f.readlines()]
img_num = len(sets_idx)
print('The number of images is {:d}'.format(img_num))
np.random.shuffle(sets_idx)
with tf.Graph().as_default(), tf.device('/cpu:0'):
out_nam = 'voc_train.tfrecord' if train else 'voc_val.tfrecord'
rcd_nam = os.path.join(rcds_dir, out_nam)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
with tf.python_io.TFRecordWriter(rcd_nam, options=options) as writer:
for i, set_idx in enumerate(sets_idx):
if i % 50 == 0:
print('Converting image %d' % (i))
###read image file###
img_fil = os.path.join(imgs_dir, set_idx+'.jpg')
assert os.path.exists(img_fil), 'The image file does not exist: {:s}'.format(img_fil)
img = cv2.imread(img_fil)
img_hgt, img_wdh = img.shape[0], img.shape[1]
if img.size == img_hgt * img_wdh:
                            print('Gray Image %s' % (img_fil))
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
img = img.astype(np.uint8)
assert img.size == img_wdh * img_hgt * 3, '%s' % str(i)
img = img[:, :, ::-1]
###read semantic file###
sems_fil = os.path.join(sems_dir, set_idx+'.mat')
assert os.path.exists(sems_fil), 'The semantic file does not exist: {:s}'.format(sems_fil)
sems = sio.loadmat(sems_fil)
sems = sems['GTcls']['Segmentation'][0][0]
sems = sems.astype(dtype=np.uint8, copy=False)
###read instance file###
inss_fil = os.path.join(inss_dir, set_idx+'.mat')
assert os.path.exists(inss_fil), 'The instance file does not exist: {:s}'.format(inss_fil)
inss = sio.loadmat(inss_fil)
inss = inss['GTinst']['Segmentation'][0][0]
inss = inss.astype(dtype=np.uint8, copy=False)
###split the instances###
inss_uni = np.unique(inss)
bgd_idxs = np.where(inss_uni == 0)[0]
inss_uni = np.delete(inss_uni, bgd_idxs)
                    bod_idxs = np.where(inss_uni == 255)[0] # border indexes
inss_uni = np.delete(inss_uni, bod_idxs)
gbx_num = len(inss_uni)
boxs = np.zeros((gbx_num, 4), dtype=np.float32)
clss = np.zeros((gbx_num, 1), dtype=np.float32)
gmk_inss = np.zeros((gbx_num, img_hgt, img_wdh), dtype=np.uint8)
if gbx_num == 0:
                        print('There are no instances in the instance file: {:s}'.format(inss_fil))
continue
for idx, ins_uni in enumerate(inss_uni):
[r, c] = np.where(inss==ins_uni)
x1 = np.min(c)
x2 = np.max(c)
y1 = np.min(r)
y2 = np.max(r)
ins = (inss == ins_uni)
cls = np.unique(sems[ins])
assert cls.shape[0] == 1
ins = ins.astype(dtype=np.uint8, copy=False)
cls = cls[0]
clss[idx, :] = cls
boxs[idx, :] = [y1, x1, y2, x2]
gmk_inss[idx, :, :] = ins
gbxs = np.concatenate([boxs, clss], axis=-1)
gmk_sems = sems
                    # write the tfrecord example
img_raw = img.tostring()
gbxs_raw = gbxs.tostring()
gmk_inss_raw = gmk_inss.tostring()
gmk_sems_raw = gmk_sems.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(img_hgt),
'image/width': _int64_feature(img_wdh),
'label/num_instances': _int64_feature(gbx_num), # N
'label/gt_boxes': _bytes_feature(gbxs_raw), # of shape (N, 5), (ymin, xmin, ymax, xmax, classid)
'label/gt_mask_inss': _bytes_feature(gmk_inss_raw), # of shape (N, H, W)
'label/gt_mask_sems': _bytes_feature(gmk_sems_raw) # of shape (H, W)
}))
writer.write(example.SerializeToString())
def resize_image_with_pad(self, img=None, gbxs=None, gmk_inss=None, gmk_sems=None):
        # no cropping happens here, so gmk_inss needs no extra handling
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = gbxs[:, :-1]
clss = gbxs[:, -1]
boxs = bbox_clip(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
gmk_sems = tf.expand_dims(gmk_sems, axis=-1)
if self.use_pad:
img = tf.image.resize_image_with_pad(img, self.img_siz, self.img_siz, \
method=tf.image.ResizeMethod.BILINEAR)
gmk_sems = tf.image.resize_image_with_pad(gmk_sems, self.img_siz, self.img_siz, \
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            # transform the boxes to match the image transform
leh_min = tf.minimum(img_hgt, img_wdh)
leh_max = tf.maximum(img_hgt, img_wdh)
leh_rat = tf.minimum(self.img_siz/leh_min, self.img_siz/leh_max)
boxs = boxs * leh_rat
            # add the padding offset to the absolute coordinates
img_hgt = img_hgt * leh_rat
img_wdh = img_wdh * leh_rat
pad_hgt = self.img_siz - img_hgt
pad_wdh = self.img_siz - img_wdh
pad_top = tf.round(pad_hgt/2.0)
pad_lft = tf.round(pad_wdh/2.0)
beg = tf.stack([pad_top, pad_lft], axis=0)
beg = tf.tile(beg, [2])
            boxs = boxs + beg # these boxes stay inside the real image region, no clipping needed
img_wdw = tf.stack([pad_top, pad_lft, pad_top+img_hgt-1, pad_lft+img_wdh-1], axis=0)
else:
img = tf.image.resize_images(img, [self.img_siz, self.img_siz], \
method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
gmk_sems = tf.image.resize_images(gmk_sems, [self.img_siz, self.img_siz], \
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True)
            # transform the boxes to match the image transform
hgt_rat = self.img_siz / img_hgt
wdh_rat = self.img_siz / img_wdh
leh_rat = tf.stack([hgt_rat, wdh_rat], axis=0)
leh_rat = tf.tile(leh_rat, [2])
boxs = boxs * leh_rat
img_wdw = tf.constant([0, 0, self.img_siz-1, self.img_siz-1], dtype=tf.int32)
        # assemble gt_boxes
        clss = tf.expand_dims(clss, axis=-1)
        gbxs = tf.concat([boxs, clss], axis=-1)
        gbx_tmp = tf.zeros(shape=[1, 5], dtype=tf.float32) # guard against having no gt_box at all
gbxs = tf.concat([gbxs, gbx_tmp], axis=0)
ins_tmp = tf.zeros(shape=[1]+self.box_msk_siz, dtype=tf.float32)
gmk_inss = tf.concat([gmk_inss, ins_tmp], axis=0)
gmk_sems = tf.squeeze(gmk_sems, axis=[-1])
return img, gbxs, gmk_inss, gmk_sems, img_wdw
def distort_crop(self, img=None, gbxs=None, gmk_inss=None, gmk_sems=None):
#gmk_inss --> (M, h, w)
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = gbxs[:, :-1]
clss = gbxs[:, -1]
boxs = bbox_clip(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
if self.use_exp:
exp_rat = tf.random.uniform(shape=[], minval=1.1, maxval=self.exp_rat, dtype=tf.float32)
#exp_rat = self.exp_rat
pad_hgt = tf.cast(img_hgt*(exp_rat-1.0), dtype=tf.int32)
pad_wdh = tf.cast(img_wdh*(exp_rat-1.0), dtype=tf.int32)
            pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_hgt, dtype=tf.int32)
            pad_lft = tf.random.uniform(shape=[], minval=0, maxval=pad_wdh, dtype=tf.int32)
pad_btm = pad_hgt - pad_top
pad_rgt = pad_wdh - pad_lft
paddings = [[pad_top, pad_btm], [pad_lft, pad_rgt], [0, 0]]
img = tf.pad(img, paddings, "CONSTANT", constant_values=0)
paddings = [[pad_top, pad_btm], [pad_lft, pad_rgt]]
gmk_sems = tf.pad(gmk_sems, paddings, "CONSTANT", constant_values=0)
            # padding cannot push boxes out of bounds, so boxs and gmk_inss need no clipping
pad_top = tf.cast(pad_top, dtype=tf.float32)
pad_lft = tf.cast(pad_lft, dtype=tf.float32)
beg = tf.stack([pad_top, pad_lft], axis=0)
beg = tf.tile(beg, [2])
boxs = boxs + beg
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
########################crop the image randomly########################
ncw_idxs = tf.where(clss>0)
boxs_tmp = tf.gather_nd(boxs, ncw_idxs)
boxs_tmp = boxs_tmp / tf.stack([img_hgt-1.0, img_wdh-1.0, img_hgt-1.0, img_wdh-1.0], axis=0)
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(tf.shape(img), bounding_boxes=tf.expand_dims(boxs_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
        # transform the boxes to match the crop
img = tf.slice(img, box_beg, box_siz)
gmk_sems = tf.slice(gmk_sems, box_beg[:-1], box_siz[:-1])
img_hgt = tf.cast(box_siz[0], dtype=tf.float32)
img_wdh = tf.cast(box_siz[1], dtype=tf.float32)
        # shift the absolute coordinates by the crop offset
beg = tf.cast(box_beg[0:2], dtype=tf.float32)
beg = tf.tile(beg, [2])
boxs_tmp = boxs - beg
        # keep boxes from crossing the crop boundary
        boxs = bbox_clip(boxs_tmp, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
        # flag boxes and masks that are mostly cropped away (class set to -1 below)
box_iscs = bbox_intersects1(boxs_tmp, boxs)
idxs = tf.where(box_iscs<self.box_isc_min)
clss = tensor_update(clss, idxs, -1)
#idxs = tf.where(box_iscs>=self.box_isc_min)
#boxs = tf.gather_nd(boxs, idxs)
#clss = tf.gather_nd(clss, idxs)
#gmk_inss= tf.gather_nd(gmk_inss, idxs)
clss = tf.expand_dims(clss, axis=-1)
gbxs = tf.concat([boxs, clss], axis=-1)
        # recompute gmk_inss for the cropped boxes
begs = tf.stack([boxs_tmp[:, 0], boxs_tmp[:, 1], boxs_tmp[:, 0], boxs_tmp[:, 1]], axis=-1)
lehs = tf.stack([boxs_tmp[:, 2]-boxs_tmp[:, 0], boxs_tmp[:, 3]-boxs_tmp[:, 1],
boxs_tmp[:, 2]-boxs_tmp[:, 0], boxs_tmp[:, 3]-boxs_tmp[:, 1]], axis=-1)
boxs_tmp = (boxs - begs) / lehs
idxs = tf.range(tf.shape(boxs_tmp)[0])
gmk_inss = tf.expand_dims(gmk_inss, axis=-1) #(M, H, W, 1)
        gmk_inss = tf.image.crop_and_resize(gmk_inss, boxs_tmp, idxs, self.box_msk_siz, method='bilinear') #(M, 126, 126, 1)
        gmk_inss = tf.squeeze(gmk_inss, axis=[-1]) #(M, 126, 126)
###########resize image to the expected size with paddings############
img, gbxs, gmk_inss, gmk_sems, img_wdw = self.resize_image_with_pad(img, gbxs, gmk_inss, gmk_sems)
return img, gbxs, gmk_inss, gmk_sems, img_wdw
def preprocessing(self, img=None, gbxs=None, gmk_inss=None):
img_shp = tf.shape(img) #[H, W, C]
box_leh = tf.cast(tf.tile(img_shp[:2], [2]), dtype=tf.float32) #[H, W, H, W]
boxs_tmp = gbxs[:,:-1] / box_leh #(M, 4)
idxs = tf.range(tf.shape(boxs_tmp)[0])
gmk_inss = tf.expand_dims(gmk_inss, axis=-1) #(M, H, W, 1)
        gmk_inss = tf.image.crop_and_resize(gmk_inss, boxs_tmp, idxs, self.box_msk_siz, method='bilinear') #(M, 126, 126, 1)
        gmk_inss = tf.squeeze(gmk_inss, axis=[-1]) #(M, 126, 126)
return gmk_inss
def preprocessing0(self, elms=None):
img, gbxs, gmk_inss, gmk_sems, gbx_num, img_hgt, img_wdh = elms
img = img[:img_hgt, :img_wdh, :]
gbxs = gbxs[:gbx_num, :]
gmk_inss = gmk_inss[:gbx_num, :, :]
gmk_sems = gmk_sems[:img_hgt, :img_wdh]
if self.mod_tra:
img, gbxs, gmk_inss, gmk_sems, img_wdw = self.distort_crop(img, gbxs, gmk_inss, gmk_sems)
else:
img, gbxs, gmk_inss, gmk_sems, img_wdw = self.resize_image_with_pad(img, gbxs, gmk_inss, gmk_sems)
gbx_num = tf.shape(gbxs)[0]
paddings = [[0, self.max_num-gbx_num], [0, 0]]
gbxs = tf.pad(gbxs, paddings, "CONSTANT", constant_values=0)
paddings = [[0, self.max_num-gbx_num], [0, 0], [0, 0]]
gmk_inss = tf.pad(gmk_inss, paddings, "CONSTANT", constant_values=0)
return img, gbxs, gmk_inss, gmk_sems, gbx_num, img_wdw
def preprocessing1(self, imgs=None, gbxs=None, gmk_inss=None, gmk_sems=None, gbx_nums=None, img_hgts=None, img_wdhs=None):
imgs = tf.cast(imgs, dtype=tf.float32)
gmk_sems = tf.cast(gmk_sems, dtype=tf.int32 )
elms = [imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_hgts, img_wdhs]
if self.mod_tra:
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
tf.map_fn(self.preprocessing0, elms, \
dtype=(tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.float32),
parallel_iterations=self.bat_siz, back_prop=False, swap_memory=False, infer_shape=True)
            ###################### random horizontal flip ##########################
sig = tf.random.uniform(shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
#imgs = tf.image.random_flip_left_right(imgs) #(N, H, W, C)
imgs_flr = tf.image.flip_left_right(imgs) #(N, H, W, C)
gmk_sems_flr = tf.expand_dims(gmk_sems, axis=-1) #(N, H, W, 1)
gmk_sems_flr = tf.image.flip_left_right(gmk_sems_flr) #(N, H, W, 1)
gmk_sems_flr = tf.squeeze(gmk_sems_flr, axis=[-1]) #(N, H, W)
gmk_inss_flr = tf.transpose(gmk_inss, [0,2,3,1]) #(N, H, W, M)
gmk_inss_flr = tf.image.flip_left_right(gmk_inss_flr) #(N, H, W, M)
gmk_inss_flr = tf.transpose(gmk_inss_flr, [0,3,1,2]) #(N, M, H, W)
gbxs_flr = tf.stack([gbxs[:,:,0], self.img_siz-1.0-gbxs[:,:,3], \
gbxs[:,:,2], self.img_siz-1.0-gbxs[:,:,1], gbxs[:,:,4]], axis=-1)
img_wdws_flr = tf.stack([img_wdws[:,0], self.img_siz-1.0-img_wdws[:,3], \
img_wdws[:,2], self.img_siz-1.0-img_wdws[:,1]], axis=-1)
imgs = tf.cond(sig<0.5, lambda: imgs_flr, lambda: imgs )
gmk_sems = tf.cond(sig<0.5, lambda: gmk_sems_flr, lambda: gmk_sems)
gmk_inss = tf.cond(sig<0.5, lambda: gmk_inss_flr, lambda: gmk_inss)
gbxs = tf.cond(sig<0.5, lambda: gbxs_flr, lambda: gbxs )
img_wdws = tf.cond(sig<0.5, lambda: img_wdws_flr, lambda: img_wdws)
            ##################### photometric (color) distortion ###########################
imgs = apply_with_random_selector(imgs, lambda x, order: distort_color(x, order), num_cases=4)
imgs = imgs - self.img_avg
imgs = imgs / 255.0
else:
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
tf.map_fn(self.preprocessing0, elms, \
dtype=(tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.float32),
parallel_iterations=self.bat_siz, back_prop=False, swap_memory=False, infer_shape=True)
imgs = imgs - self.img_avg
imgs = imgs / 255.0
return imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws
def get_input(self):
def parse_function(serialized_example):
            '''
            Fixed-length feature parsing: tf.FixedLenFeature(shape, dtype, default_value)
                shape: doubles as a reshape, e.g. a vector's shape can change from (3,) to (1, 3).
                    Note: if the feature was written via .tostring(), its shape is ().
                dtype: must be one of tf.float32, tf.int64, tf.string.
                default_value: the value used when the feature is missing.
            Variable-length feature parsing: tf.VarLenFeature(dtype)
                Note: the shape need not be given, but the parsed tensor is a SparseTensor.
            A variable-length tensor serialized with .tostring() has the fixed shape=[],
            so it can still be parsed with tf.FixedLenFeature.
            '''
parsed_example = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'image/height': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'image/width': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/num_instances': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'label/gt_boxes': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'label/gt_mask_inss': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
'label/gt_mask_sems': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),
#'matrix': tf.VarLenFeature(dtype=dtype('float32')),
#'matrix_shape': tf.FixedLenFeature(shape=(2,), dtype=tf.int64),
}
)
img_hgt = tf.cast(parsed_example['image/height'], tf.int32)
img_wdh = tf.cast(parsed_example['image/width'], tf.int32)
gbx_num = tf.cast(parsed_example['label/num_instances'], tf.int32)
img = tf.decode_raw(parsed_example['image/image'], tf.uint8 )
gbxs = tf.decode_raw(parsed_example['label/gt_boxes'], tf.float32)
gmk_inss = tf.decode_raw(parsed_example['label/gt_mask_inss'], tf.uint8 )
gmk_sems = tf.decode_raw(parsed_example['label/gt_mask_sems'], tf.uint8 )
img = tf.reshape(img, [img_hgt, img_wdh, 3])
gbxs = tf.reshape(gbxs, [gbx_num, 5])
gmk_inss = tf.reshape(gmk_inss, [gbx_num, img_hgt, img_wdh])
gmk_sems = tf.reshape(gmk_sems, [img_hgt, img_wdh])
gmk_inss = self.preprocessing(img, gbxs, gmk_inss)
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'label/num_instances': gbx_num,
'label/gt_boxes': gbxs,
'label/gt_mask_inss': gmk_inss,
'label/gt_mask_sems': gmk_sems
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
fil_pat = os.path.join(self.dat_dir, 'voc', '*.tfrecord')
dataset = tf.data.Dataset.list_files(file_pattern=fil_pat, shuffle=True, seed=None)
dataset = dataset.prefetch(buffer_size=self.num_readers)
dataset = dataset.shuffle(buffer_size=self.fil_num+self.num_readers, seed=None, reshuffle_each_iteration=True)
dataset = dataset.apply(tf.data.experimental.\
parallel_interleave(lambda x: tf.data.TFRecordDataset(x, compression_type='ZLIB'), \
cycle_length=self.num_readers, \
block_length=10, sloppy=True, \
buffer_output_elements=10, \
prefetch_input_elements=10))
dataset = dataset.prefetch(buffer_size=self.bat_siz_all)
dataset = dataset.map(parse_function, num_parallel_calls=self.num_threads)
dataset = dataset.apply(tf.data.experimental.\
shuffle_and_repeat(buffer_size=self.capacity, count=self.epc_num, seed=None))
dataset = dataset.padded_batch(batch_size=self.bat_siz_all, \
padded_shapes={'image/image': [-1, -1, 3],
'image/height': [],
'image/width': [],
'label/num_instances': [],
'label/gt_boxes': [-1, 5],
'label/gt_mask_inss': [-1]+self.box_msk_siz,
'label/gt_mask_sems': [-1, -1]},
padding_values=None, drop_remainder=True)
#dataset = dataset.batch(batch_size=self.bat_siz_all, drop_remainder=True)
#dataset = dataset.apply(tf.data.experimental.\
# map_and_batch(parse_function, batch_size=self.bat_siz_all, num_parallel_batches=None, \
# drop_remainder=True, num_parallel_calls=self.num_threads))
dataset = dataset.prefetch(buffer_size=1)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst = tf.split(example['image/image'], self.gpu_num, axis=0)
gbxs_lst = tf.split(example['label/gt_boxes'], self.gpu_num, axis=0)
gmk_inss_lst = tf.split(example['label/gt_mask_inss'], self.gpu_num, axis=0)
gmk_sems_lst = tf.split(example['label/gt_mask_sems'], self.gpu_num, axis=0)
gbx_nums_lst = tf.split(example['label/num_instances'], self.gpu_num, axis=0)
img_hgts_lst = tf.split(example['image/height'], self.gpu_num, axis=0)
img_wdhs_lst = tf.split(example['image/width'], self.gpu_num, axis=0)
return imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, img_hgts_lst, img_wdhs_lst
def get_input2(self):
imgs_lst_tst = []
for ext in ['jpg', 'png', 'jpeg', 'JPG']:
imgs_lst_tst.extend(glob.glob(os.path.join(self.dat_dir, '*.{}'.format(ext))))
print('There are {:d} images to test!'.format(len(imgs_lst_tst)))
def data_generator():
for img_fil in imgs_lst_tst:
assert os.path.exists(img_fil), 'The image file does not exist: {:s}'.format(img_fil)
img = cv2.imread(img_fil)
img_nam = img_fil.split('/')[-1]
img_hgt = img.shape[0]
img_wdh = img.shape[1]
if img.size == img_hgt * img_wdh:
                    print('Gray Image %s' % (img_fil))
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
img = img.astype(np.uint8)
                assert img.size == img_wdh * img_hgt * 3, '%s' % img_fil
img = img[:, :, ::-1]
generated_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'image/name': img_nam
}
yield generated_example
def parse_function(generated_example):
img = generated_example['image/image']
img_hgt = generated_example['image/height']
img_wdh = generated_example['image/width']
img_nam = generated_example['image/name']
parsed_example = {
'image/image': img,
'image/height': img_hgt,
'image/width': img_wdh,
'image/name': img_nam,
'label/num_instances': 1,
'label/gt_boxes': tf.zeros(shape=[1, 5], dtype=tf.float32),
'label/gt_mask_inss': tf.zeros(shape=[1]+self.box_msk_siz, dtype=tf.float32),
'label/gt_mask_sems': tf.zeros(shape=[img_hgt, img_wdh], dtype=tf.uint8)
}
#parsed_example['matrix'] = tf.sparse_tensor_to_dense(parsed_example['matrix'])
#parsed_example['matrix'] = tf.reshape(parsed_example['matrix'], parsed_example['matrix_shape'])
return parsed_example
dataset = tf.data.Dataset.from_generator(data_generator,
output_types ={'image/image': tf.uint8, 'image/height': tf.int32,
'image/width': tf.int32, 'image/name': tf.string}, \
output_shapes={'image/image': [None, None, 3], 'image/height': [],
'image/width': [], 'image/name': []}, args=None)
dataset = dataset.repeat(count=1)
dataset = dataset.prefetch(buffer_size=self.bat_siz_all)
dataset = dataset.map(parse_function, num_parallel_calls=self.num_threads)
#dataset = dataset.batch(batch_size=self.bat_siz_all, drop_remainder=False)
#dataset = dataset.apply(tf.data.experimental.\
# map_and_batch(parse_function, batch_size=self.bat_siz_all, num_parallel_batches=None, \
# drop_remainder=False, num_parallel_calls=self.num_threads))
dataset = dataset.padded_batch(batch_size=self.bat_siz_all, \
padded_shapes={'image/image': [-1, -1, 3],
'image/height': [],
'image/width': [],
'image/name': [],
'label/num_instances': [],
'label/gt_boxes': [-1, 5],
'label/gt_mask_inss': [-1]+self.box_msk_siz,
'label/gt_mask_sems': [-1, -1]},
padding_values=None, drop_remainder=True)
#dataset = dataset.cache(filename=os.path.join(self.dat_dir, 'cache'))
dataset = dataset.prefetch(buffer_size=1)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
imgs_lst = tf.split(example['image/image'], self.gpu_num, axis=0)
gbxs_lst = tf.split(example['label/gt_boxes'], self.gpu_num, axis=0)
gmk_inss_lst = tf.split(example['label/gt_mask_inss'], self.gpu_num, axis=0)
gmk_sems_lst = tf.split(example['label/gt_mask_sems'], self.gpu_num, axis=0)
gbx_nums_lst = tf.split(example['label/num_instances'], self.gpu_num, axis=0)
img_hgts_lst = tf.split(example['image/height'], self.gpu_num, axis=0)
img_wdhs_lst = tf.split(example['image/width'], self.gpu_num, axis=0)
img_nams_lst = tf.split(example['image/name'], self.gpu_num, axis=0)
return imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, img_hgts_lst, img_wdhs_lst, img_nams_lst
def random_colors(self, N, bright=True):
'''
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
'''
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(self, image, mask, color, alpha=0.5):
'''
Apply the given mask to the image.
'''
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] * (1 - alpha) + alpha * color[c] * 255.0,
image[:, :, c])
return image
def recover_instances(self, img=None, boxs=None, msk_inss=None, msk_sems=None, img_wdw=None, img_hgt=None, img_wdh=None):
img_wdw_tmp = img_wdw.astype(dtype=np.int32, copy=False)
if isinstance(img, np.ndarray):
            img = img[img_wdw_tmp[0]:img_wdw_tmp[2]+1, img_wdw_tmp[1]:img_wdw_tmp[3]+1, :] # the window lies inside the real image region
img = img * 255.0
img = img + self.img_avg
img = np.clip(img, 0.0, 255.0)
img = cv2.resize(img, (img_wdh, img_hgt), interpolation=cv2.INTER_LINEAR)
img = img.astype(dtype=np.uint8, copy=False)
if isinstance(msk_sems, np.ndarray):
msk_sems = msk_sems[img_wdw_tmp[0]:img_wdw_tmp[2]+1, img_wdw_tmp[1]:img_wdw_tmp[3]+1]
msk_sems = cv2.resize(msk_sems, (img_wdh, img_hgt), interpolation=cv2.INTER_NEAREST)
msk_sems = np.eye(self.cls_num, dtype=np.uint8)[msk_sems]
if isinstance(boxs, np.ndarray):
box_num = np.shape(boxs)[0]
boxs_tmp = boxs.astype(dtype=np.int32, copy=False)
if isinstance(msk_inss, np.ndarray):
msk_inss_lst = []
for i in range(box_num):
box_tmp = boxs_tmp[i]
msk_ins = msk_inss[i]
y1, x1, y2, x2 = box_tmp
msk_ins = cv2.resize(msk_ins, (x2-x1+1, y2-y1+1), interpolation=cv2.INTER_LINEAR)
paddings = [[y1, self.img_siz-y2-1], [x1, self.img_siz-x2-1]]
msk_ins = np.pad(msk_ins, paddings, mode='constant')
msk_inss_lst.append(msk_ins)
msk_inss = np.asarray(msk_inss_lst, dtype=np.float32)
msk_inss = np.transpose(msk_inss, [1, 2, 0])
msk_inss = msk_inss[img_wdw_tmp[0]:img_wdw_tmp[2]+1, img_wdw_tmp[1]:img_wdw_tmp[3]+1, :]
msk_inss = cv2.resize(msk_inss, (img_wdh, img_hgt), interpolation=cv2.INTER_LINEAR)
msk_inss = np.reshape(msk_inss, [img_hgt, img_wdh, box_num])
msk_inss = np.transpose(msk_inss, [2, 0, 1]) #(N, h, w)
msk_inss = msk_inss >= self.msk_min
msk_inss = msk_inss.astype(dtype=np.uint8, copy=False)
img_hgt_ = img_wdw[2] - img_wdw[0] + 1.0
img_wdh_ = img_wdw[3] - img_wdw[1] + 1.0
beg = np.array([img_wdw[0], img_wdw[1]], dtype=np.float32)
beg = np.tile(beg, [2])
rat = np.array([img_hgt/img_hgt_, img_wdh/img_wdh_], dtype=np.float32)
rat = np.tile(rat, [2])
boxs = boxs - beg
boxs = boxs * rat
boxs = bbox_clip_py(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
return img, boxs, msk_inss, msk_sems
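#Window math, by example: for img_wdw = [0, 64, 512, 447] the un-padded size
#is img_hgt_ = 513, img_wdh_ = 384, so beg = (0, 64, 0, 64) removes the
#padding offset and rat rescales the boxes from the 513x384 window back to
#the original img_hgt x img_wdh resolution before the final clip.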
def display_instances(self, img=None, boxs=None, box_clss=None, box_prbs=None, \
msk_inss=None, msk_sems=None, img_nam=None):
_, ax = plt.subplots(1, figsize=self.figsize)
random.seed(520)
img_hgt, img_wdh = np.shape(img)[0:2]
# Show area outside image boundaries.
#ax.set_ylim(img_hgt + 5, -5)
#ax.set_xlim(-5, img_wdh + 5)
ax.set_ylim(img_hgt, 0)
ax.set_xlim(0, img_wdh)
ax.axis('off')
ax.set_title(self.title)
if isinstance(msk_sems, np.ndarray):
colors = self.random_colors(self.cls_num)
for i in range(1, self.cls_num):
img = self.apply_mask(img, msk_sems[:,:,i], colors[i], 0.5)
if isinstance(boxs, np.ndarray):
box_num = np.shape(boxs)[0]
if not box_num:
print("No instances to display!")
return
color = self.random_colors(1)[0]
colors = self.random_colors(box_num)
#colors = self.random_colors(self.cls_num)
boxs = boxs.astype(np.int32, copy=False)
#boxs = boxs.reshape([-1, 4, 2])[:, :, ::-1]
for i in range(box_num):
y1, x1, y2, x2 = boxs[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="solid",
edgecolor=color, facecolor='none')
ax.add_patch(p)
'''
x1 = boxs[i, 0, 0]
y1 = boxs[i, 0, 1]
p = patches.Polygon(boxs[i], facecolor='none', edgecolor=color, linewidth=2, linestyle='-', fill=True)
ax.add_patch(p)
'''
box_cls = box_clss[i]
box_cls = int(box_cls)
if box_cls < 0:
box_cls = 0
if isinstance(msk_inss, np.ndarray):
img = self.apply_mask(img, msk_inss[i], colors[i], 0.5)
#img = self.apply_mask(img, msk_inss[i], colors[box_cls], 0.5)
cons = find_contours(msk_inss[i], 0.5)
for con in cons:
#Subtract the padding and flip (y, x) to (x, y)
con = np.fliplr(con) - 1
p = Polygon(con, facecolor="none", edgecolor=colors[i])
#p = Polygon(con, facecolor="none", edgecolor=colors[box_cls])
ax.add_patch(p)
box_prb = box_prbs[i] if box_prbs is not None else None
box_cls = self.cls_idx_to_cls_nam[box_cls]
caption = "{} {:.3f}".format(box_cls, box_prb) if box_prb else box_cls
xx = max(min(x1, img_wdh-100), 0)
yy = max(min(y1+8, img_hgt-20 ), 0)
ax.text(xx, yy, caption, color='k', bbox=dict(facecolor='w', alpha=0.5), size=11, backgroundcolor="none")
img = img.astype(dtype=np.uint8, copy=False)
ax.imshow(img)
plt.gca().set_axis_off()
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
if self.tst_sav:
img_fil = os.path.join(self.dat_dir, 'result', img_nam)
plt.savefig(img_fil, format='jpg', bbox_inches='tight', pad_inches=0)
if self.tst_shw: plt.show()
plt.close()
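#Usage sketch (gen is a hypothetical instance of this generator): after
#recover_instances has mapped everything back to original-image coordinates,
#gen.display_instances(img, boxs, box_clss, None, msk_inss, msk_sems, "0.jpg")
#draws the boxes, masks and captions, then saves and/or shows the figure
#according to tst_sav / tst_shw.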
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, img_hgts_lst, img_wdhs_lst = self.get_input()
imgs = tf.concat(imgs_lst, axis=0)
gbxs = tf.concat(gbxs_lst, axis=0)
gmk_inss = tf.concat(gmk_inss_lst, axis=0)
gmk_sems = tf.concat(gmk_sems_lst, axis=0)
gbx_nums = tf.concat(gbx_nums_lst, axis=0)
img_hgts = tf.concat(img_hgts_lst, axis=0)
img_wdhs = tf.concat(img_wdhs_lst, axis=0)
with tf.device("/gpu:0"):
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
self.preprocessing1(imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_hgts, img_wdhs)
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
with tf.Session(config=config) as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, gbxs_kep, gmk_inss_kep, gmk_sems_kep, gbx_nums_kep, img_wdws_kep, img_hgts_kep, img_wdhs_kep = \
sess.run([imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws, img_hgts, img_wdhs])
for i in range(self.bat_siz*self.gpu_num):
img_tmp = imgs_kep[i]
gbx_num_tmp = gbx_nums_kep[i]
gbxs_tmp = gbxs_kep[i][:gbx_num_tmp]
gmk_inss_tmp = gmk_inss_kep[i][:gbx_num_tmp]
gmk_sems_tmp = gmk_sems_kep[i]
img_wdw_tmp = img_wdws_kep[i]
img_hgt_tmp = img_hgts_kep[i]
img_wdh_tmp = img_wdhs_kep[i]
boxs_tmp = gbxs_tmp[:, :-1]
box_clss_tmp = gbxs_tmp[:, -1]
img_tmp, boxs_tmp, msk_inss_tmp, msk_sems_tmp = \
self.recover_instances(img_tmp, boxs_tmp, gmk_inss_tmp, gmk_sems_tmp, \
img_wdw_tmp, img_hgt_tmp, img_wdh_tmp)
img_nam = str(i) + ".jpg"
self.display_instances(img_tmp, boxs_tmp, box_clss_tmp, None, msk_inss_tmp, msk_sems_tmp, img_nam)
def get_input_test2(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs_lst, gbxs_lst, gmk_inss_lst, gmk_sems_lst, gbx_nums_lst, \
img_hgts_lst, img_wdhs_lst, img_nams_lst = self.get_input2()
imgs = tf.concat(imgs_lst, axis=0)
gbxs = tf.concat(gbxs_lst, axis=0)
gmk_inss = tf.concat(gmk_inss_lst, axis=0)
gmk_sems = tf.concat(gmk_sems_lst, axis=0)
gbx_nums = tf.concat(gbx_nums_lst, axis=0)
img_hgts = tf.concat(img_hgts_lst, axis=0)
img_wdhs = tf.concat(img_wdhs_lst, axis=0)
img_nams = tf.concat(img_nams_lst, axis=0)
with tf.device("/gpu:0"):
imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws = \
self.preprocessing1(imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_hgts, img_wdhs)
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
with tf.Session(config=config) as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
imgs_kep, gbxs_kep, gmk_inss_kep, gmk_sems_kep, gbx_nums_kep, img_wdws_kep, img_hgts_kep, img_wdhs_kep, img_nams_kep = \
sess.run([imgs, gbxs, gmk_inss, gmk_sems, gbx_nums, img_wdws, img_hgts, img_wdhs, img_nams])
for i in range(self.bat_siz_all):
img_tmp = imgs_kep[i]
gbx_num_tmp = gbx_nums_kep[i]
gbxs_tmp = gbxs_kep[i][:gbx_num_tmp]
gmk_inss_tmp = gmk_inss_kep[i][:gbx_num_tmp]
gmk_sems_tmp = gmk_sems_kep[i]
img_wdw_tmp = img_wdws_kep[i]
img_hgt_tmp = img_hgts_kep[i]
img_wdh_tmp = img_wdhs_kep[i]
img_nam_tmp = img_nams_kep[i]
boxs_tmp = gbxs_tmp[:, :-1]
box_clss_tmp = gbxs_tmp[:, -1]
img_tmp, boxs_tmp, msk_inss_tmp, msk_sems_tmp = \
self.recover_instances(img_tmp, boxs_tmp, None, msk_sems_tmp, \
img_wdw_tmp, img_hgt_tmp, img_wdh_tmp)
self.display_instances(img_tmp, boxs_tmp, box_clss_tmp, None, None, msk_sems_tmp, img_nam_tmp)
"""
#from Mybase.leye_utils.proposals_target_layer import *
class GeneratorForSynthText(object):
def __init__(self, mod_tra=False, dat_dir='train', bat_siz=5, min_after_dequeue=20):
self.mod_tra = mod_tra
self.use_pad = False
self.img_siz_min = 400
self.img_siz_max = 513
self.box_siz_min = 5
self.box_isc_min = 0.5
############for crop###########
self.min_object_covered = 1.0
self.aspect_ratio_range = (0.5, 2.0)
self.area_range = (0.3, 1.0)
self.max_attempts = 200
self.dat_dir = dat_dir
self.max_num = 100
self.bat_siz = bat_siz
self.min_after_dequeue = min_after_dequeue
self.num_threads = 16
self.cls_nams = ['background', 'text']
self.cls_num = len(self.cls_nams)
self.cls_idx_to_cls_nam = dict(zip(range(self.cls_num), self.cls_nams))
self.cls_nam_to_cls_idx = dict(zip(self.cls_nams, range(self.cls_num)))
########for test######
self.imgs_lst_tst = []
self.imgs_dir_lst = ["Mybase/datasets/test"]
for img_dir in self.imgs_dir_lst:
for ext in ['jpg', 'png', 'jpeg', 'JPG']:
self.imgs_lst_tst.extend(glob.glob(os.path.join(img_dir, '*.{}'.format(ext))))
self.anns_lst_tst = []
self.gbxs_lst_tst = [] #use_gbx==True is not supported yet
self.img_num_tst = len(self.imgs_lst_tst)
self.get_idx = 0
def load_meta(self, mets_dir=None):
met_dats = loadmat(mets_dir, struct_as_record=False)
img_nams = list(met_dats['imnames'][0])
wrd_gbxs = list(met_dats['wordBB' ][0])
cha_gbxs = list(met_dats['charBB' ][0])
gbx_lbls = list(met_dats['txt' ][0])
return img_nams, wrd_gbxs, cha_gbxs, gbx_lbls
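#Layout note, assuming the standard SynthText gt.mat: each 'wordBB'/'charBB'
#entry is a 2x4xN array of (x, y) corners and 'txt' holds the transcriptions,
#which is why make_input below transposes and reshapes each entry into
#(N, 8) rows ordered (y0, x0, y1, x1, y2, x2, y3, x3).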
def make_input(self, num_per_sha=3000):
#set the image directory here
imgs_dir = "Mybase/datasets/SynthText"
#set the annotation directory here
mets_dir = "Mybase/datasets/SynthText"
#set the tfrecords output directory here
rcds_dir = "Mybase/tfrecords"
img_nams_lst, wrd_gbxs_lst, cha_gbxs_lst, gbx_lbls_lst = self.load_meta(os.path.join(mets_dir, 'gt.mat'))
assert len(img_nams_lst) == len(wrd_gbxs_lst) == len(cha_gbxs_lst) == len(gbx_lbls_lst), 'img_num wrong!'
imgs_lst = [os.path.join(mets_dir, x[0]) for x in img_nams_lst]
img_num = len(imgs_lst)
print("The datasets have a total of {:d} images!".format(img_num))
idxs = np.arange(img_num)
np.random.shuffle(idxs)
imgs_lst = [imgs_lst[x] for x in idxs]
wrd_gbxs_lst = [wrd_gbxs_lst[x] for x in idxs]
cha_gbxs_lst = [cha_gbxs_lst[x] for x in idxs]
gbx_lbls_lst = [gbx_lbls_lst[x] for x in idxs]
wrd_gbxs_kep = []
cha_gbxs_kep = []
for i in range(img_num):
wrd_gbxs = np.asarray(wrd_gbxs_lst[i], dtype=np.float32)
cha_gbxs = np.asarray(cha_gbxs_lst[i], dtype=np.float32)
wrd_gbxs = np.reshape(np.transpose(np.reshape(wrd_gbxs, [2, 4, -1]), [2, 1, 0])[:, :, ::-1], [-1, 8])
cha_gbxs = np.reshape(np.transpose(np.reshape(cha_gbxs, [2, 4, -1]), [2, 1, 0])[:, :, ::-1], [-1, 8])
#boxs = bbox_clip_py2(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
wrd_clss = np.ones(shape=[wrd_gbxs.shape[0], 1], dtype=np.float32)
wrd_gbxs = np.concatenate([wrd_gbxs, wrd_clss], axis=-1)
cha_clss = np.ones(shape=[cha_gbxs.shape[0], 1], dtype=np.float32)
cha_gbxs = np.concatenate([cha_gbxs, cha_clss], axis=-1)
wrd_gbxs_kep.append(wrd_gbxs)
cha_gbxs_kep.append(cha_gbxs)
with tf.Graph().as_default(), tf.device('/cpu:0'):
sha_num = int(img_num/num_per_sha)
if sha_num == 0:
sha_num = 1
num_per_sha = img_num
else:
num_per_sha = int(math.ceil(img_num/sha_num))
for sha_idx in range(sha_num):
out_nam = 'synthtext_%05d-of-%05d.tfrecord' % (sha_idx, sha_num)
rcd_nam = os.path.join(rcds_dir, out_nam)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
with tf.python_io.TFRecordWriter(rcd_nam, options=options) as writer:
sta_idx = sha_idx * num_per_sha
end_idx = min((sha_idx + 1) * num_per_sha, img_num)
for i in range(sta_idx, end_idx):
if i % 50 == 0:
print("Converting image %d/%d shard %d" % (i + 1, img_num, sha_idx))
#read the image
img = imgs_lst[i]
img = cv2.imread(img)
if type(img) != np.ndarray:
print("Failed to find image %s" %(imgs_lst[i]))
continue
img_hgt, img_wdh = img.shape[0], img.shape[1]
if img.size == img_hgt * img_wdh:
print ('Gray Image %s' %(imgs_lst[i]))
img_tmp = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_tmp[:, :, :] = img[:, :, np.newaxis]
img = img_tmp
img = img.astype(np.uint8)
assert img.size == img_wdh * img_hgt * 3, '%s' % str(i)
img = img[:, :, ::-1]
#read the labels
wrd_gbxs = wrd_gbxs_kep[i]
cha_gbxs = cha_gbxs_kep[i]
gbx_lbls = gbx_lbls_lst[i]
if len(wrd_gbxs)==0 or len(cha_gbxs)==0 or len(gbx_lbls)==0:
print("No gt_boxes in this image!")
continue
#write the tfrecords
img_raw = img.tostring()
wrd_gbxs_raw = wrd_gbxs.tostring()
cha_gbxs_raw = cha_gbxs.tostring()
gbx_lbls_raw = gbx_lbls.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(img_hgt),
'image/width': _int64_feature(img_wdh),
'label/wrds_num': _int64_feature(wrd_gbxs.shape[0]), # N
'label/chas_num': _int64_feature(cha_gbxs.shape[0]), # N
'label/wrd_gbxs': _bytes_feature(wrd_gbxs_raw), #(N, 9), (y0, x0, y1, x1, y2, x2, y3, x3, cls)
'label/cha_gbxs': _bytes_feature(cha_gbxs_raw),
'label/gbx_lbls': _bytes_feature(gbx_lbls_raw),
}))
writer.write(example.SerializeToString())
def resize_image_with_pad(self, img=None, boxs=None, clss=None):
#scale proportionally by the shorter side
im_h = tf.shape(img)[0]
im_w = tf.shape(img)[1]
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
if self.use_pad:
leh_min = tf.minimum(img_hgt, img_wdh)
leh_max = tf.maximum(img_hgt, img_wdh)
leh_rat = tf.minimum(self.img_siz_min/leh_min, self.img_siz_max/leh_max)
img_hgt = tf.cast(img_hgt*leh_rat, dtype=tf.int32)
img_wdh = tf.cast(img_wdh*leh_rat, dtype=tf.int32)
#transform the image first, then the boxes
img = tf.image.resize_images(img, [img_hgt, img_wdh], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
boxs = boxs * leh_rat
#if the longest side is still too long, crop/pad symmetrically about the center
img_hgt = tf.cast(img_hgt, dtype=tf.float32)
img_wdh = tf.cast(img_wdh, dtype=tf.float32)
pad_hgt_all = tf.cast(self.img_siz_max-img_hgt, dtype=tf.float32)
pad_wdh_all = tf.cast(self.img_siz_max-img_wdh, dtype=tf.float32)
pad_hgt_fnt = tf.round(pad_hgt_all/2.0)
pad_wdh_fnt = tf.round(pad_wdh_all/2.0)
img_wdw = tf.stack([pad_hgt_fnt, pad_wdh_fnt, pad_hgt_fnt+img_hgt-1, pad_wdh_fnt+img_wdh-1], axis=0) #the window lies inside the original image
#transform the image first, then the boxes
img = tf.image.resize_image_with_crop_or_pad(img, self.img_siz_max, self.img_siz_max)
#add the padding offset to the absolute coordinates
beg = tf.stack([pad_hgt_fnt, pad_wdh_fnt], axis=0)
beg = tf.tile(beg, [4])
boxs_tmp = boxs + beg
#clip boxes to the image boundary
boxs = bbox_clip2(boxs_tmp, [0.0, 0.0, self.img_siz_max-1.0, self.img_siz_max-1.0])
#box_iscs = bbox_intersects1(boxs_tmp, boxs)
#idxs = tf.where(box_iscs<self.box_isc_min)
box_edgs = bbox_edges2(boxs)
#idxs = tf.where(tf.not_equal(tf.reduce_sum(tf.cast(box_edgs>=self.box_siz_min, tf.int32), axis=1), 4))
#clss = tensor_update(clss, idxs, -1)
idxs = tf.where(tf.equal(tf.reduce_sum(tf.cast(box_edgs>=self.box_siz_min, tf.int32), axis=1), 4))
boxs = tf.gather_nd(boxs, idxs)
clss = tf.gather_nd(clss, idxs)
else:
hgt_rat = self.img_siz_max / img_hgt
wdh_rat = self.img_siz_max / img_wdh
leh_rat = tf.stack([hgt_rat, wdh_rat], axis=0)
leh_rat = tf.tile(leh_rat, [4])
#transform the image first, then the boxes
img = tf.image.resize_images(img, [self.img_siz_max, self.img_siz_max], method=tf.image.ResizeMethod.BILINEAR, \
align_corners=False)
boxs = boxs * leh_rat
img_wdw = tf.constant([0, 0, self.img_siz_max-1, self.img_siz_max-1], dtype=tf.float32)
#assemble gt_boxes
clss = tf.expand_dims(clss, axis=-1)
gbxs = tf.concat([boxs, clss], axis=-1)
gbxs = tf.cast(gbxs, dtype=tf.float32)
gbx_tmp = tf.zeros(shape=[1, 9], dtype=tf.float32) #guard against images with no gt_box at all
gbxs = tf.concat([gbxs, gbx_tmp], axis=0)
return img, gbxs, img_wdw, im_h, im_w
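#Numeric example of the padded window (use_pad=True, img_siz_max=513): a
#400x300 input scales by min(400/300, 513/400) ~ 1.2825 to about 513x384,
#giving pad_wdh_all = 129, pad_wdh_fnt = 64 and img_wdw = [0, 64, 512, 447],
#the region of the 513x513 canvas that actually holds image pixels.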
def distort_crop(self, img=None, gbxs=None):
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
boxs = gbxs[:, :-1]
clss = gbxs[:, -1]
boxs = bbox_clip2(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
########################crop the image randomly########################
ncw_idxs = tf.where(clss>0)
boxs_tmp = tf.gather_nd(boxs, ncw_idxs)
boxs_tmp = bbox_bound2(boxs_tmp)
boxs_tmp = boxs_tmp / tf.stack([img_hgt-1.0, img_wdh-1.0, img_hgt-1.0, img_wdh-1.0], axis=0)
box_beg, box_siz, box_bnd = \
tf.image.sample_distorted_bounding_box(tf.shape(img), bounding_boxes=tf.expand_dims(boxs_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
#transform the image first, then the boxes
img = tf.slice(img, box_beg, box_siz)
img_hgt = tf.cast(box_siz[0], dtype=tf.float32)
img_wdh = tf.cast(box_siz[1], dtype=tf.float32)
#shift the absolute coordinates by the crop offset
beg = tf.cast(box_beg[0:2], dtype=tf.float32)
beg = tf.tile(beg, [4])
boxs_tmp = boxs - beg
#clip boxes to the image boundary
boxs = bbox_clip2(boxs_tmp, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
#box_iscs = bbox_intersects1(boxs_tmp, boxs)
#idxs = tf.where(box_iscs<self.box_isc_min)
box_edgs = bbox_edges2(boxs)
#idxs = tf.where(tf.not_equal(tf.reduce_sum(tf.cast(box_edgs>=self.box_siz_min, tf.int32), axis=1), 4))
#clss = tensor_update(clss, idxs, -1)
idxs = tf.where(tf.equal(tf.reduce_sum(tf.cast(box_edgs>=self.box_siz_min, tf.int32), axis=1), 4))
boxs = tf.gather_nd(boxs, idxs)
clss = tf.gather_nd(clss, idxs)
###########resize image to the expected size with paddings############
img, gbxs, img_wdw, im_h, im_w = self.resize_image_with_pad(img, boxs, clss)
return img, gbxs, img_wdw, im_h, im_w
def preprocessing(self, img=None, gbxs=None):
self.img_avg = tf.constant([123.7, 116.8, 103.9], dtype=tf.float32)
img = tf.cast(img, dtype=tf.float32)
#normalize to [0, 1]
#if img.dtype != tf.float32:
# img = tf.image.convert_image_dtype(img, dtype=tf.float32)
if self.mod_tra == True:
#photometric distortion
# Randomly distort the colors. There are 4 ways to do it.
img = apply_with_random_selector(img, lambda x, order: distort_color(x, order), num_cases=4)
#random crop
img, gbxs, img_wdw, im_h, im_w = self.distort_crop(img, gbxs)
#boxs = gbxs[:, :-1]
#clss = gbxs[:, -1]
#img, gbxs, img_wdw = self.resize_image_with_pad(img, boxs, clss)
#random flips
sig = tf.random.uniform([2])
#random horizontal flip
img_hgt = tf.cast(tf.shape(img)[0], dtype=tf.float32)
img_wdh = tf.cast(tf.shape(img)[1], dtype=tf.float32)
#img = tf.image.random_flip_left_right(img)
img_lft_rgt = tf.image.flip_left_right(img)
gbxs_lft_rgt = tf.stack([gbxs[:, 2], img_wdh-1.0-gbxs[:, 3], \
gbxs[:, 0], img_wdh-1.0-gbxs[:, 1], \
gbxs[:, 6], img_wdh-1.0-gbxs[:, 7], \
gbxs[:, 4], img_wdh-1.0-gbxs[:, 5], \
gbxs[:, 8]], axis=-1)
img_wdw_lft_rgt = tf.stack([img_wdw[0], img_wdh-1.0-img_wdw[3], img_wdw[2], img_wdh-1.0-img_wdw[1]], axis=-1)
img = tf.cond(sig[0]<0.5, lambda: img_lft_rgt, lambda: img )
gbxs = tf.cond(sig[0]<0.5, lambda: gbxs_lft_rgt, lambda: gbxs )
img_wdw = tf.cond(sig[0]<0.5, lambda: img_wdw_lft_rgt, lambda: img_wdw)
#img = tf.image.per_image_standardization(img)
img = img - self.img_avg
return img, gbxs, img_wdw, im_h, im_w
else:
boxs = gbxs[:, :-1]
clss = gbxs[:, -1]
img, gbxs, img_wdw, im_h, im_w = self.resize_image_with_pad(img, boxs, clss)
img = img - self.img_avg
return img, gbxs, img_wdw, im_h, im_w
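#Flip geometry, for reference: the horizontal flip above maps a column x to
#img_wdh-1.0-x and swaps the polygon corner pairs (0<->1, 2<->3) so each
#quadrilateral keeps a consistent corner ordering after flipping; the window
#is mirrored the same way.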
def get_input(self):
#Build the file list and create the input file queue from it.
#Before this input pipeline runs, all raw data must be converted to a uniform format and stored in TFRecord files.
#The file list should contain every TFRecord file that provides training data.
filename = os.path.join(self.dat_dir, "*.tfrecord")
files = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(files, shuffle=True, capacity=1000)
#parse the records in the TFRecord files
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
reader = tf.TFRecordReader(options=options)
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature([], tf.string),
'image/height': tf.FixedLenFeature([], tf.int64 ),
'image/width': tf.FixedLenFeature([], tf.int64 ),
'label/wrds_num': tf.FixedLenFeature([], tf.int64 ),
'label/chas_num': tf.FixedLenFeature([], tf.int64 ),
'label/wrd_gbxs': tf.FixedLenFeature([], tf.string),
'label/cha_gbxs': tf.FixedLenFeature([], tf.string),
'label/gbx_lbls': tf.FixedLenFeature([], tf.string),
}
)
img_hgt = tf.cast(features['image/height' ], tf.int32)
img_wdh = tf.cast(features['image/width' ], tf.int32)
gbx_num = tf.cast(features['label/wrds_num'], tf.int32)
img = tf.decode_raw(features['image/image' ], tf.uint8 )
gbxs = tf.decode_raw(features['label/wrd_gbxs'], tf.float32)
gbx_lbls = features['label/gbx_lbls']
img = tf.reshape(img, [img_hgt, img_wdh, 3])
gbxs = tf.reshape(gbxs, [gbx_num, 9])
img, gbxs, img_wdw, img_hgt, img_wdh = self.preprocessing(img, gbxs)
img = tf.reshape(img, [self.img_siz_max, self.img_siz_max, 3])
gbx_num = tf.shape(gbxs)[0]
paddings = [[0, self.max_num-gbx_num], [0, 0]]
gbxs = tf.pad(gbxs, paddings)
gbxs = tf.reshape(gbxs, [self.max_num, 9])
capacity = self.min_after_dequeue + 3 * self.bat_siz
#tf.train.shuffle_batch_join
imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums = tf.train.shuffle_batch(
tensors=[img, img_hgt, img_wdh, img_wdw, gbxs, gbx_num], batch_size=self.bat_siz, \
num_threads=self.num_threads, capacity=capacity, min_after_dequeue=self.min_after_dequeue)
'''
imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums = tf.train.batch(
tensors=[img, img_hgt, img_wdh, img_wdw, gbxs, gbx_num], batch_size=self.bat_siz, \
num_threads=self.num_threads, capacity=capacity)
'''
return imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums
def get_input2(self, sess=None, use_gbx=False):
with tf.device("/cpu:0"):
img_tmp = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3], name="image")
gbxs_tmp = tf.placeholder(dtype=tf.float32, shape=[None, 9], name="gt_boxes")
img, gbxs, img_wdw, im_h, im_w = self.preprocessing(img_tmp, gbxs_tmp)
img = tf.reshape(img, [self.img_siz_max, self.img_siz_max, 3])
img_wdw = tf.reshape(img_wdw, [4])
gbx_num = tf.shape(gbxs)[0]
paddings = [[0, self.max_num-gbx_num], [0, 0]]
gbxs = tf.pad(gbxs, paddings)
gbxs = tf.reshape(gbxs, [self.max_num, 9])
imgs_lst = []
img_nams_lst = []
img_hgts_lst = []
img_wdhs_lst = []
img_wdws_lst = []
gbxs_lst = []
gbx_nums_lst = []
self.get_idx = 0
while True:
try:
#read the image
img_pth = self.imgs_lst_tst[self.get_idx]
img_nams_lst.append(img_pth.split('/')[-1])
img_kep = cv2.imread(img_pth)
if type(img_kep) != np.ndarray:
print("Failed to find image %s" %(img_pth))
img_nams_lst.pop()
self.get_idx = (self.get_idx + 1) % self.img_num_tst
continue
img_hgt, img_wdh = img_kep.shape[0], img_kep.shape[1]
if img_kep.size == img_hgt * img_wdh:
print ('Gray Image %s' % str(self.get_idx))
img_zro = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)
img_zro[:, :, :] = img_kep[:, :, np.newaxis]
img_kep = img_zro
img_kep = img_kep.astype(np.uint8)
assert img_kep.size == img_wdh * img_hgt * 3, '%s' % str(self.get_idx)
img_kep = img_kep[:, :, ::-1]
#read the labels
if use_gbx:
gbxs_kep = self.gbxs_lst_tst[self.get_idx]
else:
gbxs_kep = np.zeros(shape=[1, 9], dtype=np.float32)
img_kep, img_wdw_kep, gbxs_kep, gbx_num_kep = sess.run([img, img_wdw, gbxs, gbx_num], \
feed_dict={img_tmp: img_kep, gbxs_tmp: gbxs_kep})
imgs_lst.append(img_kep)
img_hgts_lst.append(img_hgt)
img_wdhs_lst.append(img_wdh)
img_wdws_lst.append(img_wdw_kep)
gbxs_lst.append(gbxs_kep)
gbx_nums_lst.append(gbx_num_kep)
self.get_idx = self.get_idx + 1
self.get_idx = self.get_idx % self.img_num_tst
if len(imgs_lst) == self.bat_siz:
imgs_lst = np.asarray(imgs_lst, dtype=np.float32) #4-D: (B, S, S, 3)
img_hgts_lst = np.asarray(img_hgts_lst, dtype=np.float32) #1-D: (B,)
img_wdhs_lst = np.asarray(img_wdhs_lst, dtype=np.float32) #1-D: (B,)
img_wdws_lst = np.asarray(img_wdws_lst, dtype=np.float32) #2-D: (B, 4)
gbxs_lst = np.asarray(gbxs_lst, dtype=np.float32) #3-D: (B, max_num, 9)
gbx_nums_lst = np.asarray(gbx_nums_lst, dtype=np.int32 ) #1-D: (B,)
yield imgs_lst, img_nams_lst, img_hgts_lst, img_wdhs_lst, img_wdws_lst, gbxs_lst, gbx_nums_lst
imgs_lst = []
img_nams_lst = []
img_hgts_lst = []
img_wdhs_lst = []
img_wdws_lst = []
gbxs_lst = []
gbx_nums_lst = []
except Exception as e:
print(e)
import traceback
traceback.print_exc()
continue
def random_colors(self, N, bright=True):
'''
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
'''
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(self, image, mask, color, alpha=0.5):
'''
Apply the given mask to the image.
'''
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(self, img=None, boxs=None, box_clss=None, box_scrs=None, boxs_tmp=None, box_msks=None,
img_hgt=None, img_wdh=None, img_wdw=None, title="", figsize=(16, 16), ax=None):
'''
Args:
boxs: [num_instance, (y0, x0, y1, x1, y2, x2, y3, x3)] polygon corners in image coordinates.
figsize: (optional) the size of the image.
'''
self.img_avg = np.array([123.7, 116.8, 103.9], dtype=np.float32)
# Generate random colors
colors = self.random_colors(9)
# Number of instances
N = boxs.shape[0]
if not N:
print("\n*** No instances to display *** \n")
return
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Show area outside image boundaries.
ax.set_ylim(img_hgt + 20, -20)
ax.set_xlim(-20, img_wdh + 20)
#ax.set_ylim(img_hgt, 0)
#ax.set_xlim(0, img_wdh)
ax.axis('off')
ax.set_title(title)
if len(img_wdw) != 0:
hgt_tmp = img_wdw[2] - img_wdw[0] + 1
wdh_tmp = img_wdw[3] - img_wdw[1] + 1
beg = np.array([img_wdw[0], img_wdw[1]], dtype=np.float32)
beg = np.tile(beg, [4])
rat = np.array([img_hgt/hgt_tmp, img_wdh/wdh_tmp], dtype=np.float32)
rat = np.tile(rat, [4])
boxs = boxs - beg
boxs = boxs * rat
boxs = bbox_clip_py2(boxs, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
if len(boxs_tmp) > 0:
boxs_tmp = boxs_tmp.reshape([-1, 4])
beg = np.array([img_wdw[0], img_wdw[1]], dtype=np.float32)
beg = np.tile(beg, [2])
boxs_tmp = boxs_tmp - beg
boxs_tmp = boxs_tmp.reshape([4, -1, 4]) #(ymn, xmn, ymx, xmx)
boxs_tmp = boxs_tmp * img_hgt / hgt_tmp
#boxs_tmp = bbox_clip_py(boxs_tmp, [0.0, 0.0, img_hgt-1.0, img_wdh-1.0])
boxs_tmp = np.around(boxs_tmp).astype(np.int32, copy=False)
img_wdw = img_wdw.astype(np.int32, copy=False)
'''
img_zro = np.empty((self.img_siz_max, self.img_siz_max, 3), dtype=np.float32)
img_zro[:, :, :] = box_msks[3][:, :, np.newaxis] * 255.0
img_zro = img_zro[img_wdw[0]:img_wdw[2]+1, img_wdw[1]:img_wdw[3]+1, :] #the window lies inside the original image
img_zro = np.clip(img_zro, 0.0, 255.0)
img_zro = img_zro.astype(np.uint8)
img_zro = cv2.resize(img_zro, (int(img_wdh), int(img_hgt)), interpolation=cv2.INTER_LINEAR)
ax.imshow(img_zro)
'''
img = img + self.img_avg
if len(box_msks) > 0:
for i in range(4):
color = colors[2*i+2]
for c in range(3):
img[:, :, c] = np.where(box_msks[i]==1.0, 0.5*img[:, :, c] + 0.5*color[c]*255.0, img[:, :, c])
img = img[img_wdw[0]:img_wdw[2]+1, img_wdw[1]:img_wdw[3]+1, :] #the window lies inside the original image
img = np.clip(img, 0.0, 255.0)
img = img.astype(np.uint8, copy=False)
img = cv2.resize(img, (int(img_wdh), int(img_hgt)), interpolation=cv2.INTER_LINEAR)
ax.imshow(img)
boxs = np.around(boxs).astype(np.int32, copy=False)
boxs = boxs.reshape([-1, 4, 2])[:, :, ::-1]
color = colors[0]
for i in range(N):
'''
y1, x1, y2, x2 = boxs[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="solid",
edgecolor=color, facecolor='none')
ax.add_patch(p)
'''
x1 = boxs[i, 0, 0]
y1 = boxs[i, 0, 1]
p = patches.Polygon(boxs[i], facecolor='none', edgecolor=color, linewidth=2, linestyle='-', fill=True)
ax.add_patch(p)
# Label
box_cls = box_clss[i]
box_cls = int(box_cls)
if box_cls < 0:
box_cls = 0
box_scr = box_scrs[i] if box_scrs is not None else None
box_cls = self.cls_idx_to_cls_nam[box_cls]
caption = "{} {:.3f}".format(box_cls, box_scr) if box_scr else box_cls
ax.text(x1, y1+8, caption, color='k', bbox=dict(facecolor='w', alpha=0.5), size=11, backgroundcolor="none")
if len(boxs_tmp) > 0:
for i in range(4):
color = colors[2*i+1]
boxs_kep = boxs_tmp[i]
for j in range(len(boxs_kep)):
y1, x1, y2, x2 = boxs_kep[j]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="solid",
edgecolor=color, facecolor='none')
'''
p = patches.Circle(((x1+x2)/2, (y1+y2)/2), radius=3, alpha=0.7, linestyle="solid",
edgecolor=color, facecolor=color)
'''
ax.add_patch(p)
plt.show()
plt.close()
def display_detections(self, imgs, img_hgts, img_wdhs, img_wdws, boxs, box_imxs, box_clss, box_prbs, box_msks):
img_num = len(imgs)
for i in range(img_num):
img = imgs[i]
img_hgt = img_hgts[i]
img_wdh = img_wdhs[i]
img_wdw = img_wdws[i]
idxs = np.where(box_imxs==i)[0]
boxs_img = boxs[idxs]
box_clss_img = box_clss[idxs]
box_prbs_img = box_prbs[idxs]
box_msks_img = box_msks[idxs]
self.display_instances(img, boxs_img, box_clss_img, box_prbs_img, [], box_msks_img, \
img_hgt, img_wdh, img_wdw, title="", figsize=(13, 13), ax=None)
'''
def write_image(self):
if boxes is not None:
res_file = os.path.join(FLAGS.output_dir, '{}.txt'.format(os.path.basename(im_fn).split('.')[0]))
with open(res_file, 'w') as f:
for box in boxes:
# to avoid submitting errors
box = sort_poly(box.astype(np.int32))
if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
continue
f.write('{},{},{},{},{},{},{},{}\r\n'.format(
box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1],
))
cv2.polylines(im[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(255, 255, 0), thickness=1)
if not FLAGS.no_write_images:
img_path = os.path.join(FLAGS.output_dir, os.path.basename(im_fn))
cv2.imwrite(img_path, im[:, :, ::-1])
'''
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums = self.get_input()
with tf.Session() as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
imgs_kep, img_hgts_kep, img_wdhs_kep, img_wdws_kep, gbxs_kep, gbx_nums_kep = \
sess.run([imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums])
for i in range(self.bat_siz):
img = imgs_kep[i]
gbx_num = gbx_nums_kep[i]
boxs = gbxs_kep[i][:gbx_num]
#print(boxs)
img_hgt = img_hgts_kep[i]
img_wdh = img_wdhs_kep[i]
img_wdw = img_wdws_kep[i]
self.display_instances(img, boxs[:, 0:-1], boxs[:, -1], None, [], [],
img_hgt, img_wdh, img_wdw, title="", figsize=(12, 12), ax=None)
coord.request_stop()
coord.join(threads)
def get_input_test2(self):
tf.reset_default_graph()
with tf.Session() as sess:
imgs, img_nams, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums = next(self.get_input2(sess))
for i in range(self.bat_siz):
img = imgs[i]
img_nam = img_nams[i]
gbx_num = gbx_nums[i]
boxs = gbxs[i][:gbx_num]
#print(boxs)
img_hgt = img_hgts[i]
img_wdh = img_wdhs[i]
img_wdw = img_wdws[i]
print(img_nam)
self.display_instances(img, boxs[:, 0:-1], boxs[:, -1], None, [], [],
img_hgt, img_wdh, img_wdw, title="", figsize=(12, 12), ax=None)
def get_input_test3(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums = self.get_input()
PT = ProposalsTargetLayer(img_shp=[self.img_siz_max, self.img_siz_max])
gbxs_tmp, gbx_msks, gbx_nums_tmp = PT.generate_gbxs(gbxs, gbx_nums) #(img_num, 4, -1, 5)/(img_num, 4, H, W)/(img_num, 4)
with tf.Session() as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
imgs_kep, img_hgts_kep, img_wdhs_kep, img_wdws_kep, \
gbxs_kep, gbx_nums_kep, gbxs_tmp_kep, gbx_msks_kep, gbx_nums_tmp_kep = \
sess.run([imgs, img_hgts, img_wdhs, img_wdws, gbxs, gbx_nums, gbxs_tmp, gbx_msks, gbx_nums_tmp])
#print(gbxs_tmp_kep.shape)
#print(gbx_msks_kep.shape)
#print(gbx_nums_tmp_kep)
for i in range(self.bat_siz):
img = imgs_kep[i]
gbx_num = gbx_nums_kep[i]
boxs = gbxs_kep[i][:gbx_num]
gbx_num_tmp = gbx_nums_tmp_kep[i][0]
boxs_tmp = gbxs_tmp_kep[i][:, :gbx_num_tmp, :] #(4, -1, 5)
box_msks = gbx_msks_kep[i] #(4, H, W)
#print(boxs)
img_hgt = img_hgts_kep[i]
img_wdh = img_wdhs_kep[i]
img_wdw = img_wdws_kep[i]
self.display_instances(img, boxs[:, 0:-1], boxs[:, -1], None, boxs_tmp[:, :, :-1], box_msks,
img_hgt, img_wdh, img_wdw, title="", figsize=(12, 12), ax=None)
coord.request_stop()
coord.join(threads)
#from .bboxes_target_layer import generate_bboxes_pre_py
#from Mybase.leye_utils.keys import charset
class GeneratorForICDAR(object):
def __init__(self, fil_nam='train', bat_siz=3, min_after_dequeue=3):
self.mod_tra = False
self.use_pad = False
self.img_siz_min = 400
self.img_siz_max = 512
self.box_siz_min = 0.001
############for crop###########
self.min_object_covered = 0.6
self.aspect_ratio_range = (0.9, 1.1)
self.area_range = (0.3, 1.0)
self.max_attempts = 200
self.fil_nam = fil_nam
self.max_num1 = 100
self.max_num2 = 300
self.bat_siz = bat_siz
self.min_after_dequeue = min_after_dequeue
self.num_threads = 16
self.cls_nams = ['__background__', 'text']
self.cat_id_to_cls_name = dict(zip(range(len(self.cls_nams)), self.cls_nams))
self.cls_name_to_cat_id = dict(zip(self.cls_nams, range(len(self.cls_nams))))
self.enc_maps = {}
self.dec_maps = {}
for i, cha in enumerate(charset, 0):
self.enc_maps[cha] = i
self.dec_maps[i] = cha
def make_input(self, num_per_shard=300):
#image_path_list record_path
img_pat_lst = ["Mybase/datasets/ali/image_1000",
"Mybase/datasets/ali/image_9000"]
rcd_pat = "Mybase/tfrecords"
img_lst = []
for img_pat in img_pat_lst:
for ext in ['jpg', 'png', 'jpeg', 'JPG']:
img_lst.extend(glob.glob(os.path.join(img_pat, '*.{}'.format(ext))))
np.random.shuffle(img_lst)
print(len(img_lst))
gdt_lst = []
for img in img_lst:
img_bas = img.split('.')[:-1]
gdt = ".".join(img_bas+["txt"])
gdt_lst.append(gdt)
print(len(gdt_lst))
gbxs_lst = []
gbx_lbls_lst = []
gbx_clss_lst = []
bad_chas_set = []
for gdt in gdt_lst:
with open(gdt) as f:
anns = [x.strip().strip('\ufeff').strip('\xef\xbb\xbf').strip('\ue76c') for x in f.readlines()]
anns = [x.split(',') for x in anns]
gbxs = [x[0:8] for x in anns]
gbxs = [list(map(float, x)) for x in gbxs]
gbxs = np.asarray(gbxs, dtype=np.float32).reshape(-1, 4, 2)[:, :, ::-1]
gbx_lbls = [x[8] for x in anns]
bad_chas = [y for x in gbx_lbls for y in list(x) if y not in charset]
bad_chas_set.extend(bad_chas)
bad_chas_set = list(set(bad_chas_set))
bad_chas_set = "".join(bad_chas_set)
print(bad_chas_set)
return bad_chas_set
'''
elif "ICDAR2015/" in gt:
ann = [x.split(',') for x in ann]
gt_boxes = [x[0: 8] for x in ann]
gt_labls = [x[8] for x in ann]
gt_boxes = [list(map(float, x)) for x in gt_boxes]
gt_boxes = np.asarray(gt_boxes, dtype=np.float32).reshape(-1, 4, 2)[:, :, ::-1]
gt_labls = np.asarray(gt_labls, dtype=np.str)
gt_clses = []
for lbl in gt_labls:
if lbl == '*' or lbl == '###':
gt_clses.append(-1.0)
else:
gt_clses.append(1.0)
gt_clses = np.asarray(gt_clses, dtype=np.float32)
gt_boxes = [[x[0], x[1], x[2], x[1], x[2], x[3], x[0], x[3]] for x in ann]
gt_labls = [x[4].strip('\"') for x in ann]
gt_boxes = [list(map(float, x)) for x in gt_boxes]
gt_boxes = np.asarray(gt_boxes, dtype=np.float32).reshape(-1, 4, 2)[:, :, ::-1]
gt_labls = np.asarray(gt_labls, dtype=np.str)
gt_clses = []
for lbl in gt_labls:
if lbl == '*' or lbl == '###':
gt_clses.append(-1.0)
else:
gt_clses.append(1.0)
gt_clses = np.asarray(gt_clses, dtype=np.float32)
else:
print("Invalid dataset!")
area = bbox_area_py2(gt_boxes.reshape(-1, 8))>0
if np.sum(area>0) != gt_boxes.shape[0]:
print("gt_boxes wrong!")
print(gt)
print(area)
print(gt_boxes)
gt_boxes_list.append(gt_boxes)
gt_labls_list.append(gt_labls)
gt_clses_list.append(gt_clses)
'''
'''
print('The dataset has a total of %d images' %(len(im_list)))
num_shards = int(len(im_list) / num_per_shard)
if num_shards == 0:
num_shards = 1
num_per_shard = len(im_list)
else:
num_per_shard = int(math.ceil(len(im_list) / float(num_shards)))
for shard_id in range(num_shards):
output_filename = 'icdar_%05d-of-%05d.tfrecord' % (shard_id, num_shards)
record_filename = os.path.join(record_path, output_filename)
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
with tf.python_io.TFRecordWriter(record_filename, options=options) as writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id + 1) * num_per_shard, len(im_list))
for i in range(start_ndx, end_ndx):
if i % 50 == 0:
print("Converting image %d/%d shard %d" % (i + 1, len(im_list), shard_id))
img = cv2.imread(im_list[i])
if type(img) != np.ndarray:
print("Failed to find image %s" %(img_ind))
continue
height, width = img.shape[0], img.shape[1]
if img.size == height * width:
print ('Gray Image %s' % str(img_ind))
im = np.empty((height, width, 3), dtype=np.uint8)
im[:, :, :] = img[:, :, np.newaxis]
img = im
assert img.size == width * height * 3, '%d' % i
img = img.astype(np.uint8)
gt_boxes = gt_boxes_list[i]
gt_labls = gt_labls_list[i]
gt_clses = gt_clses_list[i]
if len(gt_boxes) == 0:
print("No gt_boxes!")
continue
gt_boxes = gt_boxes / np.array([height-1, width-1], dtype=np.float32)
gt_boxes = gt_boxes.reshape(-1, 8)
gt_boxes = np.concatenate([gt_boxes, gt_clses[:, np.newaxis]], axis=-1)
gt_boxes = gt_boxes.astype(dtype=np.float32, copy=False)
img_raw = img.tostring()
gt_boxes_raw = gt_boxes.tostring()
gt_labls_raw = gt_labls.tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'image/image': _bytes_feature(img_raw),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'label/gt_num': _int64_feature(gt_boxes.shape[0]), # N
'label/gt_boxes': _bytes_feature(gt_boxes_raw), # of shape (N, 9), (p0, p1, p2, p3, classid)
'label/gt_labls': _bytes_feature(gt_labls_raw)
}))
writer.write(example.SerializeToString())
'''
def resize_image_with_pad(self, image=None, boxes=None, clses=None):
#scale proportionally by the shorter side
im_h = tf.cast(tf.shape(image)[0], dtype=tf.float32)
im_w = tf.cast(tf.shape(image)[1], dtype=tf.float32)
l_min = tf.minimum(im_w, im_h)
l_max = tf.maximum(im_w, im_h)
rat = tf.minimum(self.img_siz_min/l_min, self.img_siz_max/l_max)
im_h = tf.cast(im_h*rat, dtype=tf.int32)
im_w = tf.cast(im_w*rat, dtype=tf.int32)
image = tf.image.resize_images(image, [im_h, im_w], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
image = tf.image.resize_image_with_crop_or_pad(image, self.img_siz_max, self.img_siz_max)
#if the longest side is too long, crop/pad symmetrically about the center
im_h = tf.cast(im_h, dtype=tf.float32)
im_w = tf.cast(im_w, dtype=tf.float32)
pad_h_all = tf.cast(self.img_siz_max-im_h, dtype=tf.float32)
pad_w_all = tf.cast(self.img_siz_max-im_w, dtype=tf.float32)
pad_h_fnt = tf.round(pad_h_all/2.0)
pad_w_fnt = tf.round(pad_w_all/2.0)
window = tf.stack([pad_h_fnt, pad_w_fnt, pad_h_fnt+im_h-1, pad_w_fnt+im_w-1], axis=0) #the window lies inside the original image
window = window / (self.img_siz_max-1)
box_tmp = tf.stack([im_h-1, im_w-1])
box_tmp = tf.tile(box_tmp, [4])
boxes = boxes * box_tmp
box_tmp = tf.stack([pad_h_fnt, pad_w_fnt])
box_tmp = tf.tile(box_tmp, [4])
boxes = boxes + box_tmp
box_tmp = tf.constant([0, 0, self.img_siz_max-1, self.img_siz_max-1], dtype=tf.float32)
boxes = bbox_clip2(boxes, box_tmp)
box_tmp = tf.constant(self.img_siz_max-1, dtype=tf.float32, shape=[8])
boxes = boxes / box_tmp
box_edgs = bbox_edges2(boxes)
vld_inds = tf.where(tf.equal(tf.reduce_sum(tf.cast(box_edgs>self.box_siz_min, tf.int32), axis=1), 4))
boxes = tf.gather_nd(boxes, vld_inds)
clses = tf.gather_nd(clses, vld_inds)
clses = tf.expand_dims(clses, axis=-1)
gt_boxes = tf.concat([boxes, clses], axis=-1)
gt_boxes = tf.cast(gt_boxes, dtype=tf.float32)
return image, gt_boxes, window
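#Normalized-coordinate example (img_siz_max=512): a corner at pixel (100, 200)
#of the resized image, padded by (pad_h_fnt, pad_w_fnt) = (0, 64), lands at
#((100+0)/511, (200+64)/511) ~ (0.196, 0.517) in the returned gt_boxes; the
#window is divided by img_siz_max-1 in the same way.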
def distort_crop(self, image=None, gt_boxes=None):
im_h = tf.cast(tf.shape(image)[0], dtype=tf.float32)
im_w = tf.cast(tf.shape(image)[1], dtype=tf.float32)
boxes = gt_boxes[:, 0:8]
clses = gt_boxes[:, 8]
########################crop the image randomly########################
no_crowd_id = tf.where(clses>0)
bbox_tmp = tf.gather_nd(boxes, no_crowd_id)
bbox_tmp = bbox_bound2(bbox_tmp)
bbox_begin, bbox_size, distort_bbox = \
tf.image.sample_distorted_bounding_box(tf.shape(image), bounding_boxes=tf.expand_dims(bbox_tmp, 0), \
min_object_covered=self.min_object_covered, \
aspect_ratio_range=self.aspect_ratio_range, \
area_range=self.area_range, max_attempts=self.max_attempts, \
use_image_if_no_bounding_boxes=True)
distort_bbox = distort_bbox[0, 0] #(batch, N, 4)
image = tf.slice(image, bbox_begin, bbox_size)
beg = tf.stack([distort_bbox[0], distort_bbox[1]])
beg = tf.tile(beg, [4])
leh = tf.stack([distort_bbox[2]-distort_bbox[0], distort_bbox[3]-distort_bbox[1]])
leh = tf.tile(leh, [4])
boxes = boxes - beg
boxes = boxes / leh
boxes = bbox_clip2(boxes, [0.0, 0.0, 1.0, 1.0])
box_edgs = bbox_edges2(boxes)
vld_inds = tf.where(tf.equal(tf.reduce_sum(tf.cast(box_edgs>self.box_siz_min, tf.int32), axis=1), 4))
boxes = tf.gather_nd(boxes, vld_inds)
clses = tf.gather_nd(clses, vld_inds)
########################resize image to the expected size with paddings########################
image, gt_boxes, window = self.resize_image_with_pad(image, boxes, clses)
return image, gt_boxes, window
def preprocessing(self, image=None, gt_boxes=None):
self.img_avg = tf.constant([103.939, 116.779, 123.68], dtype=tf.float32)
self.img_avg = self.img_avg / 255.0
#normalize to [0, 1]
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if self.mod_tra == True:
#photometric distortion
# Randomly distort the colors. There are 4 ways to do it.
distorted_image = apply_with_random_selector(image, lambda x, order: distort_color(x, order), num_cases=4)
#random crop
distorted_image, distorted_gt_boxes, distorted_window = self.distort_crop(distorted_image, gt_boxes)
'''
boxes = gt_boxes[:, 0:8]
clses = gt_boxes[:, 8]
distorted_image, distorted_gt_boxes, distorted_window = self.resize_image_with_pad(distorted_image, boxes, clses)
'''
#random flips
signal = tf.random.uniform([2])
#random horizontal flip
#distorted_image = tf.image.random_flip_left_right(distorted_image)
image_left_right = tf.image.flip_left_right(distorted_image)
gt_boxes_left_right = tf.stack([distorted_gt_boxes[:, 0], 1.0-distorted_gt_boxes[:, 3], \
distorted_gt_boxes[:, 2], 1.0-distorted_gt_boxes[:, 1], \
distorted_gt_boxes[:, 4], 1.0-distorted_gt_boxes[:, 7], \
distorted_gt_boxes[:, 6], 1.0-distorted_gt_boxes[:, 5], \
distorted_gt_boxes[:, 8]], axis=-1)
window_left_right = tf.stack([distorted_window[0], 1.0-distorted_window[3], \
distorted_window[2], 1.0-distorted_window[1]], axis=-1)
distorted_image = tf.cond(signal[0]<0.5, lambda: image_left_right, lambda: distorted_image)
distorted_gt_boxes = tf.cond(signal[0]<0.5, lambda: gt_boxes_left_right, lambda: distorted_gt_boxes)
distorted_window = tf.cond(signal[0]<0.5, lambda: window_left_right, lambda: distorted_window)
#distorted_image = tf.image.per_image_standardization(image)
distorted_image = distorted_image - self.img_avg
return distorted_image, distorted_gt_boxes, distorted_window
else:
boxes = gt_boxes[:, 0:8]
clses = gt_boxes[:, 8]
image, gt_boxes, window = self.resize_image_with_pad(image, boxes, clses)
image = image - self.img_avg
return image, gt_boxes, window
def get_input(self):
#Build the file list and create the input file queue from it.
#Before this input pipeline runs, all raw data must be converted to a uniform format and stored in TFRecord files.
#The file list should contain every TFRecord file that provides training data.
filename = os.path.join("Mybase/tfrecords", self.filename, "*.tfrecord")
files = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(files, shuffle = True, capacity=1000)
#parse the records in the TFRecord files
options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
reader = tf.TFRecordReader(options=options)
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features = {
'image/image': tf.FixedLenFeature([], tf.string),
'image/height': tf.FixedLenFeature([], tf.int64),
'image/width': tf.FixedLenFeature([], tf.int64),
'label/gt_num': tf.FixedLenFeature([], tf.int64),
'label/gt_boxes': tf.FixedLenFeature([], tf.string),
'label/gt_labls': tf.FixedLenFeature([], tf.string),
}
)
height = tf.cast(features['image/height'], tf.int32)
width = tf.cast(features['image/width'], tf.int32)
gt_num = tf.cast(features['label/gt_num'], tf.int32)
image = tf.decode_raw(features['image/image'], tf.uint8)
gt_boxes = tf.decode_raw(features['label/gt_boxes'], tf.float32)
gt_labls = features['label/gt_labls']
image = tf.reshape(image, [height, width, 3])
gt_boxes = tf.reshape(gt_boxes, [gt_num, 9])
image, gt_boxes, window = self.preprocessing(image, gt_boxes)
image = tf.reshape(image, [self.img_siz_max, self.img_siz_max, 3])
window = tf.reshape(window, [4])
gt_num = tf.shape(gt_boxes)[0]
paddings = [[0, self.max_num1-gt_num], [0, 0]]
gt_boxes = tf.pad(gt_boxes, paddings)
gt_boxes = tf.reshape(gt_boxes, [self.max_num1, 9])
capacity = self.min_after_dequeue + 3 * self.bat_siz
#tf.train.shuffle_batch_join
images, heights, widths, gt_boxes, gt_nums, windows = tf.train.shuffle_batch(
tensors=[image, height, width, gt_boxes, gt_num, window], batch_size=self.bat_siz, \
num_threads=self.num_threads, capacity=capacity, min_after_dequeue=self.min_after_dequeue)
'''
boxes = []
for i in range(batch_size):
boxes.append(gt_boxes[i][:gt_nums[i]])
'''
return images, heights, widths, gt_boxes, gt_nums, windows
def random_colors(self, N, bright=True):
'''
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
'''
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(self, image, mask, color, alpha=0.5):
'''
Apply the given mask to the image.
'''
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(self, image=None, boxes=None, cls_ids=None, scores=None,
height=None, width=None, window=None, title="", figsize=(16, 16), ax=None):
'''
Args:
boxes: [num_instance, (y0, x0, y1, x1, y2, x2, y3, x3)] polygon corners, normalized to [0, 1].
figsize: (optional) the size of the image.
'''
self.img_avg = np.array([103.939, 116.779, 123.68], dtype=np.float32)
self.img_avg = self.img_avg / 255.0
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
return
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = self.random_colors(1)
beg = np.array([window[0], window[1]], dtype=np.float32)
beg = np.tile(beg, [4])
leh = np.array([window[2]-window[0], window[3]-window[1]], dtype=np.float32)
leh = np.tile(leh, [4])
boxes = (boxes - beg) / leh
boxes = bbox_clip_py2(boxes, [0.0, 0.0, 1.0, 1.0])
box_tmp = np.array([height-1, width-1], dtype=np.float32)
box_tmp = np.tile(box_tmp, [4])
boxes = boxes * box_tmp
boxes = np.around(boxes).astype(np.int32, copy=False)
boxes = boxes.reshape([-1, 4, 2])[:, :, ::-1]
window = (window * (self.img_siz_max-1)).astype(dtype=np.int32)
image = image[window[0]:window[2]+1, window[1]:window[3]+1, :] #the window lies inside the original image
image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR)
# Show area outside image boundaries.
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
#image = image * 255
#image = image.astype(np.float32, copy=False)
image = image + self.img_avg
image = np.clip(image, 0.0, 1.0)
for i in range(N):
color = colors[0]
'''
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="solid",
edgecolor=color, facecolor='none')
'''
x1 = boxes[i, 0, 0]
y1 = boxes[i, 0, 1]
p = patches.Polygon(boxes[i], facecolor='none', edgecolor=color, linewidth=2, linestyle='-', fill=True)
ax.add_patch(p)
# Label
cls_id = cls_ids[i]
if cls_id < 0:
cls_id *= -1
scor = scores[i] if scores is not None else None
clss = self.cat_id_to_cls_name[cls_id]
caption = "{} {:.3f}".format(clss, scor) if scor else clss
ax.text(x1-15, y1-8, caption, color='w', size=11, backgroundcolor="none")
ax.imshow(image)
plt.show()
plt.close()
def display_detections(self, images, heights, widths, windows, bboxes, bbox_imid, bbox_clss, bbox_prbs, bbox_msks):
img_num = len(images)
for i in range(img_num):
image = images[i]
height = heights[i]
width = widths[i]
window = windows[i]
inds = np.where(bbox_imid==i)[0]
bboxes_img = bboxes[inds]
bbox_clss_img = bbox_clss[inds]
bbox_prbs_img = bbox_prbs[inds]
bbox_msks_img = bbox_msks[inds]
self.display_instances(image, bboxes_img, bbox_clss_img, bbox_prbs_img, \
height, width, window, title="", figsize=(13, 13), ax=None)
def write_image(self):
'''
if boxes is not None:
res_file = os.path.join(FLAGS.output_dir, '{}.txt'.format(os.path.basename(im_fn).split('.')[0]))
with open(res_file, 'w') as f:
for box in boxes:
# to avoid submitting errors
box = sort_poly(box.astype(np.int32))
if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
continue
f.write('{},{},{},{},{},{},{},{}\r\n'.format(
box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1],
))
cv2.polylines(im[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(255, 255, 0), thickness=1)
if not FLAGS.no_write_images:
img_path = os.path.join(FLAGS.output_dir, os.path.basename(im_fn))
cv2.imwrite(img_path, im[:, :, ::-1])
'''
def get_input_test(self):
tf.reset_default_graph()
with tf.device("/cpu:0"):
images, heights, widths, gt_boxes, gt_nums, windows = self.get_input()
with tf.Session() as sess:
init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
images, heights, widths, gt_boxes, gt_nums, windows \
= sess.run([images, heights, widths, gt_boxes, gt_nums, windows])
#print(images.shape)
#print(heights)
#print(widths)
#print(gt_boxes)
#print(gt_nums)
#print(windows)
for i in range(self.bat_siz):
image = images[i]
boxes = gt_boxes[i]
height = heights[i]
width = widths[i]
window = windows[i]
#boxid = np.zeros(shape=[boxes.shape[0]], dtype=np.int32)
#probs = np.ones(shape=[boxes.shape[0]], dtype=np.float32)
#stats = generate_bboxes_pre_py(boxes[:, 0:4], boxid, boxes[:, 4], probs, masks, \
# [imid], [window], [height], [width])
#print(stats)
self.display_instances(image, boxes[:, 0:8], boxes[:, 8], None,
height, width, window, title="", figsize=(12, 12), ax=None)
coord.request_stop()
coord.join(threads)
"""
"""
class GeneratorForCRNN(object):
def __init__(self, batch_size=32, min_len=4, max_len=10, box_h=32, pool_scale=4):
#from .keys import alphabet
self.batch_size = batch_size
self.min_len = min_len
self.max_len = max_len
self.box_h = box_h
self.pool_scale = pool_scale
#self.charset = alphabet[:]
self.charset = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
#self.font = '/home/ziyechen/MyCTPN_CRNN_CTC/Mybase/ctpn_crnn_ctc_utils/fonts/simsun.ttf'
self.num_classes = len(self.charset) + 2
self.encode_maps = {}
self.decode_maps = {}
for i, char in enumerate(self.charset, 1):
self.encode_maps[char] = i
self.decode_maps[i] = char
self.encode_maps[' '] = 0
self.decode_maps[0] = ' '
def randRGB(self):
return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def gen_rand(self):
buf = ""
max_len = random.randint(self.min_len, self.max_len)
for i in range(max_len):
buf += random.choice(self.charset)
buf = list(buf)
bnk_loc = random.randint(1, max_len-1)
buf[bnk_loc] = ' '
buf = "".join(buf)
return buf
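#Example output, assuming the default charset: gen_rand() might return
#'a7K q2Z', a random string of 4-10 characters with one interior character
#replaced by a space, so every label exercises the space class (index 0 in
#encode_maps).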
def generateImg(self):
#captcha = ImageCaptcha(fonts=[self.font])
#if not os.path.exists(self.font):
# print('cannot open the font')
captcha = ImageCaptcha(width=560, height=70)
label = self.gen_rand()
image = captcha.generate_image(label)
return np.array(image), label
def groupBatch(self, images, labels):
max_w = 0
imgs_keep = []
slns_keep = [] #seq_lengths
indices = []
values = []
for idx in range(len(images)):
label = labels[idx]
label = [self.encode_maps[c] for c in list(label)]
indices.extend(zip([idx]*len(label), [i for i in range(len(label))]))
values.extend(label)
image = images[idx]
img_h, img_w = image.shape[:2]
ratio = self.box_h / img_h
box_w = int(ratio * img_w)
max_w = max(max_w, box_w)
image = cv2.resize(image, (box_w, self.box_h), interpolation=cv2.INTER_LINEAR)
box_w = len(label) * 13 #set empirically for the way this captcha generator spaces characters
#box_w = 256 #set empirically for this captcha generator
slen = int(box_w/self.pool_scale) - 1
imgs_keep.append(image)
slns_keep.append(slen)
imgs_keep = np.stack(imgs_keep, axis=0)
slns_keep = np.stack(slns_keep, axis=0)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=np.int32)
shape = np.asarray([len(labels), np.asarray(indices).max(axis=0)[1]+1], dtype=np.int64)
labels = (indices, values, shape)
max_w = math.ceil(max_w/self.pool_scale) * self.pool_scale
#for img in range(len(images)):
# image = imgs_keep[img]
# paddings = [[0, 0], [0, max_w-box_w], [0, 0]]
# image = np.pad(image, paddings, 'constant', constant_values=0)
# imgs_keep[img] = image
#imgs_keep = np.stack(imgs_keep, axis=0)
return imgs_keep, labels, slns_keep, max_w
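#Sparse-label example: labels ['ab', 'c'] encode (via encode_maps) to
#indices=[[0,0],[0,1],[1,0]], values=[enc_a, enc_b, enc_c] and shape=[2, 2];
#this (indices, values, shape) triple is exactly the tf.SparseTensor form
#that tf.nn.ctc_loss expects for its labels argument.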
def generate(self):
images = []
labels = []
while True:
try:
image, label = self.generateImg()
#if cfg.NCHANNELS == 1: im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
images.append(image)
labels.append(label)
if len(images) == self.batch_size:
images, labels, slens, max_w = self.groupBatch(images, labels)
#string = self.decode(labels)
#print(string)
yield images, labels, slens, max_w
images = []
labels = []
except Exception as e:
print(e)
import traceback
traceback.print_exc()
continue
def decode(self, decoded=None):
decoded_indices = decoded[0]
decoded_values = decoded[1]
decoded_shape = decoded[2]
decoded_strs = []
for idx in range(decoded_shape[0]):
inds = np.where(decoded_indices[:, 0]==idx)
decoded_dats_img = decoded_values[inds]
decoded_strs_img = [self.decode_maps[i] for i in decoded_dats_img]
decoded_strs_img = "".join(decoded_strs_img)
decoded_strs.append(decoded_strs_img)
return decoded_strs
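#Decode example: rows of decoded_indices whose first column equals an image
#index select that image's values, so a batch encoded from ['ab', 'c'] by
#groupBatch decodes back to ['ab', 'c'].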
def evaluate(self, decoded=None, labels=None):
decoded_strs = self.decode(decoded)
labels_strs = self.decode(labels)
#print("############")
#print(decoded_strs)
#print("!!!!!!!!!!!!")
#print(labels_strs)
assert len(decoded_strs) == len(labels_strs), "The number of the decoded and the number of the labels are mismatched"
acc = []
for s in range(len(decoded_strs)):
if decoded_strs[s] == labels_strs[s]:
acc.append(1.0)
else:
acc.append(0.0)
mean_acc = np.mean(acc)
return mean_acc
"""
### Package Import ###
from typing import List
from bson import ObjectId
from pydantic import parse, parse_obj_as
from pymongo.message import _query_compressed
### AppCode Import ###
from Server.Helper.MongoHelper import GetMongoClient
from Server.Model import *
from Server.Model.ModelConsultant import Consultant, ConsultantUpdateModel
from Server.Model.ModelUser import User, UserUpdateModel
###############################################################################
class UserRepository():
def __init__(self):
self._client = GetMongoClient('User')
async def Insert(self, item) -> bool:
try:
self._client.insert_one(item)
return True
        except Exception:
return False
async def Delete(self, Id:str) -> bool:
if Id:
try:
query = { "$and":[{ "_id" : Id }] }
self._client.delete_one(query)
return True
            except Exception:
return False
else:
return False
async def Search(self, query=None, pageSize=None) -> List[User]:
if query:
if pageSize:
items = list(self._client.find(query).limit(pageSize))
else:
items = list(self._client.find(query))
return parse_obj_as(List[User], items)
else:
if pageSize:
items = list(self._client.find().limit(pageSize))
else:
items = list(self._client.find())
return parse_obj_as(List[User], items)
async def SearchOne(self, query=None) -> User:
if query:
item = self._client.find_one(query)
            if item is not None:
item = User.parse_obj(item)
return item
else:
item = self._client.find_one()
            if item is not None:
item = User.parse_obj(item)
return item
async def SearchOneId(self, Id:str) -> User:
if Id:
query = { "$and":[{ "_id" : Id }] }
item = self._client.find_one(query)
            if item is not None:
item = User.parse_obj(item)
return item
async def Update(self, Id:str, Data: UserUpdateModel) -> bool:
try:
dictionarizedData = {k: v for k, v in Data.dict().items() if v is not None}
searchQuery = { "_id": Id }
updateData = { "$set":dictionarizedData }
self._client.update_one(searchQuery, updateData)
return True
except Exception as e:
print(e)
return False
###############################################################################
class ConsultantRepository():
def __init__(self):
self._client = GetMongoClient('Consultant')
async def Insert(self, item) -> bool:
try:
self._client.insert_one(item)
return True
        except Exception:
return False
async def Delete(self, Id:str) -> bool:
if Id:
try:
query = { "$and":[{ "_id" : Id }] }
self._client.delete_one(query)
return True
            except Exception:
return False
else:
return False
async def Search(self, query=None, pageSize=None) -> List[Consultant]:
if query:
if pageSize:
items = list(self._client.find(query).limit(pageSize))
else:
items = list(self._client.find(query))
return parse_obj_as(List[Consultant], items)
else:
if pageSize:
items = list(self._client.find().limit(pageSize))
else:
items = list(self._client.find())
return parse_obj_as(List[Consultant], items)
async def SearchOne(self, query=None) -> Consultant:
if query:
item = self._client.find_one(query)
            if item is not None:
item = Consultant.parse_obj(item)
return item
else:
item = self._client.find_one()
            if item is not None:
item = Consultant.parse_obj(item)
return item
async def SearchOneId(self, Id:str) -> Consultant:
if Id:
query = { "$and":[{ "_id" : Id }] }
item = self._client.find_one(query)
            if item is not None:
item = Consultant.parse_obj(item)
return item
async def Update(self, Id:str, Data: ConsultantUpdateModel) -> bool:
try:
dictionarizedData = {k: v for k, v in Data.dict().items() if v is not None}
searchQuery = { "_id": Id }
updateData = { "$set":dictionarizedData }
self._client.update_one(searchQuery, updateData)
return True
except Exception as e:
print(e)
return False
###############################################################################
###############################################################################
| 34.958621
| 87
| 0.499112
| 507
| 5,069
| 4.859961
| 0.145957
| 0.089286
| 0.079545
| 0.061688
| 0.813312
| 0.813312
| 0.784903
| 0.74513
| 0.74513
| 0.74513
| 0
| 0
| 0.345828
| 5,069
| 144
| 88
| 35.201389
| 0.743064
| 0.005918
| 0
| 0.84375
| 0
| 0
| 0.011892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015625
| false
| 0
| 0.0625
| 0
| 0.28125
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
418e464302d744287bb85c2fa1cb551a9565e4be
| 116
|
py
|
Python
|
trajnetbaselines/trajnetbaselines/mlp/__init__.py
|
vita-epfl/RRB
|
9099356565c4150d2c53e9a6cfc75bdb792a8929
|
[
"BSD-2-Clause"
] | 37
|
2021-04-11T06:23:19.000Z
|
2022-03-27T22:06:14.000Z
|
trajnetbaselines/trajnetbaselines/mlp/__init__.py
|
swb19/RRB
|
9099356565c4150d2c53e9a6cfc75bdb792a8929
|
[
"BSD-2-Clause"
] | 6
|
2021-03-15T14:54:43.000Z
|
2022-03-04T16:32:16.000Z
|
trajnetbaselines/trajnetbaselines/mlp/__init__.py
|
swb19/RRB
|
9099356565c4150d2c53e9a6cfc75bdb792a8929
|
[
"BSD-2-Clause"
] | 6
|
2021-05-11T08:34:02.000Z
|
2022-03-17T09:48:59.000Z
|
from .mlp import EDN
from .mlp import EDN_M
from .mlp import RRB
from .mlp import RRB_M
# from .utils import *
| 19.333333
| 23
| 0.715517
| 21
| 116
| 3.857143
| 0.333333
| 0.345679
| 0.641975
| 0.395062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224138
| 116
| 5
| 24
| 23.2
| 0.9
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
419fbbe796da3f826ba71efa922a33ab02a28b85
| 6,412
|
py
|
Python
|
django_x509/migrations/0001_initial.py
|
dwang/django-x509
|
0c5e82993a0bafa52650612674212534db5c843e
|
[
"BSD-3-Clause"
] | 1
|
2019-12-22T16:09:30.000Z
|
2019-12-22T16:09:30.000Z
|
django_x509/migrations/0001_initial.py
|
nikitaermishin/django-x509
|
1eb28afba31c6ed0ed04bbc98cc3c98f181dbdcf
|
[
"BSD-3-Clause"
] | null | null | null |
django_x509/migrations/0001_initial.py
|
nikitaermishin/django-x509
|
1eb28afba31c6ed0ed04bbc98cc3c98f181dbdcf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-15 15:36
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import model_utils.fields
from django.db import migrations, models
import django_x509.base.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ca',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('notes', models.TextField(blank=True)),
('key_length', models.CharField(blank=True, choices=[(b'', b''), (b'512', b'512'), (b'1024', b'1024'), (b'2048', b'2048'), (b'4096', b'4096')], default=django_x509.base.models.default_key_length, help_text='bits', max_length=6, verbose_name='key length')),
('digest', models.CharField(blank=True, choices=[(b'', b''), (b'sha1', b'SHA1'), (b'sha224', b'SHA224'), (b'sha256', b'SHA256'), (b'sha384', b'SHA384'), (b'sha512', b'SHA512')], default=django_x509.base.models.default_digest_algorithm, help_text='bits', max_length=8, verbose_name='digest algorithm')),
('validity_start', models.DateTimeField(blank=True, default=django_x509.base.models.default_validity_start, null=True)),
('validity_end', models.DateTimeField(blank=True, default=django_x509.base.models.default_ca_validity_end, null=True)),
('country_code', models.CharField(blank=True, max_length=2)),
('state', models.CharField(blank=True, max_length=64, verbose_name='state or province')),
('city', models.CharField(blank=True, max_length=64, verbose_name='city')),
('organization', models.CharField(blank=True, max_length=64, verbose_name='organization')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('common_name', models.CharField(blank=True, max_length=63, verbose_name='common name')),
('extensions', jsonfield.fields.JSONField(blank=True, default=list, help_text='additional x509 certificate extensions', verbose_name='extensions')),
('serial_number', models.PositiveIntegerField(blank=True, help_text='leave blank to determine automatically', null=True, verbose_name='serial number')),
('public_key', models.TextField(blank=True, help_text=b'certificate in X.509 PEM format')),
('private_key', models.TextField(blank=True, help_text=b'private key in X.509 PEM format')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
],
options={
'verbose_name': 'CA',
'verbose_name_plural': 'CAs',
},
),
migrations.CreateModel(
name='Cert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('notes', models.TextField(blank=True)),
('key_length', models.CharField(blank=True, choices=[(b'', b''), (b'512', b'512'), (b'1024', b'1024'), (b'2048', b'2048'), (b'4096', b'4096')], default=django_x509.base.models.default_key_length, help_text='bits', max_length=6, verbose_name='key length')),
('digest', models.CharField(blank=True, choices=[(b'', b''), (b'sha1', b'SHA1'), (b'sha224', b'SHA224'), (b'sha256', b'SHA256'), (b'sha384', b'SHA384'), (b'sha512', b'SHA512')], default=django_x509.base.models.default_digest_algorithm, help_text='bits', max_length=8, verbose_name='digest algorithm')),
('validity_start', models.DateTimeField(blank=True, default=django_x509.base.models.default_validity_start, null=True)),
('validity_end', models.DateTimeField(blank=True, default=django_x509.base.models.default_cert_validity_end, null=True)),
('country_code', models.CharField(blank=True, max_length=2)),
('state', models.CharField(blank=True, max_length=64, verbose_name='state or province')),
('city', models.CharField(blank=True, max_length=64, verbose_name='city')),
('organization', models.CharField(blank=True, max_length=64, verbose_name='organization')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('common_name', models.CharField(blank=True, max_length=63, verbose_name='common name')),
('extensions', jsonfield.fields.JSONField(blank=True, default=list, help_text='additional x509 certificate extensions', verbose_name='extensions')),
('serial_number', models.PositiveIntegerField(blank=True, help_text='leave blank to determine automatically', null=True, verbose_name='serial number')),
('public_key', models.TextField(blank=True, help_text=b'certificate in X.509 PEM format')),
('private_key', models.TextField(blank=True, help_text=b'private key in X.509 PEM format')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('revoked', models.BooleanField(default=False, verbose_name='revoked')),
('revoked_at', models.DateTimeField(blank=True, default=None, null=True, verbose_name='revoked at')),
('ca', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_x509.Ca', verbose_name='CA')),
],
options={
'verbose_name': 'certificate',
'verbose_name_plural': 'certificates',
},
),
migrations.AlterUniqueTogether(
name='cert',
unique_together=set([('ca', 'serial_number')]),
),
]
| 74.55814
| 318
| 0.645508
| 756
| 6,412
| 5.309524
| 0.17328
| 0.084953
| 0.069756
| 0.083707
| 0.824863
| 0.816144
| 0.816144
| 0.816144
| 0.816144
| 0.816144
| 0
| 0.04042
| 0.197442
| 6,412
| 85
| 319
| 75.435294
| 0.739604
| 0.010449
| 0
| 0.618421
| 1
| 0
| 0.189057
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.092105
| 0
| 0.144737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
68f46192c919ebefd2404a5721b60e6702e861f0
| 120
|
py
|
Python
|
discord/utils.py
|
kuzaku-developers/disnake
|
61cc1ad4c2bafd39726a1447c85f7e469e41af10
|
[
"MIT"
] | null | null | null |
discord/utils.py
|
kuzaku-developers/disnake
|
61cc1ad4c2bafd39726a1447c85f7e469e41af10
|
[
"MIT"
] | null | null | null |
discord/utils.py
|
kuzaku-developers/disnake
|
61cc1ad4c2bafd39726a1447c85f7e469e41af10
|
[
"MIT"
] | null | null | null |
from disnake.utils import *
from disnake.utils import __dict__ as __original_dict__
locals().update(__original_dict__)
| 24
| 55
| 0.833333
| 16
| 120
| 5.375
| 0.5625
| 0.255814
| 0.372093
| 0.511628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 120
| 4
| 56
| 30
| 0.796296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
68feb9909b529399e26fed0192533be12fce058d
| 186
|
py
|
Python
|
beep.py
|
Kazutaka333/behavioral_cloning
|
a0360783e613fc9e3a7092dade51c18df6583319
|
[
"MIT"
] | null | null | null |
beep.py
|
Kazutaka333/behavioral_cloning
|
a0360783e613fc9e3a7092dade51c18df6583319
|
[
"MIT"
] | null | null | null |
beep.py
|
Kazutaka333/behavioral_cloning
|
a0360783e613fc9e3a7092dade51c18df6583319
|
[
"MIT"
] | null | null | null |
import subprocess
subprocess.call(['echo', '-en', "\007"])
subprocess.call(['echo', '-en', "\007"])
subprocess.call(['echo', '-en', "\007"])
subprocess.call(['echo', '-en', "\007"])
| 31
| 41
| 0.580645
| 22
| 186
| 4.909091
| 0.272727
| 0.518519
| 0.666667
| 0.740741
| 0.851852
| 0.851852
| 0.851852
| 0.851852
| 0.851852
| 0.851852
| 0
| 0.071429
| 0.096774
| 186
| 5
| 42
| 37.2
| 0.571429
| 0
| 0
| 0.8
| 0
| 0
| 0.236559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
ec2e2d62e45f4a00948110b1f2fa420fb1119a46
| 2,044
|
py
|
Python
|
scattergun/scattergun_coreapp/migrations/0003_auto_20160311_0255.py
|
Team4761/Scattergun
|
73a959daf9cec625267b8cda2895968ac3b94aea
|
[
"MIT"
] | null | null | null |
scattergun/scattergun_coreapp/migrations/0003_auto_20160311_0255.py
|
Team4761/Scattergun
|
73a959daf9cec625267b8cda2895968ac3b94aea
|
[
"MIT"
] | 2
|
2016-03-06T20:13:01.000Z
|
2016-03-06T20:18:54.000Z
|
scattergun/scattergun_coreapp/migrations/0003_auto_20160311_0255.py
|
Team4761/Scattergun
|
73a959daf9cec625267b8cda2895968ac3b94aea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-11 02:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scattergun_coreapp', '0002_auto_20160311_0241'),
]
operations = [
migrations.AlterField(
model_name='roundreport',
name='a_defense',
field=models.CharField(blank=True, choices=[('Portcullis', 'Portcullis'), ('Cheval de Frise', 'Cheval de Frise'), ('Ramparts', 'Ramparts'), ('Moats', 'Moats'), ('Drawbridge', 'Drawbridge'), ('Sally Port', 'Sally Port'), ('Rock Wall', 'Rock Wall'), ('Rough Terrain', 'Rough Terrain')], max_length=15, null=True),
),
migrations.AlterField(
model_name='roundreport',
name='b_defense',
field=models.CharField(blank=True, choices=[('Portcullis', 'Portcullis'), ('Cheval de Frise', 'Cheval de Frise'), ('Ramparts', 'Ramparts'), ('Moats', 'Moats'), ('Drawbridge', 'Drawbridge'), ('Sally Port', 'Sally Port'), ('Rock Wall', 'Rock Wall'), ('Rough Terrain', 'Rough Terrain')], max_length=15, null=True),
),
migrations.AlterField(
model_name='roundreport',
name='c_defense',
field=models.CharField(blank=True, choices=[('Portcullis', 'Portcullis'), ('Cheval de Frise', 'Cheval de Frise'), ('Ramparts', 'Ramparts'), ('Moats', 'Moats'), ('Drawbridge', 'Drawbridge'), ('Sally Port', 'Sally Port'), ('Rock Wall', 'Rock Wall'), ('Rough Terrain', 'Rough Terrain')], max_length=15, null=True),
),
migrations.AlterField(
model_name='roundreport',
name='d_defense',
field=models.CharField(blank=True, choices=[('Portcullis', 'Portcullis'), ('Cheval de Frise', 'Cheval de Frise'), ('Ramparts', 'Ramparts'), ('Moats', 'Moats'), ('Drawbridge', 'Drawbridge'), ('Sally Port', 'Sally Port'), ('Rock Wall', 'Rock Wall'), ('Rough Terrain', 'Rough Terrain')], max_length=15, null=True),
),
]
| 56.777778
| 323
| 0.611546
| 218
| 2,044
| 5.637615
| 0.293578
| 0.052075
| 0.084622
| 0.094386
| 0.833198
| 0.833198
| 0.797396
| 0.797396
| 0.797396
| 0.797396
| 0
| 0.024375
| 0.197162
| 2,044
| 35
| 324
| 58.4
| 0.724558
| 0.032779
| 0
| 0.571429
| 1
| 0
| 0.385512
| 0.011651
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b91130e913685382cee3d6b10a0c20ceee69553
| 185
|
py
|
Python
|
pyeasee/__init__.py
|
perjarne/pyeasee
|
f67d8c893ec6820d32ab2536b0fc7b7b2b0db14a
|
[
"MIT"
] | 15
|
2020-10-21T20:17:52.000Z
|
2022-02-08T18:46:19.000Z
|
pyeasee/__init__.py
|
perjarne/pyeasee
|
f67d8c893ec6820d32ab2536b0fc7b7b2b0db14a
|
[
"MIT"
] | 19
|
2020-10-15T08:30:34.000Z
|
2022-03-21T20:46:54.000Z
|
pyeasee/__init__.py
|
perjarne/pyeasee
|
f67d8c893ec6820d32ab2536b0fc7b7b2b0db14a
|
[
"MIT"
] | 5
|
2020-07-04T07:43:50.000Z
|
2020-09-09T19:49:25.000Z
|
"""
Easee charger API library
"""
from .easee import *  # noqa: F401,F403
from .charger import *  # noqa: F401,F403
from .site import *  # noqa: F401,F403
from .utils import *  # noqa: F401,F403
from .const import *  # noqa: F401,F403
| 20.555556
| 31
| 0.637838
| 24
| 185
| 4.916667
| 0.416667
| 0.423729
| 0.474576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.221622
| 185
| 8
| 32
| 23.125
| 0.819444
| 0.302703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6b930e86e561c035d06c41900119855d73759a3d
| 12,545
|
py
|
Python
|
test/test_model.py
|
jks-liu/noiseplanet
|
907b2bb9a93df48a266ad01c7cad0d8ef2367e78
|
[
"Apache-2.0"
] | 27
|
2020-06-13T21:00:53.000Z
|
2022-03-29T03:22:06.000Z
|
test/test_model.py
|
voodooed/noiseplanet
|
fca79ef3b81826a5286d566ebf3bf9340df13201
|
[
"Apache-2.0"
] | 12
|
2020-12-24T08:14:00.000Z
|
2022-02-06T18:01:51.000Z
|
test/test_model.py
|
voodooed/noiseplanet
|
fca79ef3b81826a5286d566ebf3bf9340df13201
|
[
"Apache-2.0"
] | 9
|
2020-08-28T16:06:54.000Z
|
2022-01-26T01:05:02.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 18 23:19:54 2020
@author: arthurd
"""
import osmnx as ox
import numpy as np
import time
# Visualize the data
import matplotlib.pyplot as plt
from matplotlib import collections as mc
# Test the import
from noiseplanet.matcher import model
from noiseplanet.matcher.model import route
def test_nearest():
print('Test match_nearest_edge(graph, track)')
track = np.array( [[45.75815477, 4.83579607],
[45.75815501, 4.83578568],
[45.75813165, 4.83577836],
[45.75811917, 4.83577826],
[45.75811917, 4.83577826],
[45.75810508, 4.83574771],
[45.75807665, 4.83572188],
[45.75807665, 4.83572188],
[45.75806175, 4.83570647],
[45.75805712, 4.8356987 ],
[45.75804746, 4.83569629],
[45.75803752, 4.83568949],
[45.75802591, 4.83566137],
[45.75802629, 4.83565147],
[45.75802629, 4.83565147],
[45.75801363, 4.83562952],
[45.75800574, 4.83562477],
[45.75800574, 4.83562477],
[45.75799968, 4.83560606],
[45.75800028, 4.83559067],
[45.75800028, 4.83559067],
[45.75799686, 4.83558285],
[45.75797956, 4.83556543],
[45.75797466, 4.83555272],
[45.75797466, 4.83555272],
[45.75796105, 4.83554806],
[45.75793787, 4.83553723],
[45.75793787, 4.83553723],
[45.75793882, 4.83553601],
[45.75792874, 4.83550413],
[45.75793143, 4.83549635],
[45.75793143, 4.83549635],
[45.75791593, 4.83548718],
[45.75788795, 4.83546475],
[45.75788795, 4.83546475],
[45.75786658, 4.83547026],
[45.75783632, 4.83546862],
[45.75783632, 4.83546862],
[45.75782964, 4.83547046],
[45.75780487, 4.83546808],
[45.7577991 , 4.83545974]])
# Create graph
graph = model.graph_from_track(track, network='all')
# Compute the path
start = time.time()
track_corr, route_corr, edgeid, stats = model.match_nearest_edge(graph, track)
print('Map Matching nearest in {0}s'.format(round(time.time() - start, 5)))
# Visualization Nearest
fig, ax = ox.plot_graph(graph, node_color="skyblue", node_alpha=.5, node_size=15, show=False, close=False, annotate=False)
plt.title("Map Matching to the closest edge", color="#999999")
plt.scatter(track[:, 1], track[:, 0], s=30, marker='.', color="black", zorder=2, label='Original Point')
plt.plot(track[:, 1], track[:, 0], linewidth=2, alpha=.7, color="black")
plt.scatter(track_corr[:, 1], track_corr[:, 0], s=30, marker='.', color="darkcyan", zorder=2, label='Projected Point')
plt.plot(route_corr[:, 1], route_corr[:, 0], linewidth=2, alpha=.7, color="darkcyan")
# projection between the two tracks
lines = [[(track[i, 1], track[i, 0]), (track_corr[i, 1], track_corr[i, 0])] for i in range(len(track))]
lc = mc.LineCollection(lines, linestyle='--', colors='skyblue', alpha=1, linewidths=1, zorder=1, label='Projection')
ax.add_collection(lc)
ax.legend(loc=1, frameon=True, facecolor='w')
def test_leuven():
print('Test match_leuven(graph, track)')
track = np.array( [[45.75815477, 4.83579607],
[45.75815501, 4.83578568],
[45.75813165, 4.83577836],
[45.75811917, 4.83577826],
[45.75811917, 4.83577826],
[45.75810508, 4.83574771],
[45.75807665, 4.83572188],
[45.75807665, 4.83572188],
[45.75806175, 4.83570647],
[45.75805712, 4.8356987 ],
[45.75804746, 4.83569629],
[45.75803752, 4.83568949],
[45.75802591, 4.83566137],
[45.75802629, 4.83565147],
[45.75802629, 4.83565147],
[45.75801363, 4.83562952],
[45.75800574, 4.83562477],
[45.75800574, 4.83562477],
[45.75799968, 4.83560606],
[45.75800028, 4.83559067],
[45.75800028, 4.83559067],
[45.75799686, 4.83558285],
[45.75797956, 4.83556543],
[45.75797466, 4.83555272],
[45.75797466, 4.83555272],
[45.75796105, 4.83554806],
[45.75793787, 4.83553723],
[45.75793787, 4.83553723],
[45.75793882, 4.83553601],
[45.75792874, 4.83550413],
[45.75793143, 4.83549635],
[45.75793143, 4.83549635],
[45.75791593, 4.83548718],
[45.75788795, 4.83546475],
[45.75788795, 4.83546475],
[45.75786658, 4.83547026],
[45.75783632, 4.83546862],
[45.75783632, 4.83546862],
[45.75782964, 4.83547046],
[45.75780487, 4.83546808],
[45.7577991 , 4.83545974]])
# Create graph
graph = model.graph_from_track(track, network='all')
# Compute the path
start = time.time()
track_corr, route_corr, edgeid, stats = model.match_leuven(graph, track)
print('Map Matching hmm in {0}s'.format(round(time.time() - start, 5)))
# Visualization Nearest
fig, ax = ox.plot_graph(graph, node_color="skyblue", node_alpha=.5, node_size=15, show=False, close=False, annotate=False)
plt.title("Map Matching to the closest edge", color="#999999")
plt.scatter(track[:, 1], track[:, 0], s=30, marker='.', color="black", zorder=2, label='Original Point')
plt.plot(track[:, 1], track[:, 0], linewidth=2, alpha=.7, color="black")
plt.scatter(track_corr[:, 1], track_corr[:, 0], s=30, marker='.', color="darkcyan", zorder=2, label='Projected Point')
plt.plot(route_corr[:, 1], route_corr[:, 0], linewidth=2, alpha=.7, color="darkcyan")
# projection between the two tracks
lines = [[(track[i, 1], track[i, 0]), (track_corr[i, 1], track_corr[i, 0])] for i in range(len(track))]
lc = mc.LineCollection(lines, linestyle='--', colors='skyblue', alpha=1, linewidths=1, zorder=1, label='Projection')
ax.add_collection(lc)
ax.legend(loc=1, frameon=True, facecolor='w')
def test_route_from_track():
print('Test route_from_track(track)')
track = np.array( [[45.75809136, 4.83577159],
[45.7580932 , 4.83576182],
[45.7580929 , 4.8357634 ],
[45.75809207, 4.8357678 ],
[45.75809207, 4.8357678 ],
[45.75809647, 4.83574439],
[45.75809908, 4.83573054],
[45.75809908, 4.83573054],
[45.75810077, 4.83572153],
[45.75810182, 4.83571596],
[45.75810159, 4.83571719],
[45.7581021 , 4.83571442],
[45.7580448 , 4.83558152],
[45.75804304, 4.83558066],
[45.75804304, 4.83558066],
[45.75802703, 4.83557288],
[45.75801895, 4.83556895],
[45.75801895, 4.83556895],
[45.75800954, 4.83556438],
[45.75800681, 4.83556305],
[45.75800681, 4.83556305],
[45.75800209, 4.83556076],
[45.75798288, 4.83555142],
[45.75797578, 4.83554797],
[45.75797578, 4.83554797],
[45.75796259, 4.83554156],
[45.7579395 , 4.83553033],
[45.7579395 , 4.83553033],
[45.75794009, 4.83553062],
[45.75792429, 4.83552294],
[45.75792505, 4.83552331],
[45.75792505, 4.83552331],
[45.7579092 , 4.83551561],
[45.75787935, 4.8355011 ],
[45.75787935, 4.8355011 ],
[45.75786135, 4.83549235],
[45.75783387, 4.83547899],
[45.75783387, 4.83547899],
[45.75782827, 4.83547626],
[45.75780555, 4.83546522],
[45.7577986 , 4.83546184]])
edgeid = np.array([ [6135818902, 6135818901],
[6135818901, 6135818902],
[6135818902, 6135818901],
[6135818901, 6135818902],
[6135818901, 6135818902],
[6135818901, 6135818902],
[6135818901, 6135818902],
[6135818901, 6135818902],
[6135818902, 6135818901],
[6135818902, 6135818901],
[6135818901, 6135818902],
[6135818902, 6135818901],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765],
[ 192313667, 1777581765]])
graph = route.graph_from_track(track)
start = time.time()
route_corr, stats = route.route_from_track(graph, track, edgeid=None)
print('Route without edgeid computed in : {0}s'.format(round(time.time() - start, 5)))
start = time.time()
route_corr, stats = route.route_from_track(graph, track, edgeid=edgeid)
print('Route with edgeid computed in : {0}s'.format(round(time.time() - start, 5)))
# Visualization leuven - Leuven Map Matching
fig, ax = ox.plot_graph(graph, node_color="skyblue", node_alpha=.5, node_size=15, show=False, close=False, annotate=False)
plt.title("Map Matching with Viterbi's algorithm (leuven)", color="#999999")
plt.scatter(track[:, 1], track[:, 0], s=30, marker='.', color="darkcyan", zorder=2, label='Original Point')
plt.plot(route_corr[:, 1], route_corr[:, 0], linewidth=2, alpha=.7, color="darkcyan")
if __name__ == "__main__":
# Testing the different method of Map Matching
test_nearest()
test_leuven()
# Testing route functions
test_route_from_track()
| 45.452899
| 126
| 0.472937
| 1,191
| 12,545
| 4.922754
| 0.218304
| 0.093979
| 0.13372
| 0.181477
| 0.831997
| 0.748081
| 0.727614
| 0.726249
| 0.709193
| 0.705782
| 0
| 0.427112
| 0.399043
| 12,545
| 275
| 127
| 45.618182
| 0.350577
| 0.031487
| 0
| 0.768182
| 0
| 0
| 0.047344
| 0.003959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013636
| false
| 0
| 0.031818
| 0
| 0.045455
| 0.031818
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2e31e6f74aad53ccce2eece719edfee3ab00d049
| 6,090
|
py
|
Python
|
tests/activity/test_activity_verify_lax_response.py
|
elifesciences/elife-bot
|
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
|
[
"MIT"
] | 17
|
2015-02-10T07:10:29.000Z
|
2021-05-14T22:24:45.000Z
|
tests/activity/test_activity_verify_lax_response.py
|
elifesciences/elife-bot
|
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
|
[
"MIT"
] | 459
|
2015-03-31T18:24:23.000Z
|
2022-03-30T19:44:40.000Z
|
tests/activity/test_activity_verify_lax_response.py
|
elifesciences/elife-bot
|
d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9
|
[
"MIT"
] | 9
|
2015-04-18T16:57:31.000Z
|
2020-10-30T11:49:13.000Z
|
import unittest
from ddt import ddt, data
from mock import patch
from activity.activity_VerifyLaxResponse import activity_VerifyLaxResponse
import tests.activity.settings_mock as settings_mock
from tests.activity.classes_mock import FakeSession, FakeLogger
def fake_emit_monitor_event(
settings, item_identifier, version, run, event_type, status, message
):
pass
@ddt
class TestVerifyLaxResponse(unittest.TestCase):
def setUp(self):
self.verifylaxresponse = activity_VerifyLaxResponse(
settings_mock, FakeLogger(), None, None, None
)
@data(
{
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "353",
"result": "ingested",
"status": "vor",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"requested_action": "ingest",
"force": False,
"message": None,
"update_date": "2012-12-13T00:00:00Z",
}
)
@patch("activity.activity_VerifyLaxResponse.get_session")
@patch.object(activity_VerifyLaxResponse, "emit_monitor_event")
def test_do_activity(self, data, fake_emit_monitor, fake_get_session):
fake_emit_monitor.side_effect = fake_emit_monitor_event
fake_session = FakeSession({})
fake_get_session.return_value = fake_session
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(
settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"end",
" Finished Verification. Lax has responded with result: ingested."
" Article: " + data["article_id"],
)
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_SUCCESS)
self.assertEqual(fake_session.get_value("published"), False)
@data(
{
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "353",
"result": "ingested",
"status": "vor",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"requested_action": "ingest",
"force": True,
"message": None,
"update_date": "2012-12-13T00:00:00Z",
}
)
@patch("activity.activity_VerifyLaxResponse.get_session")
@patch.object(activity_VerifyLaxResponse, "emit_monitor_event")
def test_do_activity_force_true(self, data, fake_emit_monitor, fake_get_session):
fake_emit_monitor.side_effect = fake_emit_monitor_event
fake_session = FakeSession({})
fake_get_session.return_value = fake_session
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(
settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"end",
" Finished Verification. Lax has responded with result: ingested."
" Article: " + data["article_id"],
)
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_SUCCESS)
self.assertEqual(fake_session.get_value("published"), True)
@data(
{
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "353",
"result": "error",
"status": "vor",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"requested_action": "ingest",
"force": False,
"message": None,
"update_date": "2012-12-13T00:00:00Z",
}
)
@patch("activity.activity_VerifyLaxResponse.get_session")
@patch.object(activity_VerifyLaxResponse, "emit_monitor_event")
    def test_do_activity_error_no_message(self, data, fake_emit_monitor, fake_get_session):
        fake_emit_monitor.side_effect = fake_emit_monitor_event
        fake_get_session.return_value = FakeSession({})
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(
settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"error",
"Lax has not ingested article "
+ data["article_id"]
+ " result from lax:"
+ str(data["result"])
+ "; message from lax: "
+ "(empty message)",
)
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_PERMANENT_FAILURE)
@data(
{
"run": "74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"article_id": "353",
"result": "error",
"status": "poa",
"version": "1",
"expanded_folder": "00353.1/74e22d8f-6b5d-4fb7-b5bf-179c1aaa7cff",
"requested_action": "ingest",
"force": False,
"message": "An error has occurred",
"update_date": "2012-12-13T00:00:00Z",
}
)
@patch("activity.activity_VerifyLaxResponse.get_session")
@patch.object(activity_VerifyLaxResponse, "emit_monitor_event")
    def test_do_activity_error(self, data, fake_emit_monitor, fake_get_session):
        fake_emit_monitor.side_effect = fake_emit_monitor_event
        fake_get_session.return_value = FakeSession({})
result = self.verifylaxresponse.do_activity(data)
fake_emit_monitor.assert_called_with(
settings_mock,
data["article_id"],
data["version"],
data["run"],
"Verify Lax Response",
"error",
"Lax has not ingested article "
+ data["article_id"]
+ " result from lax:"
+ str(data["result"])
+ "; message from lax: "
+ data["message"],
)
self.assertEqual(result, self.verifylaxresponse.ACTIVITY_PERMANENT_FAILURE)
if __name__ == "__main__":
unittest.main()
| 36.909091
| 87
| 0.599015
| 609
| 6,090
| 5.720854
| 0.16092
| 0.066303
| 0.073192
| 0.045924
| 0.850172
| 0.850172
| 0.850172
| 0.850172
| 0.850172
| 0.808266
| 0
| 0.04961
| 0.285057
| 6,090
| 164
| 88
| 37.134146
| 0.750574
| 0
| 0
| 0.707792
| 0
| 0
| 0.271757
| 0.083415
| 0
| 0
| 0
| 0
| 0.064935
| 1
| 0.038961
| false
| 0.006494
| 0.038961
| 0
| 0.084416
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2e39101c89a442effada76c1637886cd6ac6692c
| 51,280
|
py
|
Python
|
core/plugins/blb.py
|
TheArchives/Nexus
|
5482def8b50562fdbae980cda9b1708bfad8bffb
|
[
"BSD-2-Clause"
] | 1
|
2021-04-06T18:54:31.000Z
|
2021-04-06T18:54:31.000Z
|
core/plugins/blb.py
|
TheArchives/Nexus
|
5482def8b50562fdbae980cda9b1708bfad8bffb
|
[
"BSD-2-Clause"
] | null | null | null |
core/plugins/blb.py
|
TheArchives/Nexus
|
5482def8b50562fdbae980cda9b1708bfad8bffb
|
[
"BSD-2-Clause"
] | 1
|
2021-12-20T18:11:25.000Z
|
2021-12-20T18:11:25.000Z
|
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have recieved a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
import threading
from reqs.twisted.internet import reactor
from core.plugins import ProtocolPlugin
from core.decorators import *
from core.constants import *
class BlbPlugin(ProtocolPlugin):
commands = {
"z": "commandBlb",
"blb": "commandBlb",
"draw": "commandBlb",
"cuboid": "commandBlb",
"cub": "commandBlb",
"box": "commandBlb",
"bhb": "commandHBlb",
"hbox": "commandHBlb",
"bwb": "commandWBlb",
"bcb": "commandBcb",
"bhcb": "commandBhcb",
"bfb": "commandFBlb",
"newblb": "commandNBlb",
"oblb": "commandOneBlb",
"bob": "commandOneBlb",
"bxb": "commandBxb",
"xbtb": "commandXBtb",
"zbtb": "commandZBtb",
"bwcb": "commandBwcb",
}
def getBuildLimit(self, overriderank):
if self.client.isDirectorPlus() or overriderank:
return self.client.factory.build_director
elif self.client.isAdmin() or self.client.isCoder():
return self.client.factory.build_admin
elif self.client.isMod():
return self.client.factory.build_mod
elif self.client.isOp() or self.client.isWorldOwner():
return self.client.factory.build_op
else:
return self.client.factory.build_other
@build_list
@builder_only
def commandOneBlb(self, parts, fromloc, overriderank):
"/bob blockname [x y z] - Builder\nAliases: oblb\nSets block to blocktype.\nClick 1 block then do the command."
if len(parts) < 5 and len(parts) != 2:
self.client.sendServerMessage("Please enter a type (and possibly a coord triple)")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
return
# If they only provided the type argument, use the last block place
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
except IndexError:
self.client.sendServerMessage("You have not clicked a block yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
try:
if not self.client.AllowedToBuild(x, y, z) and not overriderank:
return
self.client.world[x, y, z] = block
self.client.runHook("blockchange", x, y, z, ord(block), ord(block), fromloc)
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=self.client.world)
self.client.sendBlock(x, y, z, block)
except AssertionError:
self.client.sendErrorMessage("Out of bounds bob error.")
return
else:
if fromloc == "user":
self.client.sendServerMessage("Your bob just finished.")
@build_list
@director_only
def commandNBlb(self, parts, fromloc, overriderank):
"/newblb blockname [x y z x2 y2 z2] - Director\nSets all blocks in this area to block.\nClick 2 corners then do the command."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a type (and possibly two coord triples)")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
return
# If they only provided the type argument, use the last two block places
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
realLimit = (x2 - x) * (y2 - y) * (z2 - z)
if realLimit > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to blb.")
return
world = self.client.world
if realLimit >= 45565: # To test it out first, will try a bigger one later - tyteen
def doBlocks():
# This implements 2 new things: Respawn method and try-the-whole-loop.
# Since the loop stops when an AssertionErrors pops up, so we just
# go and check the whole loop, so there isn't a need to try the block
# Everytime.
# The respawn method changes the BLB proceedures as follows:
# 1. Change the block but DOES NOT send it to users
# 2. Respawn the users in world
# Since this method does not send blocks one by one but respawns to download
# the map at one time, it saves time.
# All clients will get respawned too.
# Credits to UberFoX for this idea. Thanks Stacy!
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
world[i, j, k] = block
self.client.sendServerMessage("BLB finished. Respawning...")
return True
except AssertionError:
self.client.sendErrorMessage("Out of bounds blb error.")
return
threading.Thread(target=doBlocks).start()
# Now the fun part. Respawn them all!
for client in world.clients:
self.client.queueTask(TASK_INSTANTRESPAWN, self.client.username, world=world)
if fromloc == "user":
self.client.sendServerMessage("Your blb just completed.")
else:
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
world[i, j, k] = block
self.client.runHook("blockchange", x, y, z, ord(block), ord(block), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world=world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds blb error.")
return
block_iter = iter(generate_changes())
def do_step():
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your blb just completed.")
pass
do_step()
@build_list
@builder_only
def commandBlb(self, parts, fromloc, overriderank):
"/blb blockname [x y z x2 y2 z2] - Builder\nAliases: box, cub, cuboid, draw\nSets all blocks in this area to block.\nClick 2 corners then do the command."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a type (and possibly two coord triples)")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
return
# If they only provided the type argument, use the last two block places
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
# Stop them doing silly things
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to blb.")
return
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
self.client.runHook("blockchange", x, y, z, ord(block), ord(block), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds blb error.")
return
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
                    for x in range(10): # 10 blocks at a time; with the 0.01s delay below this is nominally ~1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your blb just completed.")
pass
do_step()
@build_list
@builder_only
def commandHBlb(self, parts, fromloc, overriderank):
"/bhb blockname [x y z x2 y2 z2] - Builder\nAliases: hbox\nSets all blocks in this area to block, hollow."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a block type")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
return
# If they only provided the type argument, use the last two block places
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
# Stop them doing silly things
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bhb.")
return
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
if i==x or i==x2 or j==y or j==y2 or k==z or k==z2:
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
self.client.runHook("blockchange", x, y, z, ord(block), ord(block), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bhb error.")
return
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
                    for x in range(10): # 10 blocks at a time; with the 0.01s delay below this is nominally ~1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bhb just completed.")
pass
do_step()
@build_list
@builder_only
def commandWBlb(self, parts, fromloc, overriderank):
"/bwb blockname [x y z x2 y2 z2] - Builder\nBuilds four walls between the two areas.\nHollow, with no roof or floor."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a block type")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
return
# If they only provided the type argument, use the last two block places
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
# Stop them doing silly things
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bwb.")
return
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
if i==x or i==x2 or k==z or k==z2:
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
self.client.runHook("blockchange", x, y, z, ord(block), ord(block), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bwb error.")
return
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
                    for x in range(10): # 10 blocks at a time; with the 0.01s delay below this is nominally ~1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bwb just completed.")
pass
do_step()
@build_list
@builder_only
def commandBcb(self, parts, fromloc, overriderank):
"/bcb blockname blockname2 [x y z x2 y2 z2] - Builder\nSets all blocks in this area to block, checkered."
if len(parts) < 9 and len(parts) != 3:
self.client.sendServerMessage("Please enter two types (and possibly two coord triples)")
else:
# Try getting block2 as a direct integer type.
try:
block2 = chr(int(parts[2]))
except ValueError:
# OK, try a symbolic type.
try:
block2 = chr(globals()['BLOCK_%s' % parts[2].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[2])
return
# Try getting the block as a direct integer type.
try:
block = chr(int(parts[1]))
except ValueError:
# OK, try a symbolic type.
try:
block = chr(globals()['BLOCK_%s' % parts[1].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
# Check the block is valid
if ord(block) > 49:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
op_blocks = [BLOCK_SOLID, BLOCK_WATER, BLOCK_LAVA]
if ord(block) in op_blocks and not self.client.isOpPlus():
self.client.sendServerMessage("Sorry, but you can't use that block.")
return
# Check that block2 is valid
if ord(block2) > 49:
                self.client.sendErrorMessage("'%s' is not a valid block type." % parts[2])
return
if ord(block2) == 7:
try:
username = self.client.factory.usernames[self.client.username.lower()]
                except Exception:
self.client.sendErrorMessage("ERROR Identity could not be confirmed")
return
                if not (username.isMember() or username.isOpPlus()):
                    self.client.sendErrorMessage("Solid is op-only")
                    return
# If they only provided the type argument, use the last two block places
if len(parts) == 3:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[3])
y = int(parts[4])
z = int(parts[5])
x2 = int(parts[6])
y2 = int(parts[7])
z2 = int(parts[8])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
# Stop them doing silly things
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bcb.")
return
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k):
return
if (i+j+k)%2 == 0:
curNewBlock = block2
else:
curNewBlock = block
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != curNewBlock:
world[i, j, k] = curNewBlock
self.client.runHook("blockchange", x, y, z, ord(curNewBlock), ord(curNewBlock), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, curNewBlock), world)
self.client.sendBlock(i, j, k, curNewBlock)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bcb error.")
return
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
                    for x in range(10): # 10 blocks at a time; with the 0.01s delay below this is nominally ~1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bcb just completed.")
pass
do_step()
@build_list
@builder_only
def commandBhcb(self, parts, fromloc, overriderank):
"/bhcb blockname blockname2 [x y z x2 y2 z2] - Builder\nSets all blocks in this area to blocks, checkered hollow."
if len(parts) < 9 and len(parts) != 3:
self.client.sendServerMessage("Please enter two block types")
else:
# Try getting block2 as a direct integer type.
try:
block2 = chr(int(parts[2]))
except ValueError:
# OK, try a symbolic type.
try:
block2 = chr(globals()['BLOCK_%s' % parts[2].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[2])
return
# Try getting the block as a direct integer type.
try:
block = chr(int(parts[1]))
except ValueError:
# OK, try a symbolic type.
try:
block = chr(globals()['BLOCK_%s' % parts[1].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
# Check the block is valid
if ord(block) > 49:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
op_blocks = [BLOCK_SOLID, BLOCK_WATER, BLOCK_LAVA]
if ord(block) in op_blocks and not self.client.isOpPlus():
self.client.sendErrorMessage("Sorry, but you can't use that block.")
return
# If they only provided the type argument, use the last two block places
if len(parts) == 3:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendErrorMessage("You have not clicked two corners yet.")
return
else:
try:
                    x = int(parts[3])
                    y = int(parts[4])
                    z = int(parts[5])
                    x2 = int(parts[6])
                    y2 = int(parts[7])
                    z2 = int(parts[8])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
# Stop them doing silly things
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bhcb.")
return
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k):
return
if i==x or i==x2 or j==y or j==y2 or k==z or k==z2:
if (i+j+k)%2 == 0:
curNewBlock = block2
else:
curNewBlock = block
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != curNewBlock:
world[i, j, k] = curNewBlock
self.client.runHook("blockchange", x, y, z, ord(curNewBlock), ord(curNewBlock), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, curNewBlock), world)
self.client.sendBlock(i, j, k, curNewBlock)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bhcb error.")
return
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
                    for x in range(10): # 10 blocks at a time; with the 0.01s delay below this is nominally ~1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bhcb just completed.")
pass
do_step()
@build_list
@builder_only
def commandFBlb(self, parts, fromloc, overriderank):
"/bfb blockname [x y z x2 y2 z2] - Builder\nSets all blocks in this area to block, wireframe."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a block type")
else:
# Try getting the block as a direct integer type.
try:
block = chr(int(parts[1]))
except ValueError:
# OK, try a symbolic type.
try:
block = chr(globals()['BLOCK_%s' % parts[1].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
# Check the block is valid
if ord(block) > 49:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
op_blocks = [BLOCK_SOLID, BLOCK_WATER, BLOCK_LAVA]
if ord(block) in op_blocks and not self.client.isOpPlus():
self.client.sendErrorMessage("Sorry, but you can't use that block.")
return
# If they only provided the type argument, use the last two block places
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All coordinate parameters must be integers.")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
# Stop them doing silly things
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bfb.")
return
# Draw all the blocks on, I guess
# We use a generator so we can slowly release the blocks
# We also keep world as a local so they can't change worlds and affect the new one
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k):
return
if (i==x and j==y) or (i==x2 and j==y2) or (j==y2 and k==z2) or (i==x2 and k==z2) or (j==y and k==z) or (i==x and k==z) or (i==x and k==z2) or (j==y and k==z2) or (i==x2 and k==z) or (j==y2 and k==z) or (i==x and j==y2) or (i==x2 and j==y):
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
self.client.runHook("blockchange", x, y, z, ord(block), ord(block), fromloc)
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bfb error.")
return
# Now, set up a loop delayed by the reactor
block_iter = iter(generate_changes())
def do_step():
# Do 10 blocks
try:
                    for x in range(10): # 10 blocks at a time; with the 0.01s delay below this is nominally ~1000 blocks a second
block_iter.next()
reactor.callLater(0.01, do_step) # This is how long (in seconds) it waits to run another 10 blocks
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bfb just completed.")
pass
do_step()
@build_list
@builder_only
def commandBxb(self, parts, fromloc, overriderank):
"/bxb blockname [x y z x2 y2 z2] - Builder\nSets all blocks in this area to block, cross style."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a block type")
else:
try:
block = chr(int(parts[1]))
except ValueError:
try:
block = chr(globals()['BLOCK_%s' % parts[1].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
if ord(block) > 49:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
op_blocks = [BLOCK_SOLID, BLOCK_WATER, BLOCK_LAVA]
if ord(block) in op_blocks and not self.client.isOpPlus():
self.client.sendErrorMessage("Sorry, but you can't use that block.")
return
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All parameters must be integers")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bxb.")
return
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k):
return
                                # Cross style: paint the midlines of each face of the cuboid.
                                if ((i==(x+x2)/2 and j==y) or (i==(x+x2)/2 and j==y2) or (i==x and j==(y+y2)/2)
                                        or (i==x2 and j==(y+y2)/2) or (j==y and k==(z+z2)/2) or (i==(x+x2)/2 and k==z)
                                        or (j==(y+y2)/2 and k==z) or (i==x2 and k==(z+z2)/2) or (i==(x+x2)/2 and k==z2)
                                        or (j==y2 and k==(z+z2)/2) or (i==x and k==(z+z2)/2) or (j==(y+y2)/2 and k==z2)):
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
                                        self.client.runHook("blockchange", i, j, k, ord(block), ord(block), fromloc) # report the block actually changed, not the corner
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bxb error.")
return
block_iter = iter(generate_changes())
def do_step():
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bxb just completed.")
pass
do_step()
@build_list
@builder_only
def commandBwcb(self, parts, fromloc, overriderank):
"/bwcb blockname blockname2 [x y z x2 y2 z2] - Builder\nSets all blocks in this area to blocks, checkered walls."
if len(parts) < 9 and len(parts) != 3:
self.client.sendServerMessage("Please enter two block types")
else:
try:
block2 = chr(int(parts[2]))
except ValueError:
try:
block2 = chr(globals()['BLOCK_%s' % parts[2].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[2])
return
try:
block = chr(int(parts[1]))
except ValueError:
try:
block = chr(globals()['BLOCK_%s' % parts[1].upper()])
except KeyError:
self.client.sendErrorMessage("'%s' is not a valid block type." % parts[1])
return
if ord(block) > 49:
self.client.sendServerMessage("'%s' is not a valid block type." % parts[1])
return
op_blocks = [BLOCK_SOLID, BLOCK_WATER, BLOCK_LAVA]
if ord(block) in op_blocks and not self.client.isOpPlus():
self.client.sendErrorMessage("Sorry, but you can't use that block.")
return
if len(parts) == 3:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
                    # coordinates follow the two block names, so they start at parts[3]
                    x = int(parts[3])
                    y = int(parts[4])
                    z = int(parts[5])
                    x2 = int(parts[6])
                    y2 = int(parts[7])
                    z2 = int(parts[8])
except ValueError:
self.client.sendServerMessage("All parameters must be integers")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to bwcb.")
return
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
if not self.client.AllowedToBuild(i, j, k):
return
if i==x or i==x2 or k==z or k==z2:
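                                    # (i+j+k) % 2 alternates between adjacent cells
                                    # (e.g. (0,0,0) -> block2, (1,0,0) -> block),
                                    # producing the checkerboard on the four walls.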
if (i+j+k)%2 == 0:
curNewBlock = block2
else:
curNewBlock = block
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != curNewBlock:
world[i, j, k] = curNewBlock
                                        self.client.runHook("blockchange", i, j, k, ord(curNewBlock), ord(curNewBlock), fromloc) # report the block actually changed, not the corner
self.client.queueTask(TASK_BLOCKSET, (i, j, k, curNewBlock), world)
self.client.sendBlock(i, j, k, curNewBlock)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds bwcb error.")
return
block_iter = iter(generate_changes())
def do_step():
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your bwcb just completed.")
pass
do_step()
@build_list
@builder_only
def commandXBtb(self, parts, fromloc, overriderank):
"/xbtb blockname [x y z x2 y2 z2] - Builder\nBuilds a tunnel on the x-axis."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a block type")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
                return
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All parameters must be integers")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to btb.")
return
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
                            if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
if i==x or i==x2 or j==y or j==y2:
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
                                        self.client.runHook("blockchange", i, j, k, ord(block), ord(block), fromloc) # report the block actually changed, not the corner
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendServerMessage("Out of bounds btb error.")
return
block_iter = iter(generate_changes())
def do_step():
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your btb just completed.")
pass
do_step()
@build_list
@builder_only
def commandZBtb(self, parts, fromloc, overriderank):
"/zbtb blockname [x y z x2 y2 z2] - Builder\nBuilds a tunnel on the z-axis."
if len(parts) < 8 and len(parts) != 2:
self.client.sendServerMessage("Please enter a block type")
else:
block = self.client.GetBlockValue(parts[1])
            if block is None:
                return
if len(parts) == 2:
try:
x, y, z = self.client.last_block_changes[0]
x2, y2, z2 = self.client.last_block_changes[1]
except IndexError:
self.client.sendServerMessage("You have not clicked two corners yet.")
return
else:
try:
x = int(parts[2])
y = int(parts[3])
z = int(parts[4])
x2 = int(parts[5])
y2 = int(parts[6])
z2 = int(parts[7])
except ValueError:
self.client.sendServerMessage("All parameters must be integers")
return
if x > x2:
x, x2 = x2, x
if y > y2:
y, y2 = y2, y
if z > z2:
z, z2 = z2, z
limit = self.getBuildLimit(overriderank)
if (x2 - x) * (y2 - y) * (z2 - z) > limit:
self.client.sendErrorMessage("Sorry, that area is too big for you to btb.")
return
world = self.client.world
def generate_changes():
try:
for i in range(x, x2+1):
for j in range(y, y2+1):
for k in range(z, z2+1):
                            if not self.client.AllowedToBuild(i, j, k) and not overriderank:
return
if k==z or k==z2 or j==y or j==y2:
check_offset = world.blockstore.get_offset(i, j, k)
existingBlock = world.blockstore.raw_blocks[check_offset]
if existingBlock != block:
world[i, j, k] = block
                                        self.client.runHook("blockchange", i, j, k, ord(block), ord(block), fromloc) # report the block actually changed, not the corner
self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world)
self.client.sendBlock(i, j, k, block)
yield
except AssertionError:
self.client.sendErrorMessage("Out of bounds btb error.")
return
block_iter = iter(generate_changes())
def do_step():
try:
for x in range(10):
block_iter.next()
reactor.callLater(0.01, do_step)
except StopIteration:
if fromloc == "user":
self.client.sendServerMessage("Your btb just completed.")
pass
do_step()
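# The commands above all share one throttling idiom: a generator performs the
# edit one block at a time, and a self-rescheduling reactor callback drains it
# in batches. A minimal standalone sketch of that idiom (hypothetical names;
# assumes Twisted and the Python 2 iterator protocol used above):
#
#     from twisted.internet import reactor
#
#     def run_chunked(work_iter, per_tick=10, delay=0.01, on_done=None):
#         def step():
#             try:
#                 for _ in range(per_tick):
#                     work_iter.next()            # advance the generator
#                 reactor.callLater(delay, step)  # schedule the next batch
#             except StopIteration:
#                 if on_done is not None:
#                     on_done()                   # e.g. tell the user it finished
#         step()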
[row metadata elided: numeric stats for the file above, then the next row's header — corona_python/country.py, MakufonSkifto/corona-python, MIT, 8,589 bytes, 3 stars]
import requests
import json
import datetime
class Country:
def __init__(self, country):
self.country = country
@staticmethod
def last_updated():
"""
        :returns: (str) The time when the API was last updated
"""
request = requests.get("https://disease.sh/v2/all")
corona = json.loads(request.content)
if request.status_code != 200:
pass
else:
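            # corona["updated"] is an epoch timestamp in milliseconds, so it is
            # divided by 1000.0 before being handed to fromtimestamp().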
s = corona["updated"] / 1000.0
updated = datetime.datetime.fromtimestamp(s).strftime("%d-%m-%Y %I:%M:%S UTC")
return updated
def flag(self):
"""
:returns: (str) The image link to the flag of the country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
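        # Note: this check lets 404 responses fall through to the return below;
        # only other non-200 codes are silently swallowed (returning None). The
        # same pattern repeats in every accessor in this class.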
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["countryInfo"]["flag"]
def total_cases(self):
"""
:returns: (int) Number of the total cases in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["cases"]
def today_cases(self):
"""
:returns: (int) Number of the total cases today in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["todayCases"]
def total_deaths(self):
"""
:returns: (int) Number of the total deaths in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["deaths"]
def today_deaths(self):
"""
:returns: (int) Number of the total deaths today in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["todayDeaths"]
def recovered(self):
"""
:return: (int) Number of the total recoveries in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["recovered"]
def today_recovered(self):
"""
:return: (int) Number of the total recoveries today in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["todayRecovered"]
def active(self):
"""
:return: (int) Number of the active cases in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["active"]
def critical(self):
"""
:return: (int) Number of the critical cases in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["deaths"]
def cases_per_one_million(self):
"""
:return: (int) Number of the cases per one million in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["casesPerOneMillion"]
def deaths_per_one_million(self):
"""
:return: (int) Number of the deaths per one million in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["deathsPerOneMillion"]
def total_tests(self):
"""
:return: (int) Number of the total tests in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["tests"]
def tests_per_one_million(self):
"""
:return: (int) Number of the tests per one million in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["testsPerOneMillion"]
def population(self):
"""
        :return: (int) Number of residents in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["population"]
def continent(self):
"""
:return: (str) The continent of the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["continent"]
def one_case_per_people(self):
"""
        :return: (int) Number of people per one case in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["oneCasePerPeople"]
def one_death_per_people(self):
"""
        :return: (int) Number of people per one death in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["oneDeathPerPeople"]
def one_test_per_people(self):
"""
        :return: (int) Number of people per one test in the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200 and request.status_code != 404:
pass
else:
return corona["oneTestPerPeople"]
def get_all(self):
"""
        :returns: (dict) All available statistics for the specified country
"""
request = requests.get(f"https://disease.sh/v2/countries/{self.country.replace(' ', '%20')}")
corona = json.loads(request.content)
if request.status_code != 200:
pass
else:
return corona
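# Usage sketch (hypothetical values; every accessor performs a live HTTP
# request against disease.sh, so network access is required):
#
#     turkey = Country("Turkey")
#     print(Country.last_updated())
#     print(turkey.total_cases(), turkey.today_deaths(), turkey.continent())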
[row metadata elided: numeric stats for the file above, then the next row's header — pyos/os/__init__.py, dgisolfi/PyOS, MIT, 102 bytes, 3 stars, 1 fork]
from pyos.os.console import Console
from pyos.os.kernel import Kernel
from pyos.os.shell import Shell
[row metadata elided: numeric stats for the file above, then the next row's header — crypttrack.py, ghostofcoolidge/cryptobot_public, MIT, 67,792 bytes, 2 stars]
import datetime
import json
import sys
import traceback
from calendar import monthrange
import requests
from github import Github
from web3 import Web3
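# NOTE: hard-coded credentials follow (a GitHub token here, a Binance API key
# and an Infura project id further down); in practice these would normally be
# read from the environment or a config file rather than committed in source.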
g = Github('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')
web3 = Web3(Web3.HTTPProvider("https://bsc-dataseed1.defibit.io/"))
WBNB = '0xbb4CdB9CBd36B01bD1cBaEBF2De08d9173bc095c'
limit_orders_abi = json.loads(
'[{"inputs":[{"internalType":"address","name":"adr","type":"address"}],"name":"authorize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"cancelOrder","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"collectFees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256[]","name":"orderIDs","type":"uint256[]"}],"name":"fulfilMany","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"fulfilOrder","outputs":[{"internalType":"bool","name":"filled","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"OrderCancelled","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"orderID","type":"uint256"},{"indexed":false,"internalType":"address","name":"broker","type":"address"}],"name":"OrderFulfilled","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"orderID","type":"uint256"},{"indexed":false,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint256","name":"amountIn","type":"uint256"},{"indexed":false,"internalType":"address","name":"tokenIn","type":"address"},{"indexed":false,"internalType":"address","name":"tokenOut","type":"address"},{"indexed":false,"internalType":"uint256","name":"targetAmountOut","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"minAmountOut","type":"uint256"}],"name":"OrderPlaced","type":"event"},{"inputs":[{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"targetAmountOut","type":"uint256"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"}],"name":"placeBNBTokenOrder","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"targetAmountOut","type":"uint256"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"}],"name":"placeTokenBNBOrder","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"targetAmountOut","type":"uint256"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"}],"name":"placeTokenTokenOrder","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"bool","name":"state","type":"bool"}],"name":"setBlacklist","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"bool","name":"state","type":"bool"}],"name":"setWhitelist","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bool","name":"state","type":"bool"}],"name":"setWhitelistState","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address 
payable","name":"adr","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"adr","type":"address"}],"name":"unauthorize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"addressOrders","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"blacklisted","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"canFulfilOrder","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"reserveIn","type":"uint256"},{"internalType":"uint256","name":"reserveOut","type":"uint256"}],"name":"getAmountOut","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getBNBSpotPrice","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"getCurrentAmountOut","outputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getNextReadyOrder","outputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"getOrder","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"owner","type":"address"},{"internalType":"enum IBogLimitOrdersV1.OrderStatus","name":"status","type":"uint8"},{"internalType":"enum 
IBogLimitOrdersV1.OrderType","name":"swapType","type":"uint8"},{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"targetAmountOut","type":"uint256"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint256","name":"feePaid","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"adr","type":"address"}],"name":"getOrdersForAddress","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"}],"name":"getPair","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getPendingOrders","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"pair","type":"address"},{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"}],"name":"getReserves","outputs":[{"internalType":"uint256","name":"reserveA","type":"uint256"},{"internalType":"uint256","name":"reserveB","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getRouterAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"}],"name":"getTokenTokenPrice","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"isOwner","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"nextOrder","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"orders","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"pendingIndex","type":"uint256"},{"internalType":"uint256","name":"addressIndex","type":"uint256"},{"internalType":"address","name":"owner","type":"address"},{"internalType":"enum IBogLimitOrdersV1.OrderStatus","name":"status","type":"uint8"},{"internalType":"enum 
IBogLimitOrdersV1.OrderType","name":"swapType","type":"uint8"},{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"address","name":"pair","type":"address"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"targetAmountOut","type":"uint256"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint256","name":"feePaid","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"PRICE_DECIMALS","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"router","outputs":[{"internalType":"contract IPancakeRouter02","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"orderID","type":"uint256"}],"name":"shouldFulfilOrder","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"}],"name":"sortTokens","outputs":[{"internalType":"address","name":"token0","type":"address"},{"internalType":"address","name":"token1","type":"address"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"UINT_MAX","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"WBNB","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"whitelisted","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}]')
Bep_20_api = json.loads(
'[{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_value","type":"uint256"}],"name":"approve","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"balanceOf","outputs":[{"name":"balance","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transfer","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"},{"name":"_spender","type":"address"}],"name":"allowance","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"payable":true,"stateMutability":"payable","type":"fallback"},{"anonymous":false,"inputs":[{"indexed":true,"name":"owner","type":"address"},{"indexed":true,"name":"spender","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]')
address = '0x0Bd91f45FcA6428680C02a79A2496D6f97BDF24a'
address = web3.toChecksumAddress(address)
contract = web3.eth.contract(address=address, abi=limit_orders_abi)
def crypt_log(value, date, blowlist):
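    # Rolling log: append {value: date}, evicting the entry at index 2 once
    # the list holds more than 2201 items so it stays roughly capped.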
# print(blowlist)
if len(blowlist) > 2201:
blowlist.pop(2)
temp_dict = {value: date}
blowlist.append(temp_dict)
# print(blowlist)
def decimal_str(y, decimals=15):
return format(y, f".{decimals}f").lstrip().rstrip('0')
def getDecimal(w3, bep, tok):
Dcontract = w3.eth.contract(address=tok, abi=bep)
decimals = Dcontract.functions.decimals().call()
return decimals
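# e.g. getDecimal(web3, Bep_20_api, WBNB) returns 18, WBNB's decimal count.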
def direct_coin_request(token_id, token_name):
print(token_name)
bnb_token = True
def test(t):
p = Web3(Web3.HTTPProvider("https://bsc-dataseed.binance.org/")).eth.contract(address=address,
abi=limit_orders_abi).functions.getTokenTokenPrice(
t, WBNB).call()
return p
w3 = Web3(Web3.HTTPProvider("https://bsc-dataseed.binance.org/"))
badtok = False
try:
price = test(token_id)
except Exception:
try:
new_token = Web3.toChecksumAddress(token_id)
price = test(new_token)
except Exception as notokerr:
print(f'bad BNB token! {notokerr}')
bnb_token = False
badtok = True
if bnb_token is False:
try:
header = {'X-MBX-APIKEY': 'ZGMLcJIdgFcUB603rTl0kQ4M4AzStoBMfJl9tyl0csswBOyafOMVR2LJi7mbBJj9'}
avg_price_url = 'https://api.binance.com/api/v3/avgPrice'
symbol = token_name + "USDT"
params = {"symbol": symbol}
r = requests.get(avg_price_url, params=params, headers=header)
if "Invalid symbol" in r.text:
try:
w3 = Web3(Web3.HTTPProvider('https://mainnet.infura.io/v3/9d820f3b5df74983aaed0167188c0c37'))
uni_contract = w3.toChecksumAddress('0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f')
ERC_20_API = '[{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Burn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"}],"name":"Mint","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount0Out","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1Out","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Swap","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint112","name":"reserve0","type":"uint112"},{"indexed":false,"internalType":"uint112","name":"reserve1","type":"uint112"}],"name":"Sync","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"constant":true,"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"MINIMUM_LIQUIDITY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"burn","outputs":[{"internalType":"uint256","name":"amount0","
type":"uint256"},{"internalType":"uint256","name":"amount1","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getReserves","outputs":[{"internalType":"uint112","name":"_reserve0","type":"uint112"},{"internalType":"uint112","name":"_reserve1","type":"uint112"},{"internalType":"uint32","name":"_blockTimestampLast","type":"uint32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_token0","type":"address"},{"internalType":"address","name":"_token1","type":"address"}],"name":"initialize","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"kLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"mint","outputs":[{"internalType":"uint256","name":"liquidity","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"price0CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"price1CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"skim","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"amount0Out","type":"uint256"},{"internalType":"uint256","name":"amount1Out","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"swap","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"sync","outputs":[],"payable":false,"stateMutability":"nonp
ayable","type":"function"},{"constant":true,"inputs":[],"name":"token0","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"token1","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
uni_api = '[{"inputs":[{"internalType":"address","name":"_feeToSetter","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"token0","type":"address"},{"indexed":true,"internalType":"address","name":"token1","type":"address"},{"indexed":false,"internalType":"address","name":"pair","type":"address"},{"indexed":false,"internalType":"uint256","name":"","type":"uint256"}],"name":"PairCreated","type":"event"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"allPairs","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"allPairsLength","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"}],"name":"createPair","outputs":[{"internalType":"address","name":"pair","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"feeTo","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"feeToSetter","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"getPair","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_feeTo","type":"address"}],"name":"setFeeTo","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_feeToSetter","type":"address"}],"name":"setFeeToSetter","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
token_id = w3.toChecksumAddress(token_id)
ETH = w3.toChecksumAddress('0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2')
ERC_contract = w3.eth.contract(address=uni_contract, abi=uni_api)
x = ERC_contract.functions.getPair(token_id, ETH).call()
ERC_contract = w3.eth.contract(address=x, abi=ERC_20_API)
t1, t2, t3 = ERC_contract.functions.getReserves().call()
ETH_price = get_ETH_price()
final_price = (t2 * ETH_price / t1)
return final_price
                except Exception as err:
                    print(err)
else:
ERC_price = float(r.text.split('price":"', 1)[1].rsplit('"', 1)[0])
print(f'the price is: {ERC_price}')
return ERC_price
        except Exception:
try:
w3 = Web3(Web3.HTTPProvider('https://mainnet.infura.io/v3/9d820f3b5df74983aaed0167188c0c37'))
uni_contract = w3.toChecksumAddress('0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f')
ERC_20_API = '[{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Burn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"}],"name":"Mint","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount0Out","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1Out","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Swap","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint112","name":"reserve0","type":"uint112"},{"indexed":false,"internalType":"uint112","name":"reserve1","type":"uint112"}],"name":"Sync","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"constant":true,"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"MINIMUM_LIQUIDITY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"burn","outputs":[{"internalType":"uint256","name":"amount0","
type":"uint256"},{"internalType":"uint256","name":"amount1","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getReserves","outputs":[{"internalType":"uint112","name":"_reserve0","type":"uint112"},{"internalType":"uint112","name":"_reserve1","type":"uint112"},{"internalType":"uint32","name":"_blockTimestampLast","type":"uint32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_token0","type":"address"},{"internalType":"address","name":"_token1","type":"address"}],"name":"initialize","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"kLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"mint","outputs":[{"internalType":"uint256","name":"liquidity","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"price0CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"price1CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"skim","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"amount0Out","type":"uint256"},{"internalType":"uint256","name":"amount1Out","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"swap","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"sync","outputs":[],"payable":false,"stateMutability":"nonp
ayable","type":"function"},{"constant":true,"inputs":[],"name":"token0","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"token1","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
uni_api = '[{"inputs":[{"internalType":"address","name":"_feeToSetter","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"token0","type":"address"},{"indexed":true,"internalType":"address","name":"token1","type":"address"},{"indexed":false,"internalType":"address","name":"pair","type":"address"},{"indexed":false,"internalType":"uint256","name":"","type":"uint256"}],"name":"PairCreated","type":"event"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"allPairs","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"allPairsLength","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"}],"name":"createPair","outputs":[{"internalType":"address","name":"pair","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"feeTo","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"feeToSetter","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"getPair","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_feeTo","type":"address"}],"name":"setFeeTo","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_feeToSetter","type":"address"}],"name":"setFeeToSetter","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
token_id = w3.toChecksumAddress(token_id)
ETH = w3.toChecksumAddress('0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2')
ERC_contract = w3.eth.contract(address=uni_contract, abi=uni_api)
x = ERC_contract.functions.getPair(token_id, ETH).call()
ERC_contract = w3.eth.contract(address=x, abi=ERC_20_API)
t1, t2, t3 = ERC_contract.functions.getReserves().call()
ETH_price = get_ETH_price()
final_price = (t2 * ETH_price / t1)
return final_price
            except Exception as err:
                print(err)
else:
if badtok:
Dcontract = w3.eth.contract(address=new_token, abi=Bep_20_api)
tokenIn = Dcontract.functions.decimals().call()
Dcontract = w3.eth.contract(address=WBNB, abi=Bep_20_api)
tokenOut = Dcontract.functions.decimals().call()
else:
Dcontract = w3.eth.contract(address=token_id, abi=Bep_20_api)
tokenIn = Dcontract.functions.decimals().call()
Dcontract = w3.eth.contract(address=WBNB, abi=Bep_20_api)
tokenOut = Dcontract.functions.decimals().call()
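    # Rescale the raw pair price by the difference in token decimals; the
    # extra pow(10, 10) and pow(10, 16) divisors below appear to undo the
    # contract's fixed-point price scaling before the USD figure is returned.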
price = price / pow(10, tokenOut - tokenIn)
USDprice = ((price / pow(10, 10)) * (contract.functions.getBNBSpotPrice().call()) / pow(10, 10))
final = USDprice / pow(10, 16)
return final
def get_BNB_price():
return contract.functions.getBNBSpotPrice().call()
def get_ETH_price():
avg_price_url = 'https://api.binance.com/api/v3/avgPrice'
params = {"symbol": 'ETHUSDT'}
header = {'X-MBX-APIKEY': 'ZGMLcJIdgFcUB603rTl0kQ4M4AzStoBMfJl9tyl0csswBOyafOMVR2LJi7mbBJj9'}
r = requests.get(avg_price_url, params=params, headers=header)
print(r.text)
    parse = float(r.json()['price'])
return parse
def get_token_bnb_value(token):
badtok = False
try:
price = contract.functions.getTokenTokenPrice(token, WBNB).call()
except Exception:
        try:
            new_token = Web3.toChecksumAddress(token)
            price = contract.functions.getTokenTokenPrice(new_token, WBNB).call()
            badtok = True  # only set once the checksummed retry succeeds
        except Exception as badtoken:
            print(f'bad token! {badtoken}')
            return
if badtok:
tokenIn = getDecimal(web3, Bep_20_api, new_token)
tokenOut = getDecimal(web3, Bep_20_api, WBNB)
else:
tokenIn = getDecimal(web3, Bep_20_api, token)
tokenOut = getDecimal(web3, Bep_20_api, WBNB)
price = price / pow(10, tokenOut - tokenIn) / pow(10, 18)
return price
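# Small consolidation sketch (illustrative, not in the original): several
# functions above fetch the same pair of decimals, so a helper could keep
# that logic in one place. Assumes the module-level `web3` instance,
# `Bep_20_api` ABI string and `WBNB` address already used above.
def pair_decimals(token_address):
    token_in = getDecimal(web3, Bep_20_api, token_address)
    token_out = getDecimal(web3, Bep_20_api, WBNB)
    return token_in, token_out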
def check_BNB_token(token):
try:
contract.functions.getTokenTokenPrice(token, WBNB).call()
return True
except Exception as badtoken:
print(f'bad token! {badtoken}')
try:
new_token = Web3.toChecksumAddress(token)
contract.functions.getTokenTokenPrice(new_token, WBNB).call()
return True
except Exception as badtoken:
print(f'bad token! {badtoken}')
return False
def check_BINANCE_token(token):
try:
token_name = parse_ERC_token(token)
except Exception:
print('token name not found!')
return False
header = {'X-MBX-APIKEY': 'ZGMLcJIdgFcUB603rTl0kQ4M4AzStoBMfJl9tyl0csswBOyafOMVR2LJi7mbBJj9'}
all_symbols_url = "https://api.binance.com/api/v3/ticker/price"
try:
r = requests.get(all_symbols_url, headers=header)
if token_name in r.text:
return True
else:
return False
except Exception as badtoken:
print(f'error! bad token {badtoken}')
return False
def check_uni_token(token):
try:
w3 = Web3(Web3.HTTPProvider('https://mainnet.infura.io/v3/9d820f3b5df74983aaed0167188c0c37'))
uni_contract = w3.toChecksumAddress('0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f')
ERC_20_API = '[{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Burn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"}],"name":"Mint","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount0Out","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1Out","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Swap","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint112","name":"reserve0","type":"uint112"},{"indexed":false,"internalType":"uint112","name":"reserve1","type":"uint112"}],"name":"Sync","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"constant":true,"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"MINIMUM_LIQUIDITY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"burn","outputs":[{"internalType":"uint256","name":"amount0","
type":"uint256"},{"internalType":"uint256","name":"amount1","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getReserves","outputs":[{"internalType":"uint112","name":"_reserve0","type":"uint112"},{"internalType":"uint112","name":"_reserve1","type":"uint112"},{"internalType":"uint32","name":"_blockTimestampLast","type":"uint32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_token0","type":"address"},{"internalType":"address","name":"_token1","type":"address"}],"name":"initialize","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"kLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"mint","outputs":[{"internalType":"uint256","name":"liquidity","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"price0CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"price1CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"skim","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"amount0Out","type":"uint256"},{"internalType":"uint256","name":"amount1Out","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"swap","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"sync","outputs":[],"payable":false,"stateMutability":"nonp
ayable","type":"function"},{"constant":true,"inputs":[],"name":"token0","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"token1","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
uni_api = '[{"inputs":[{"internalType":"address","name":"_feeToSetter","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"token0","type":"address"},{"indexed":true,"internalType":"address","name":"token1","type":"address"},{"indexed":false,"internalType":"address","name":"pair","type":"address"},{"indexed":false,"internalType":"uint256","name":"","type":"uint256"}],"name":"PairCreated","type":"event"},{"constant":true,"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"allPairs","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"allPairsLength","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"}],"name":"createPair","outputs":[{"internalType":"address","name":"pair","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"feeTo","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"feeToSetter","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"getPair","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_feeTo","type":"address"}],"name":"setFeeTo","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"_feeToSetter","type":"address"}],"name":"setFeeToSetter","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
token_id = w3.toChecksumAddress(token)
ETH = w3.toChecksumAddress('0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2')
ERC_contract = w3.eth.contract(address=uni_contract, abi=uni_api)
x = ERC_contract.functions.getPair(token_id, ETH).call()
ERC_contract = w3.eth.contract(address=x, abi=ERC_20_API)
t1, t2, t3 = ERC_contract.functions.getReserves().call()
ETH_price = get_ETH_price()
        _ = (t2 * ETH_price / t1)  # exercise the price math; raises if the pool has no reserves
return True
    except Exception as e:
        print(e)
return False
async def add_token_git(name, token, h, client):
try:
text = f'Name {name}\ntoken {token}'
repo = h.get_repo('ghostofcoolidge/cryptobot')
repo.create_file(f"crypt/{name}_list.txt", "start", text)
print('finish adding token to github')
except Exception:
try:
err_message = client.get_user(797387782766592020)
x, y, z = sys.exc_info()
z = (traceback.format_tb(z))
await err_message.send(z)
await err_message.send(y)
except Exception:
err_message = client.get_user(797387782766592020)
x, y, z = sys.exc_info()
z = (traceback.format_tb(z))
print(y)
print(z)
await err_message.send('error too long to send through discord; please check Heroku')
async def remove_token_get(name, h, client):
repo = h.get_repo('ghostofcoolidge/cryptobot')
try:
contents = repo.get_contents(f"crypt/{name}_list.txt")
repo.delete_file(contents.path, 'remove token', contents.sha)
return True
except Exception:
try:
err_message = client.get_user(797387782766592020)
x, y, z = sys.exc_info()
z = (traceback.format_tb(z))
await err_message.send(z)
await err_message.send(y)
except Exception:
err_message = client.get_user(797387782766592020)
x, y, z = sys.exc_info()
z = (traceback.format_tb(z))
print(y)
print(z)
await err_message.send('error too long to send through discord; please check Heroku')
def isfloat(value):
try:
float(value)
return True
    except (TypeError, ValueError):
return False
def parse_BNB_token(tokenid):
r = requests.get(f'https://www.bscscan.com/token/{tokenid}')
x = str(r.content).split("symbol\\': \\'", 1)[1].split('\\', 1)[0]
print(x)
x2 = x.replace('.', '')
if x2.isalnum():
print('token name found!')
return x
else:
print('could not parse token name')
return False
def parse_ERC_token(tokenid):
r = requests.get(f'https://explorer.bitquery.io/ethereum/token/{tokenid}')
x = str(r.content)
print(x)
parse = x.split('(', 1)[1].split(') ERC20', 1)[0]
parse = parse.replace('.', '')
if parse.isalnum():
print('token name found!')
return parse
else:
print('could not parse token name')
return False
def ticker_mess(string):
x = 0
tl = []
tl2 = []
for item in string.split(' split '):
x = x + len(item)
tl.append(item)
if x > 1700:
tl2.append(tl)
tl = []
x = 0
if tl:
tl2.append(tl)
return tl2
# THIS IS WHERE WRAPPER FUNCTION WOULD BE USEFUL; NEED TO LEARN HOW TO IMPLEMENT
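# One possible shape for that wrapper (an illustrative sketch, not part of
# the original bot): hourly/daily/days/weekly/monthly below all perform the
# same length check, float filter and slice before calling percent(), so a
# single parameterized coroutine could replace them.
async def window_report(crypt_list, message, min_len, slicer, label):
    if len(crypt_list) < min_len:
        await message.channel.send('not enough data stored.')
        return
    values = [float(i) for i in crypt_list if isfloat(i)]
    window = slicer(values)
    await percent(window, message, len(window), label)
# e.g. the hourly view could then be expressed as:
#   await window_report(crypt_list, message, 9, lambda v: v[-6:], 'hours')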
async def percent(num, message, length, ptime):
if ptime == '24 hours':
head = f'**Prices for the past {ptime}:**\n\n\n'
else:
head = f'**Prices for the past {length} {ptime}:**\n\n\n'
price = ''
    for i, item in enumerate(num):
        if i == 0:
            price = price + f'**${decimal_str(item)}**\n\n'
        else:
            # index by position so repeated prices don't trip up the lookup
            # the way num.index(item) would
            an = ((item - num[i - 1]) / num[i - 1])
            perc = round(an * 100, 2)
            price = price + f'**${decimal_str(item)}**\n(*{perc}%*)\n\n'
total = round(((num[-1] - num[0]) / num[0]) * 100, 2)
await message.channel.send(f'{head}{price}total percentage change:\n***({total})%***')
async def hourly(crypt_list, message):
htime = 'hours'
if len(crypt_list) < 9:
await message.channel.send('not enough data stored.')
return
else:
temp_cr_li = []
for i in crypt_list:
# print(f'item: {i}')
if isfloat(i):
temp_cr_li.append(float(i))
temp_list = temp_cr_li[-6:]
length = len(temp_list)
# print(temp_list)
await percent(temp_list, message, length, htime)
async def daily(crypt_list, token, token_name, message):
dtime = '24 hours'
if len(crypt_list) < 123:
await message.channel.send('not enough data stored.')
return
else:
temp_cr_li = []
for i in crypt_list:
if isfloat(i):
temp_cr_li.append(float(i))
temp_val = direct_coin_request(token, token_name)
temp_cr_li.append(temp_val)
temp_list = temp_cr_li[-25::24]
length = len(temp_list)
await percent(temp_list, message, length, dtime)
async def days(crypt_list, token, token_name, message):
daytime = 'days'
if len(crypt_list) < 123:
await message.channel.send('not enough data stored.')
return
else:
temp_cr_li = []
for i in crypt_list:
if isfloat(i):
temp_cr_li.append(float(i))
temp_val = direct_coin_request(token, token_name)
temp_cr_li.append(temp_val)
temp_list = temp_cr_li[::-24][:5]
length = len(temp_list)
await percent(temp_list, message, length, daytime)
async def weekly(crypt_list, token, token_name, message):
wtime = 'weeks'
if len(crypt_list) < 386:
await message.channel.send('not enough data stored.')
return
else:
temp_cr_li = []
for i in crypt_list:
if isfloat(i):
temp_cr_li.append(float(i))
temp_val = direct_coin_request(token, token_name)
temp_cr_li.append(temp_val)
temp_list = temp_cr_li[::-96][:4]
length = len(temp_list)
await percent(temp_list, message, length, wtime)
async def monthly(crypt_list, token, token_name, message):
mtime = 'months'
if len(crypt_list) < 2162:
await message.channel.send('not enough data stored.')
return
else:
temp_cr_li = []
for i in crypt_list:
if isfloat(i):
temp_cr_li.append(float(i))
temp_val = direct_coin_request(token, token_name)
temp_cr_li.append(temp_val)
temp_list = temp_cr_li[::-720][:3]
length = len(temp_list)
await percent(temp_list, message, length, mtime)
def percent_check_hourly(li, pernum):
if len(li) < 5:
print('not enough data stored.')
return False, False, False, False
else:
temp = []
for item in li:
# print(item)
if isfloat(item):
temp.append(float(item))
# print(temp)
new = temp[-1]
old = temp[-2]
if old == 0:
perc = 0
return True, new, old, perc
perc = round(((new - old) / old * 100), 2)
print(f'hourly percentage: {perc}')
if perc >= pernum or perc <= (-1 * pernum):
return True, new, old, perc
else:
return False, new, old, perc
def percent_check_quarterly(li, pernum):
if len(li) < 11:
print('not enough data stored.')
return False, False, False, False
else:
temp = []
for item in li:
# print(item)
if isfloat(item):
temp.append(float(item))
# print(temp)
new = temp[-1]
old = temp[-7]
if old == 0:
perc = 0
return True, new, old, perc
perc = round(((new - old) / old * 100), 2)
        print(f'quarterly percentage: {perc}')
if perc >= pernum or perc <= (-1 * pernum):
return True, new, old, perc
else:
return False, new, old, perc
def percent_check_daily(li, pernum):
if len(li) < 28:
print('not enough data stored.')
return False, False, False, False
else:
temp = []
for i in li:
if isfloat(i):
temp.append(float(i))
# print(temp)
new = temp[-1]
old = temp[-25]
if old == 0:
perc = 0
return True, new, old, perc
# print(old)
perc = round(((new - old) / old * 100), 2)
print(f'daily percentage: {perc}')
if perc >= pernum or perc <= (-1 * pernum):
return True, new, old, perc
else:
return False, new, old, perc
def percent_check_weekly(li, pernum):
if len(li) < 172:
print('not enough data stored.')
return False, False, False, False
else:
temp = []
for i in li:
if isfloat(i):
temp.append(float(i))
new = temp[-1]
old = temp[-169]
if old == 0:
perc = 0
return True, new, old, perc
perc = round(((new - old) / old * 100), 2)
print(f'weekly percentage: {perc}')
if perc >= pernum or perc <= (-1 * pernum):
return True, new, old, perc
else:
return False, new, old, perc
def percent_check_monthly(li, pernum):
current_time = datetime.datetime.today()
year = current_time.year
previous_month = current_time.month - 1
if previous_month == 0:
previous_month = 12
year = year - 1
hours = monthrange(year, previous_month)[1]
hours = (hours * 24) + 1
if len(li) < hours + 3:
print('not enough data stored.')
return False, False, False, False
else:
temp = []
for i in li:
if isfloat(i):
temp.append(float(i))
new = temp[-1]
old = temp[-hours]
if old == 0:
perc = 0
return True, new, old, perc
perc = round(((new - old) / old * 100), 2)
print(f'monthly percentage: {perc}')
if perc >= pernum or perc <= (-1 * pernum):
return True, new, old, perc
else:
return False, new, old, perc
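# Illustrative consolidation (not in the original): the five percent_check_*
# functions above differ only in the minimum history length and the lookback
# offset (2, 7, 25, 169, or the monthrange()-derived hour count), so one
# shared implementation would cover them all.
def percent_check(li, pernum, min_len, offset, label='percentage'):
    if len(li) < min_len:
        print('not enough data stored.')
        return False, False, False, False
    temp = [float(i) for i in li if isfloat(i)]
    new, old = temp[-1], temp[-offset]
    if old == 0:
        return True, new, old, 0
    perc = round(((new - old) / old * 100), 2)
    print(f'{label}: {perc}')
    return (perc >= pernum or perc <= -pernum), new, old, perc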
def ticker_percent(current, hour):
if hour == 0:
tick_percent = 0
return tick_percent
tick_percent = ((current - hour) / hour) * 100
return tick_percent
# def ticker_percent_24h(current,coin_list):
# if len(coin_list) > 26:
# daily = coin_list[-25]
# daily_percent = ((current - daily)/ daily) * 100
# return daily_percent
# TODO CHANGE FLAG NUMBERING SYSTEM: MAKE IT CHECK THE HIGHEST FLAG NUMBER IN THE DICT AND THEN ADD 1
def add_flag_percent(message, all_coin, token, flag_percent, flaglist):
if float(flag_percent) == 0:
response = 'please use a number other than zero'
return response
token_check = False
for item in all_coin:
if token == item["Name"]:
token_value = direct_coin_request(item["token"], item["Name"])
token_check = True
break
if token_check:
flag_dict = {}
user_id = message.author.id
flagnum = len(flaglist) + 1
flag_dict.update({
"user": user_id,
"token": token,
"token_value": float(token_value),
"percent": float(flag_percent),
"flagnum": flagnum})
flaglist.append(flag_dict)
response = f'flag stored! Flag number is: {flagnum}'
return response
else:
response = 'token not found in list: please choose a token that is being tracked by the bot'
return response
# TODO FIX REMOVE FUNCTION!
async def remove_flag(message, flag_list_percent, flag_list_abs, flagnum):
    user_id = message.author.id
    for item in flag_list_percent:
        if flagnum == item["flagnum"] and user_id == item["user"]:
            flag_list_percent.remove(item)
            await message.channel.send('flag has been removed from bot archive')
            return
    for item in flag_list_abs:
        if flagnum == item["flagnum"] and user_id == item["user"]:
            flag_list_abs.remove(item)
await message.channel.send('flag has been removed from bot archive')
return
def update_flag_text(flli, flagtext):
print('updating flag text...')
print(flli)
strlist = ''
for item in flli:
for k, v in item.items():
strlist = strlist + str(k) + f' {str(v)}\n'
strlist = strlist + ',' + '\n'
    repo = g.get_repo('ghostofcoolidge/cryptobot')
contents = repo.get_contents(flagtext)
repo.update_file(contents.path, 'update', strlist, contents.sha)
print('done')
async def check_flags(client, flag_list_percent, flagprcenttext, dict_prices):
if len(flag_list_percent) < 1:
return
else:
        for item in list(flag_list_percent):  # iterate over a copy; items may be removed below
token_found = False
for k, v in dict_prices.items():
if item["token"] == k:
current_price = v
token_found = True
break
else:
continue
if token_found:
perc_check = ((current_price - item["token_value"]) / item["token_value"]) * 100
if item["percent"] > 0:
if perc_check >= float(item["percent"]):
print('flag triggered!')
channel = client.get_channel(833260192850247710)
user = client.get_user(int(item["user"]))
user_id = user.id
                        # await user.send(
                        #     f'FLAG TRIGGERED: {item["token"]} has increased {round(perc_check, 2)}% in value!\n*${decimal_str(current_price, 15)}*')
                        await channel.send(
                            f'<@!{user_id}> FLAG TRIGGERED: {item["token"]} has increased {round(perc_check, 2)}% in value!\n*${decimal_str(current_price, 15)}*')
flag_list_percent.remove(item)
update_flag_text(flag_list_percent, flagprcenttext)
elif item["percent"] < 0:
if perc_check <= float(item["percent"]):
print('flag triggered!')
channel = client.get_channel(833260192850247710)
                        user = client.get_user(int(item["user"]))
user_id = user.id
                        await channel.send(
                            f'<@!{user_id}> FLAG TRIGGERED: {item["token"]} has decreased {round(perc_check, 2)}% in value!\n*${decimal_str(current_price, 15)}*')
flag_list_percent.remove(item)
update_flag_text(flag_list_percent, flagprcenttext)
else:
print('item not found in coin archive!')
flag_list_percent.remove(item)
update_flag_text(flag_list_percent, flagprcenttext)
continue
| 103.027356 | 10,474 | 0.62913 | 7,102 | 67,792 | 5.942974 | 0.066319 | 0.0589 | 0.091549 | 0.069775 | 0.868861 | 0.851423 | 0.826474 | 0.807141 | 0.788305 | 0.777999 | 0 | 0.030296 | 0.11581 | 67,792 | 657 | 10,475 | 103.18417 | 0.673846 | 0.010326 | 0 | 0.586146 | 0 | 0.017762 | 0.711377 | 0.677754 | 0 | 0 | 0.005059 | 0.001522 | 0 | 1 | 0.040853 | false | 0 | 0.01421 | 0.003552 | 0.16341 | 0.069272 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
41cffbb62648abfbcffede05850c500e5a62a979 | 9,700 | py | Python | tests/test_models/test_classifiers.py | guigarfr/mmclassification | 4ddfd3d5080de1b418d1c0a35508397a56fcf08e | ["Apache-2.0"] | 1 | 2020-07-20T17:22:06.000Z | 2020-07-20T17:22:06.000Z | tests/test_models/test_classifiers.py | guigarfr/mmclassification | 4ddfd3d5080de1b418d1c0a35508397a56fcf08e | ["Apache-2.0"] | null | null | null | tests/test_models/test_classifiers.py | guigarfr/mmclassification | 4ddfd3d5080de1b418d1c0a35508397a56fcf08e | ["Apache-2.0"] | 1 | 2021-12-28T18:30:40.000Z | 2021-12-28T18:30:40.000Z |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from copy import deepcopy
import numpy as np
import pytest
import torch
from mmcv import ConfigDict
from mmcv.runner.base_module import BaseModule
from mmcls.models import CLASSIFIERS
from mmcls.models.classifiers import ImageClassifier
def test_image_classifier():
model_cfg = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet_CIFAR',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=10,
in_channels=2048,
loss=dict(type='CrossEntropyLoss')))
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
model_cfg_ = deepcopy(model_cfg)
model = CLASSIFIERS.build(model_cfg_)
# test property
assert model.with_neck
assert model.with_head
# test train_step
outputs = model.train_step({'img': imgs, 'gt_label': label}, None)
assert outputs['loss'].item() > 0
assert outputs['num_samples'] == 16
# test train_step without optimizer
outputs = model.train_step({'img': imgs, 'gt_label': label})
assert outputs['loss'].item() > 0
assert outputs['num_samples'] == 16
# test val_step
outputs = model.val_step({'img': imgs, 'gt_label': label}, None)
assert outputs['loss'].item() > 0
assert outputs['num_samples'] == 16
# test val_step without optimizer
outputs = model.val_step({'img': imgs, 'gt_label': label})
assert outputs['loss'].item() > 0
assert outputs['num_samples'] == 16
# test forward
losses = model(imgs, return_loss=True, gt_label=label)
assert losses['loss'].item() > 0
# test forward_test
model_cfg_ = deepcopy(model_cfg)
model = CLASSIFIERS.build(model_cfg_)
pred = model(imgs, return_loss=False, img_metas=None)
assert isinstance(pred, list) and len(pred) == 16
single_img = torch.randn(1, 3, 32, 32)
pred = model(single_img, return_loss=False, img_metas=None)
assert isinstance(pred, list) and len(pred) == 1
# test pretrained
# TODO remove deprecated pretrained
with pytest.warns(UserWarning):
model_cfg_ = deepcopy(model_cfg)
model_cfg_['pretrained'] = 'checkpoint'
model = CLASSIFIERS.build(model_cfg_)
assert model.init_cfg == dict(
type='Pretrained', checkpoint='checkpoint')
# test show_result
    img = np.random.randint(0, 256, (224, 224, 3)).astype(np.uint8)  # random_integers was removed from modern NumPy
result = dict(pred_class='cat', pred_label=0, pred_score=0.9)
with tempfile.TemporaryDirectory() as tmpdir:
out_file = osp.join(tmpdir, 'out.png')
model.show_result(img, result, out_file=out_file)
assert osp.exists(out_file)
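# Illustrative helper (not part of the original test file): the mixup/cutmix
# tests below build nearly identical configs, differing only in train_cfg, so
# a small factory like this could remove the duplication.
def _cifar_multilabel_cfg(train_cfg):
    return dict(
        backbone=dict(
            type='ResNet_CIFAR',
            depth=50,
            num_stages=4,
            out_indices=(3, ),
            style='pytorch'),
        neck=dict(type='GlobalAveragePooling'),
        head=dict(
            type='MultiLabelLinearClsHead',
            num_classes=10,
            in_channels=2048,
            loss=dict(type='CrossEntropyLoss', loss_weight=1.0,
                      use_soft=True)),
        train_cfg=train_cfg)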
def test_image_classifier_with_mixup():
# Test mixup in ImageClassifier
model_cfg = dict(
backbone=dict(
type='ResNet_CIFAR',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='MultiLabelLinearClsHead',
num_classes=10,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0,
use_soft=True)),
train_cfg=dict(
augments=dict(
type='BatchMixup', alpha=1., num_classes=10, prob=1.)))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
# Considering BC-breaking
# TODO remove deprecated mixup usage.
model_cfg['train_cfg'] = dict(mixup=dict(alpha=1.0, num_classes=10))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
def test_image_classifier_with_cutmix():
# Test cutmix in ImageClassifier
model_cfg = dict(
backbone=dict(
type='ResNet_CIFAR',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='MultiLabelLinearClsHead',
num_classes=10,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0,
use_soft=True)),
train_cfg=dict(
augments=dict(
type='BatchCutMix', alpha=1., num_classes=10, prob=1.)))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
# Considering BC-breaking
# TODO remove deprecated mixup usage.
model_cfg['train_cfg'] = dict(
cutmix=dict(alpha=1.0, num_classes=10, cutmix_prob=1.0))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
def test_image_classifier_with_augments():
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
# Test cutmix and mixup in ImageClassifier
model_cfg = dict(
backbone=dict(
type='ResNet_CIFAR',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='MultiLabelLinearClsHead',
num_classes=10,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0,
use_soft=True)),
train_cfg=dict(augments=[
dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5),
dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3),
dict(type='Identity', num_classes=10, prob=0.2)
]))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
# Test cutmix with cutmix_minmax in ImageClassifier
model_cfg['train_cfg'] = dict(
augments=dict(
type='BatchCutMix',
alpha=1.,
num_classes=10,
prob=1.,
cutmix_minmax=[0.2, 0.8]))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
# Test not using train_cfg
model_cfg = dict(
backbone=dict(
type='ResNet_CIFAR',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=10,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0)))
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
imgs = torch.randn(16, 3, 32, 32)
label = torch.randint(0, 10, (16, ))
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
# Test not using cutmix and mixup in ImageClassifier
model_cfg['train_cfg'] = dict(augments=None)
img_classifier = ImageClassifier(**model_cfg)
img_classifier.init_weights()
losses = img_classifier.forward_train(imgs, label)
assert losses['loss'].item() > 0
def test_image_classifier_return_tuple():
model_cfg = ConfigDict(
type='ImageClassifier',
backbone=dict(
type='ResNet_CIFAR',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch',
return_tuple=False),
head=dict(
type='LinearClsHead',
num_classes=10,
in_channels=2048,
loss=dict(type='CrossEntropyLoss')))
imgs = torch.randn(16, 3, 32, 32)
model_cfg_ = deepcopy(model_cfg)
with pytest.warns(DeprecationWarning):
model = CLASSIFIERS.build(model_cfg_)
# test backbone return tensor
feat = model.extract_feat(imgs)
assert isinstance(feat, torch.Tensor)
# test backbone return tuple
model_cfg_ = deepcopy(model_cfg)
model_cfg_.backbone.return_tuple = True
model = CLASSIFIERS.build(model_cfg_)
feat = model.extract_feat(imgs)
assert isinstance(feat, tuple)
# test warning if backbone return tensor
class ToyBackbone(BaseModule):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model_cfg_ = deepcopy(model_cfg)
model_cfg_.backbone.return_tuple = True
model = CLASSIFIERS.build(model_cfg_)
model.backbone = ToyBackbone()
with pytest.warns(DeprecationWarning):
model.extract_feat(imgs)
| 31.596091 | 75 | 0.622268 | 1,170 | 9,700 | 4.965812 | 0.135897 | 0.053701 | 0.028916 | 0.03253 | 0.804303 | 0.769363 | 0.762995 | 0.740103 | 0.712909 | 0.693287 | 0 | 0.03466 | 0.259381 | 9,700 | 306 | 76 | 31.699346 | 0.774081 | 0.071649 | 0 | 0.734783 | 0 | 0 | 0.082628 | 0.007684 | 0 | 0 | 0 | 0.003268 | 0.113043 | 1 | 0.030435 | false | 0 | 0.043478 | 0.004348 | 0.082609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
41e9bae017ef21260a387e2ca9481743b375ae1c | 5,518 | py | Python | src/inceptionModules.py | davidharvey1986/darkCNN | 7f0519851a11c72e31f820b69b5bcc1f81f5c0f2 | ["MIT"] | null | null | null | src/inceptionModules.py | davidharvey1986/darkCNN | 7f0519851a11c72e31f820b69b5bcc1f81f5c0f2 | ["MIT"] | null | null | null | src/inceptionModules.py | davidharvey1986/darkCNN | 7f0519851a11c72e31f820b69b5bcc1f81f5c0f2 | ["MIT"] | null | null | null |
from .globalVariables import *
def stemInception( model ):
'''
    The first stem inception
    '''
    # Inception max pool and convolution
tower_1 = layers.Conv2D(96, (3,3), strides=(2,2), padding='valid')(model)
tower_2 = layers.MaxPooling2D((3,3), strides=(2,2), padding='valid')(model)
model = layers.concatenate([tower_1, tower_2], axis = 3)
tower_1 = layers.Conv2D(64, (1,1), strides=(1,1), padding='valid')(model)
tower_1 = layers.Conv2D(96, (3,3), strides=(1,1), padding='same')(tower_1)
tower_2 = layers.Conv2D(64, (1,1), strides=(1,1), padding='same')(model)
tower_2 = layers.Conv2D(64, (7,1), strides=(1,1), padding='same')(tower_2)
tower_2 = layers.Conv2D(64, (1,7), strides=(1,1), padding='same')(tower_2)
tower_2 = layers.Conv2D(96, (3,3), strides=(1,1), padding='valid')(tower_2)
    model = layers.concatenate([tower_1, tower_2], axis=3)
    tower_1 = layers.Conv2D(96, (3,3), strides=(2,2), padding='valid')(model)
tower_2 = layers.MaxPooling2D((3,3), strides=(2,2), padding='valid')(model)
    model = layers.concatenate([tower_1, tower_2], axis=3)
return model
def inceptionA( model ):
'''
The inception A layer
'''
tower_1 = layers.AveragePooling2D((2,2), padding='same',strides=(1,1))(model)
tower_1 = layers.Conv2D(96, (1,1), padding='same', strides=(1,1))(tower_1)
tower_2 = layers.Conv2D(96, (1,1), padding='same', strides=(1,1))(model)
tower_3 = layers.Conv2D(64, (1,1), padding='same', strides=(1,1))(model)
tower_3 = layers.Conv2D(96, (3,3), padding='same', strides=(1,1))(tower_3)
tower_4 = layers.Conv2D(64, (1,1), padding='same', strides=(1,1))(model)
tower_4 = layers.Conv2D(96, (3,3), padding='same', strides=(1,1))(tower_4)
tower_4 = layers.Conv2D(96, (3,3), padding='same', strides=(1,1))(tower_4)
model = layers.concatenate([tower_1, tower_2, tower_3, tower_4], axis=3)
return model
def reductionA( model ):
'''
    The reduction A layer
'''
tower_1 = layers.MaxPooling2D((3,3), padding='valid', strides=(2,2))(model)
tower_2 = layers.Conv2D(384, (3,3), padding='valid', strides=(2,2))(model)
tower_3 = layers.Conv2D(192, (1,1), padding='same', strides=(1,1))(model)
tower_3 = layers.Conv2D(224, (3,3), padding='same', strides=(1,1))(tower_3)
tower_3 = layers.Conv2D(256, (3,3), padding='valid', strides=(2,2))(tower_3)
model = layers.concatenate([tower_1, tower_2, tower_3], axis=3)
return model
def inceptionB( model ):
'''
    The inception B layer
'''
tower_1 = layers.AveragePooling2D((2,2), padding='same', strides=(1,1))(model)
tower_1 = layers.Conv2D(128, (2,2), padding='same',strides=(1,1))(tower_1)
tower_2 = layers.Conv2D(384, (1,1), padding='same',strides=(1,1))(model)
tower_3 = layers.Conv2D(192, (1,1), padding='same',strides=(1,1))(model)
tower_3 = layers.Conv2D(224, (1,7), padding='same',strides=(1,1))(tower_3)
tower_3 = layers.Conv2D(256, (7,1), padding='same',strides=(1,1))(tower_3)
tower_4 = layers.Conv2D(192, (1,1), padding='same',strides=(1,1))(model)
tower_4 = layers.Conv2D(192, (1,7), padding='same',strides=(1,1))(tower_4)
tower_4 = layers.Conv2D(224, (7,1), padding='same',strides=(1,1))(tower_4)
tower_4 = layers.Conv2D(224, (1,7), padding='same',strides=(1,1))(tower_4)
tower_4 = layers.Conv2D(256, (7,1), padding='same',strides=(1,1))(tower_4)
model = layers.concatenate([tower_1, tower_2, tower_3, tower_4], axis=3)
return model
def reductionB( model ):
'''
    The reduction B layer
'''
tower_1 = layers.MaxPooling2D((3,3), padding='valid', strides=(2,2))(model)
tower_2 = layers.Conv2D(192, (1,1), padding='valid', strides=(1,1))(model)
tower_2 = layers.Conv2D(192, (3,3), padding='valid', strides=(2,2))(tower_2)
tower_3 = layers.Conv2D(256, (1,1), padding='same', strides=(1,1))(model)
tower_3 = layers.Conv2D(256, (1,7), padding='same', strides=(1,1))(tower_3)
tower_3 = layers.Conv2D(320, (7,1), padding='same', strides=(1,1))(tower_3)
tower_3 = layers.Conv2D(320, (3,3), padding='valid', strides=(2,2))(tower_3)
model = layers.concatenate([tower_1, tower_2, tower_3], axis=3)
return model
def inceptionC( model ):
'''
The inception C layer
'''
tower_1 = layers.Conv2D(384,(1,1), padding='same', strides=(1,1))(model)
tower_1a = layers.Conv2D(256, (1,3), padding='same', strides=(1,1))(tower_1)
tower_1b = layers.Conv2D(256, (3,1), padding='same', strides=(1,1))(tower_1)
tower_2 = layers.Conv2D(256, (1,1), padding='same', strides=(1,1))(model)
tower_3 = layers.AveragePooling2D((2,2), padding='same', strides=(1,1))(model)
tower_3 = layers.Conv2D(256, (1,1), padding='same', strides=(1,1))(tower_3)
tower_4 = layers.Conv2D(284, (1,1), padding='same', strides=(1,1))(model)
tower_4 = layers.Conv2D(448, (1,3), padding='same', strides=(1,1))(tower_4)
tower_4 = layers.Conv2D(512, (3,1), padding='same', strides=(1,1))(tower_4)
tower_4a = layers.Conv2D(256, (1,3), padding='same', strides=(1,1))(tower_4)
tower_4b = layers.Conv2D(256, (3,1), padding='same', strides=(1,1))(tower_4)
    model = layers.concatenate([tower_1a, tower_1b,
                                tower_2, tower_3,
                                tower_4a, tower_4b], axis=3)
return model
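# Hedged sketch (not part of this module) of how these blocks are typically
# chained in an Inception-v4 style network; the repeat counts follow the
# original Inception-v4 paper and may differ from how darkCNN actually wires
# them together.
def buildInceptionBody(inputs):
    model = stemInception(inputs)
    for _ in range(4):
        model = inceptionA(model)
    model = reductionA(model)
    for _ in range(7):
        model = inceptionB(model)
    model = reductionB(model)
    for _ in range(3):
        model = inceptionC(model)
    return model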
| 34.061728 | 83 | 0.61091 | 849 | 5,518 | 3.845701 | 0.06596 | 0.035528 | 0.115773 | 0.203675 | 0.925574 | 0.901685 | 0.880245 | 0.877795 | 0.855436 | 0.805819 | 0 | 0.109998 | 0.186118 | 5,518 | 161 | 84 | 34.273292 | 0.617012 | 0.030989 | 0 | 0.32 | 0 | 0 | 0.042248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.013333 | 0 | 0.173333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
510864c776f77229350a8a83a801390ccc7adec9 | 1,067 | py | Python | tests/test_youtube.py | baptabl/spotify-dl | 649238f4186303c483b101f809dc403f5aa6d998 | ["MIT"] | null | null | null | tests/test_youtube.py | baptabl/spotify-dl | 649238f4186303c483b101f809dc403f5aa6d998 | ["MIT"] | null | null | null | tests/test_youtube.py | baptabl/spotify-dl | 649238f4186303c483b101f809dc403f5aa6d998 | ["MIT"] | null | null | null |
from spotify_dl import youtube as yt
def test_download_one_false_skip():
songs = [{'album': 'Hell Freezes Over (Remaster 2018)',
'artist': 'Eagles',
'cover': 'https://i.scdn.co/image/ab67616d0000b27396d28597a5ae44ab66552183',
'genre': 'album rock',
'name': 'Hotel California - Live On MTV, 1994',
'num': 6,
'num_tracks': 15,
'year': '1994'}]
yt.download_songs(songs, download_directory='~/Downloads', format_string='best',
skip_mp3=False)
def test_download_one_true_skip():
songs = [
{'album': 'Hell Freezes Over (Remaster 2018)',
'artist': 'Eagles',
'cover': 'https://i.scdn.co/image/ab67616d0000b27396d28597a5ae44ab66552183',
'genre': 'album rock',
'name': 'Hotel California - Live On MTV, 1994',
'num': 6,
'num_tracks': 15,
'year': '1994'}]
    yt.download_songs(songs, download_directory='~/Downloads', format_string='best',
                      skip_mp3=True)
| 36.793103 | 89 | 0.571696 | 111 | 1,067 | 5.324324 | 0.45045 | 0.023689 | 0.050761 | 0.060914 | 0.873096 | 0.873096 | 0.873096 | 0.873096 | 0.873096 | 0.873096 | 0 | 0.120419 | 0.283974 | 1,067 | 28 | 90 | 38.107143 | 0.653141 | 0 | 0 | 0.75 | 0 | 0 | 0.393627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
515b7fd4917c82225ea897e175f5b9faabd60018 | 13,873 | py | Python | api/radiam/api/tests/permissionstests/researchgrouppermissionstests.py | usask-rc/radiam | bfa38fd33e211b66e30e453a717c5f216e848cb2 | ["MIT"] | 2 | 2020-02-01T20:41:28.000Z | 2020-02-03T20:57:59.000Z | api/radiam/api/tests/permissionstests/researchgrouppermissionstests.py | usask-rc/radiam | bfa38fd33e211b66e30e453a717c5f216e848cb2 | ["MIT"] | 10 | 2020-04-20T15:52:49.000Z | 2020-04-30T18:03:09.000Z | api/radiam/api/tests/permissionstests/researchgrouppermissionstests.py | usask-rc/radiam | bfa38fd33e211b66e30e453a717c5f216e848cb2 | ["MIT"] | null | null | null |
import json
from unittest import mock
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from django.urls import reverse
from radiam.api.models import Project, User, ResearchGroup
from radiam.api.views import ResearchGroupViewSet
class TestSuperuserResearchGroupPermissions(APITestCase):
"""
Test Response codes for researchgroup endpoints for Superusers
"""
fixtures = ['userpermissions']
def setUp(self):
self.factory = APIRequestFactory()
self.user = User.objects.get(username='admin')
def test_superuser_read_researchgroup_list(self):
"""
Test Superuser can read ResearchGroup list
"""
request = self.factory.get(reverse('researchgroup-list'))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'list'})(request)
self.assertContains(
response=response,
text="",
status_code=200)
def test_superuser_write_researchgroup_list(self):
"""
Test Superuser can write ResearchGroup list
(ie: create new ResearchGroups)
"""
body = {
"name": "testgroup",
"description": "Some Test Group",
}
request = self.factory.post(
reverse('researchgroup-list'),
json.dumps(body),
content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'post': 'create'})(request)
self.assertContains(
response=response,
text="",
status_code=201)
def test_superuser_read_researchgroup_detail(self):
"""
        Test Superuser can read an existing research group
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
request = self.factory.get(reverse('researchgroup-detail', args=[detail_researchgroup.id]))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'retrieve'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=200)
@mock.patch("radiam.api.documents.ResearchGroupMetadataDoc")
def test_superuser_write_researchgroup_detail(self, doc):
"""
Test Superuser can update an existing research group
"""
doc.get.return_value = doc
doc.update.return_value = None
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
body = {
"name": "testgroup",
"description": "Some Test Group"
}
        request = self.factory.patch(
            reverse('researchgroup-detail', args=[detail_researchgroup.id]),
            json.dumps(body),
            content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=200)
class TestAdminuserResearchGroupPermissions(APITestCase):
"""
Test Response codes for researchgroup endpoints for Admin users
"""
fixtures = ['userpermissions']
def setUp(self):
self.factory = APIRequestFactory()
self.user = User.objects.get(username='testuser1')
def test_adminuser_read_researchgroup_list(self):
"""
Test Adminuser can read ResearchGroup list. Restricted to groups that
they admin.
"""
user_groups = self.user.get_groups()
request = self.factory.get(reverse('researchgroup-list'))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'list'})(request)
self.assertContains(
response=response,
text="",
status_code=200)
        self.assertEqual(response.data['count'], user_groups.count())
def test_adminuser_write_researchgroup_list(self):
"""
Test Admin user can write ResearchGroup list
(ie: create new ResearchGroups)
"""
body = {
"name": "testgroup",
"description": "Some Test Group",
}
request = self.factory.post(
reverse('researchgroup-list'),
json.dumps(body),
content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'post': 'create'})(request)
self.assertContains(
response=response,
text="",
status_code=201)
def test_adminuser_read_researchgroup_detail(self):
"""
Test Admin user can read an existing research group detail
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
request = self.factory.get(reverse('researchgroup-detail', args=[detail_researchgroup.id]))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'retrieve'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=200)
def test_adminuser_write_researchgroup_detail(self):
"""
Test Adminuser can update an existing research group detail
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
body = {
"description": "updated description",
}
        request = self.factory.patch(
            reverse('researchgroup-detail', args=[detail_researchgroup.id]),
            json.dumps(body),
            content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=200)
def test_adminuser_cannot_write_researchgroup_detail(self):
"""
Test Adminuser for one group cannot update an existing research
group detail for a group they have only member access to.
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 2')
body = {
"description": "updated description",
}
        request = self.factory.patch(
            reverse('researchgroup-detail', args=[detail_researchgroup.id]),
            json.dumps(body),
            content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=403)
class TestDataManageruserResearchGroupPermissions(APITestCase):
"""
Test Response codes for researchgroup endpoints for Data Manager users
"""
fixtures = ['userpermissions']
def setUp(self):
self.factory = APIRequestFactory()
self.user = User.objects.get(username='testuser2')
def test_manageruser_read_researchgroup_list(self):
"""
Test Manager user can read ResearchGroup list
"""
user_groups = self.user.get_groups()
request = self.factory.get(reverse('researchgroup-list'))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'list'})(request)
self.assertContains(
response=response,
text="",
status_code=200)
        self.assertEqual(response.data['count'], user_groups.count())
def test_manageruser_write_researchgroup_list(self):
"""
Test Manager user can write ResearchGroup list
(ie: create new ResearchGroups)
"""
body = {
"name": "testgroup",
"description": "test group description",
}
request = self.factory.post(
reverse('researchgroup-list'),
json.dumps(body),
content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'post': 'create'})(request)
self.assertContains(
response=response,
text="",
status_code=403)
def test_manageruser_read_researchgroup_detail(self):
"""
Test Manager user can read an existing research group
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
request = self.factory.get(reverse('researchgroup-detail', args=[detail_researchgroup.id]))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'retrieve'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=200)
def test_manageruser_write_researchgroup_detail_denied(self):
"""
Test Manager user cannot update an existing research group
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
body = {
"description": "updated description",
}
        request = self.factory.patch(
            reverse('researchgroup-detail', args=[detail_researchgroup.id]),
            json.dumps(body),
            content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=403)
class TestMemberuserResearchGroupPermissions(APITestCase):
"""
Test Response codes for researchgroup endpoints for Member users
"""
fixtures = ['userpermissions']
def setUp(self):
self.factory = APIRequestFactory()
self.user = User.objects.get(username='testuser3')
def test_memberuser_read_researchgroup_list(self):
"""
Test Member user can read ResearchGroup list
"""
user_groups = self.user.get_groups()
request = self.factory.get(reverse('researchgroup-list'))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'list'})(request)
self.assertContains(
response=response,
text="",
status_code=200)
        self.assertEqual(response.data['count'], user_groups.count())
def test_memberuser_write_researchgroup_list_denied(self):
"""
Test Member user cannot write ResearchGroup list
(ie: create new ResearchGroups)
"""
body = {
"name": "testgroup",
"description": "test group description",
}
request = self.factory.post(
reverse('researchgroup-list'),
json.dumps(body),
content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'post': 'create'})(request)
self.assertContains(
response=response,
text="",
status_code=403)
def test_memberuser_read_researchgroup_detail(self):
"""
Test Member user can read an existing research group
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
request = self.factory.get(reverse('researchgroup-detail', args=[detail_researchgroup.id]))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'retrieve'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=200)
def test_memberuser_read_nonmember_researchgroup_detail_denied(self):
"""
Test Member user cannot read an existing research group they don't have access to
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 2')
request = self.factory.get(reverse('researchgroup-detail', args=[detail_researchgroup.id]))
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'get': 'retrieve'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=404)
def test_memberuser_write_researchgroup_detail_denied(self):
"""
Test Memberuser cannot update an existing research group
"""
detail_researchgroup = ResearchGroup.objects.get(name='Test Research Group 1')
body = {
"description": "updated description",
}
        request = self.factory.patch(
            reverse('researchgroup-detail', args=[detail_researchgroup.id]),
            json.dumps(body),
            content_type='application/json')
request.user = self.user
force_authenticate(request, user=request.user)
response = ResearchGroupViewSet.as_view({'patch': 'partial_update'})(request, pk=detail_researchgroup.id)
self.assertContains(
response=response,
text="",
status_code=403)
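# Illustrative helper (not in the original suite): the detail tests above all
# repeat the same authenticate-and-dispatch steps, which a utility like this
# (hypothetical name) could centralize.
def _detail_response(factory, user, method, action, group, data=None):
    kwargs = {}
    if data is not None:
        kwargs = {'data': json.dumps(data),
                  'content_type': 'application/json'}
    request = getattr(factory, method)(
        reverse('researchgroup-detail', args=[group.id]), **kwargs)
    request.user = user
    force_authenticate(request, user=user)
    return ResearchGroupViewSet.as_view({method: action})(request, pk=group.id)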
| 32.565728 | 113 | 0.636128 | 1,364 | 13,873 | 6.333578 | 0.08871 | 0.068758 | 0.048617 | 0.039588 | 0.915963 | 0.850677 | 0.812247 | 0.799051 | 0.764788 | 0.764788 | 0 | 0.00654 | 0.261587 | 13,873 | 425 | 114 | 32.642353 | 0.836783 | 0.104015 | 0 | 0.810277 | 0 | 0 | 0.110607 | 0.003779 | 0 | 0 | 0 | 0 | 0.083004 | 1 | 0.086957 | false | 0 | 0.031621 | 0 | 0.150198 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5168c9c2f1c588ff60b4aecee7a9138a5c640103 | 1,203 | py | Python | problem008.py | racamirko/proj_euler2014 | 62a4ff109ffc08811d3fa504a5014e8d317daad0 | ["BSD-3-Clause"] | null | null | null | problem008.py | racamirko/proj_euler2014 | 62a4ff109ffc08811d3fa504a5014e8d317daad0 | ["BSD-3-Clause"] | null | null | null | problem008.py | racamirko/proj_euler2014 | 62a4ff109ffc08811d3fa504a5014e8d317daad0 | ["BSD-3-Clause"] | null | null | null |
import sys
text = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
curmax = -1
for start in range(len(text) - 12):  # include the final 13-digit window
    prod = 1
    for offset in range(13):
prod *= int(text[start+offset])
if prod > curmax:
curmax = prod
print("Max: %d" % curmax)
| 85.928571 | 1,009 | 0.937656 | 34 | 1,203 | 33.176471 | 0.558824 | 0.007092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.867241 | 0.035744 | 1,203 | 14 | 1,010 | 85.928571 | 0.105172 | 0 | 0 | 0 | 0 | 0 | 0.837074 | 0.831255 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5aca28fb6a50a22d6a3b0d3df0f2b5fb9fcffa41 | 487 | py | Python | tests/src/praxxis/scene/test_history.py | blimongi/praxxis | 4c2496c89d1d26d01b91896496342ca60f3d15ae | ["MIT"] | 9 | 2019-07-31T23:50:16.000Z | 2021-08-21T00:43:44.000Z | tests/src/praxxis/scene/test_history.py | blimongi/praxxis | 4c2496c89d1d26d01b91896496342ca60f3d15ae | ["MIT"] | 22 | 2019-08-01T00:37:53.000Z | 2020-03-31T05:01:57.000Z | tests/src/praxxis/scene/test_history.py | blimongi/praxxis | 4c2496c89d1d26d01b91896496342ca60f3d15ae | ["MIT"] | 7 | 2020-01-03T02:28:36.000Z | 2021-05-13T20:59:19.000Z |
import pytest
def test_empty_history(setup, history_db, library_db, current_scene_db):
from src.praxxis.scene import history
notebooks = history.history(history_db, library_db, current_scene_db)
assert len(notebooks) == 0
def test_short_history(setup, history_db, library_db, current_scene_db, generate_short_history):
from src.praxxis.scene import history
notebooks = history.history(history_db, library_db, current_scene_db)
assert len(notebooks) == 1
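The setup, *_db, and generate_short_history arguments are pytest fixtures resolved from a conftest.py not shown here; a minimal sketch of that pattern (fixture name and body are illustrative, not the project's actual conftest):
import pytest

@pytest.fixture
def history_db(tmp_path):
    # Hypothetical: give each test an isolated, throwaway history database path.
    return str(tmp_path / "history.db")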
| 34.785714
| 96
| 0.776181
| 69
| 487
| 5.15942
| 0.304348
| 0.101124
| 0.179775
| 0.202247
| 0.837079
| 0.837079
| 0.837079
| 0.837079
| 0.837079
| 0.589888
| 0
| 0.004819
| 0.147844
| 487
| 14
| 97
| 34.785714
| 0.853012
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.222222
| false
| 0
| 0.333333
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
cfd4a888ffae95d8b226e4c5de9dd670e03a4250
| 2,359
|
py
|
Python
|
gimbal_gui/o323bgc-release-v096-v20160319/o323bgc-release-v096-v20160319/mp-pano-example1.py
|
manavjain99/oscar_buggy
|
b5dab0848f8667c9515bcfb078730cd0c4060000
|
[
"MIT"
] | null | null | null |
gimbal_gui/o323bgc-release-v096-v20160319/o323bgc-release-v096-v20160319/mp-pano-example1.py
|
manavjain99/oscar_buggy
|
b5dab0848f8667c9515bcfb078730cd0c4060000
|
[
"MIT"
] | null | null | null |
gimbal_gui/o323bgc-release-v096-v20160319/o323bgc-release-v096-v20160319/mp-pano-example1.py
|
manavjain99/oscar_buggy
|
b5dab0848f8667c9515bcfb078730cd0c4060000
|
[
"MIT"
] | null | null | null |
# This script can be used for three situations:
# A) communication channel is connected to the Pixhawk
# Mission Planner is connected to Pixhawk (Sys/Comp ID 01/01)
# B) communication channel is connected to the Pixhawk
# Mission Planner is connected to STorM32 (Sys/Comp ID 71/67)
# C) communication channel is connected to the STorM32
# Mission Planner is connected to STorM32 (Sys/Comp ID 71/67)
#
# Note:
# * AC does NOT support the common messages COMMAND_LONG:DO_MOUNT_CONTROL and COMMAND_LONG:DO_DIGICAM_CONTROL
# but instead uses the APM specific messages DO_MOUNT_CONTROL and DO_DIGICAM_CONTROL
# * STorM32 DOES support the common messages COMMAND_LONG:DO_MOUNT_CONTROL and COMMAND_LONG:DO_DIGICAM_CONTROL
# as well as the APM specific messages DO_MOUNT_CONTROL and DO_DIGICAM_CONTROL
#
# This script uses the DO_MOUNT_CONTROL, DO_MOUNT_CONFIGURE, and DO_DIGICAM_CONTROL messages.
import clr
import MissionPlanner
clr.AddReference("MAVLink")
import MAVLink
print 'Start'
MAV.setMountConfigure(MAVLink.MAV_MOUNT_MODE.NEUTRAL, False, False, False)
Script.Sleep(2000)
MAV.setMountConfigure(MAVLink.MAV_MOUNT_MODE.MAVLINK_TARGETING, False, False, False)
MAV.setMountControl( -2000, 0, 5000, False)
Script.Sleep(3000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( -2000, 0, 0, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( -2000, 0, -5000, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( 0, 0, -5000, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( 0, 0, 0, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( 0, 0, 5000, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( 2000, 0, 5000, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( 2000, 0, 0, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountControl( 2000, 0, -5000, False)
Script.Sleep(2000)
print 'click'
MAV.setDigicamControl(1)
MAV.setMountConfigure(MAVLink.MAV_MOUNT_MODE.NEUTRAL, False, False, False)
Script.Sleep(500)
MAV.setMountConfigure(MAVLink.MAV_MOUNT_MODE.RC_TARGETING, False, False, False)
print 'End'
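The nine move/sleep/shoot stanzas could be folded into one loop over (pitch, yaw) waypoints; a sketch in the same IronPython style, assuming the same MAV and Script helpers Mission Planner exposes (the original only gives the first move an extra second to settle):
pano = [(-2000, 5000), (-2000, 0), (-2000, -5000),
        (0, -5000), (0, 0), (0, 5000),
        (2000, 5000), (2000, 0), (2000, -5000)]
for pitch, yaw in pano:
    MAV.setMountControl(pitch, 0, yaw, False)  # roll stays 0 throughout
    Script.Sleep(2000)                         # let the gimbal settle
    print 'click'
    MAV.setDigicamControl(1)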
| 29.4875
| 111
| 0.7571
| 331
| 2,359
| 5.287009
| 0.223565
| 0.069143
| 0.100571
| 0.102857
| 0.807429
| 0.807429
| 0.742286
| 0.742286
| 0.717714
| 0.717714
| 0
| 0.067298
| 0.14964
| 2,359
| 80
| 112
| 29.4875
| 0.805085
| 0.369648
| 0
| 0.604167
| 0
| 0
| 0.043073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.0625
| null | null | 0.229167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
322570b6746ba5de2727819029477157bba209d3
| 2,877
|
py
|
Python
|
tests/views/test_change_password.py
|
Thenerdstation/MoonTracker
|
15e5cebc15fe69074f8257b4fcaf1606d87dc3e6
|
[
"MIT"
] | 4
|
2018-02-26T23:18:01.000Z
|
2018-04-21T01:48:59.000Z
|
tests/views/test_change_password.py
|
Thenerdstation/MoonTracker
|
15e5cebc15fe69074f8257b4fcaf1606d87dc3e6
|
[
"MIT"
] | 33
|
2018-02-05T01:02:39.000Z
|
2018-04-30T16:05:08.000Z
|
tests/views/test_change_password.py
|
Thenerdstation/MoonTracker
|
15e5cebc15fe69074f8257b4fcaf1606d87dc3e6
|
[
"MIT"
] | 1
|
2022-02-27T22:58:50.000Z
|
2022-02-27T22:58:50.000Z
|
from tests.utils import register, logout, login
from tests.utils import test_client
def test_change_password():
response = register('test_user', '12345678', '1111111111')
assert response.status_code == 200
assert 'Successfully created new account for test_user' in str(
response.data)
response = login('test_user', '12345678')
assert response.status_code == 200
response = test_client.get(
'/settings',
follow_redirects=True)
assert response.status_code == 200
response = test_client.post(
'/settings',
data=dict(
current_password='12345678',
new_password='dankmemes',
new_password_check='dankmemes',
),
follow_redirects=True)
assert response.status_code == 200
assert "Successfully changed password" in str(response.data)
response = logout()
assert response.status_code == 200
assert 'test_user' not in str(response.data)
response = login('test_user', '12345678')
assert response.status_code == 200
assert 'test_user' not in str(response.data)
response = login('test_user', 'dankmemes')
assert response.status_code == 200
assert 'test_user' in str(response.data)
# We have to do this to prevent test_client issues.
logout()
def test_bad_password():
response = register('test_user', '12345678', '1111111111')
assert response.status_code == 200
assert 'Successfully created new account for test_user' in str(
response.data)
response = login('test_user', '12345678')
assert response.status_code == 200
response = test_client.get(
'/settings',
follow_redirects=True)
assert response.status_code == 200
response = test_client.post(
'/settings',
data=dict(
current_password='thisisbad',
new_password='dankmemes',
new_password_check='dankmemes',
),
follow_redirects=True)
assert response.status_code == 200
assert "Current password is invalid" in str(response.data)
logout()
def test_mismatched_passwords():
response = register('test_user', '12345678', '1111111111')
assert response.status_code == 200
assert 'Successfully created new account for test_user' in str(
response.data)
response = login('test_user', '12345678')
assert response.status_code == 200
response = test_client.get(
'/settings',
follow_redirects=True)
assert response.status_code == 200
response = test_client.post(
'/settings',
data=dict(
current_password='12345678',
new_password='dankmemes',
new_password_check='surrealmemes',
),
follow_redirects=True)
assert response.status_code == 200
assert "New passwords do not match" in str(response.data)
logout()
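All three tests repeat the same register/login/POST preamble; a possible shared helper (a sketch, not in the repo, built only from the tests.utils imports above):
def submit_password_change(current, new, check):
    # Hypothetical helper: register and log in a throwaway user, post the
    # change-password form, log out, and hand back the response.
    register('test_user', '12345678', '1111111111')
    login('test_user', '12345678')
    response = test_client.post(
        '/settings',
        data=dict(
            current_password=current,
            new_password=new,
            new_password_check=check,
        ),
        follow_redirects=True)
    logout()
    return response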
| 33.453488
| 67
| 0.65902
| 328
| 2,877
| 5.594512
| 0.167683
| 0.114441
| 0.163488
| 0.196185
| 0.858311
| 0.819619
| 0.810354
| 0.803815
| 0.781471
| 0.753134
| 0
| 0.067277
| 0.240528
| 2,877
| 85
| 68
| 33.847059
| 0.77254
| 0.017032
| 0
| 0.831169
| 0
| 0
| 0.194621
| 0
| 0
| 0
| 0
| 0
| 0.311688
| 1
| 0.038961
| false
| 0.194805
| 0.025974
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
5c6a71f245ab6aedc1d24f0a8a7426a285184f52
| 9,351
|
py
|
Python
|
haychecker/_test/dhc/freshness_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | 2
|
2019-05-22T08:24:38.000Z
|
2020-12-04T13:36:30.000Z
|
haychecker/_test/dhc/freshness_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | null | null | null |
haychecker/_test/dhc/freshness_test.py
|
fruttasecca/hay_checker
|
2bbf4e8e90e0abc590dd74080fb6e4f445056354
|
[
"MIT"
] | 3
|
2018-09-15T13:40:40.000Z
|
2021-06-29T23:31:18.000Z
|
import datetime
import unittest
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, to_timestamp, to_date, current_timestamp, lit
from pyspark.sql.types import StringType, StructField, StructType, IntegerType, FloatType
from haychecker.dhc.metrics import freshness
replace_empty_with_null = udf(lambda x: None if x == "" else x, StringType())
replace_0_with_null = udf(lambda x: None if x == 0 else x, IntegerType())
replace_0dot_with_null = udf(lambda x: None if x == 0. else x, FloatType())
replace_every_string_with_null = udf(lambda x: None, StringType())
replace_every_int_with_null = udf(lambda x: None, IntegerType())
replace_every_float_with_null = udf(lambda x: None, FloatType())
class TestFreshness(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFreshness, self).__init__(*args, **kwargs)
self.spark = SparkSession.builder.master("local[2]").appName("freshness_test").getOrCreate()
self.spark.sparkContext.setLogLevel("ERROR")
def test_empty(self):
data = pd.DataFrame()
data["c1"] = []
data["c2"] = []
schema = [StructField("c1", StringType(), True), StructField("c2", StringType(), True)]
df = self.spark.createDataFrame(data, StructType(schema))
r1, r2 = freshness(["c1", "c2"], dateFormat="dd:MM:yyyy", df=df)
self.assertEqual("None days", r1)
self.assertEqual("None days", r2)
r1, r2 = freshness(["c1", "c2"], timeFormat="dd:MM:yyyy", df=df)
self.assertEqual("None seconds", r1)
self.assertEqual("None seconds", r2)
def test_allnull(self):
data = pd.DataFrame()
data["c1"] = [chr(i) for i in range(100)]
data["c2"] = [chr(i + 1) for i in range(100)]
schema = [StructField("c1", StringType(), True), StructField("c2", StringType(), True)]
df = self.spark.createDataFrame(data, StructType(schema))
df = df.withColumn("c1", replace_every_string_with_null(df["c1"]))
df = df.withColumn("c2", replace_every_string_with_null(df["c2"]))
r1, r2 = freshness(["c1", "c2"], dateFormat="dd:MM:yyyy", df=df)
self.assertEqual("None days", r1)
self.assertEqual("None days", r2)
r1, r2 = freshness(["c1", "c2"], timeFormat="ss:mm:HH", df=df)
self.assertEqual("None seconds", r1)
self.assertEqual("None seconds", r2)
def test_dateformat(self):
format = "yyyy-MM-dd HH:mm:ss"
now = str(datetime.datetime.now())[:19]
# test wrong type of column
data = pd.DataFrame()
dates = [i for i in range(100)]
data["c1"] = dates
df = self.spark.createDataFrame(data)
with self.assertRaises(SystemExit) as cm:
r1 = freshness(["c1"], dateFormat=format, df=df)
# test correct type
data = pd.DataFrame()
dates = [now for _ in range(100)]
data["c1"] = dates
df = self.spark.createDataFrame(data)
df = df.withColumn("c2", to_timestamp(df["c1"], format))
df = df.withColumn("c3", to_date(df["c1"], format))
r1, r2, r3 = freshness(["c1", "c2", "c3"], dateFormat=format, df=df)
self.assertEqual(r1, "0.0 days")
self.assertEqual(r2, "0.0 days")
self.assertEqual(r3, "0.0 days")
data = pd.DataFrame()
dates = [now for _ in range(100)]
for i in range(20):
dates[i] = ""
data["c1"] = dates
df = self.spark.createDataFrame(data)
df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
df = df.withColumn("c2", to_timestamp(df["c1"], format))
df = df.withColumn("c3", to_date(df["c1"], format))
r1, r2, r3 = freshness(["c1", "c2", "c3"], dateFormat=format, df=df)
self.assertEqual(r1, "0.0 days")
self.assertEqual(r2, "0.0 days")
self.assertEqual(r3, "0.0 days")
def test_timeformat_nodate(self):
format = "HH:mm:ss"
now = str(datetime.datetime.now())[11:19]
# test wrong type of column
data = pd.DataFrame()
times = [i for i in range(100)]
data["c1"] = times
df = self.spark.createDataFrame(data)
with self.assertRaises(SystemExit) as cm:
r1 = freshness(["c1"], timeFormat=format, df=df)
# test correct type
data = pd.DataFrame()
times = [now for _ in range(100)]
data["c1"] = times
df = self.spark.createDataFrame(data)
df = df.withColumn("c2", to_timestamp(df["c1"], format))
df = df.withColumn("c3", to_timestamp(df["c1"], format))
r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
r1 = float(r1.split(" ")[0])
r2 = float(r2.split(" ")[0])
r3 = float(r3.split(" ")[0])
self.assertLessEqual(r1, 10.0)
self.assertLessEqual(r2, 10.0)
self.assertLessEqual(r3, 10.0)
data = pd.DataFrame()
times = [now for _ in range(100)]
for i in range(20):
times[i] = ""
data["c1"] = times
df = self.spark.createDataFrame(data)
df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
df = df.withColumn("c2", to_timestamp(df["c1"], format))
df = df.withColumn("c3", to_timestamp(df["c1"], format))
r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
r1 = float(r1.split(" ")[0])
r2 = float(r2.split(" ")[0])
r3 = float(r3.split(" ")[0])
self.assertLessEqual(r1, 10.0)
self.assertLessEqual(r2, 10.0)
self.assertLessEqual(r3, 10.0)
def test_timeformat_nodate_dateincolumns(self):
format = "HH:mm:ss"
now = str(datetime.datetime.now())[11:19]
# test wrong type of column
data = pd.DataFrame()
times = [i for i in range(100)]
data["c1"] = times
df = self.spark.createDataFrame(data)
with self.assertRaises(SystemExit) as cm:
r1 = freshness(["c1"], timeFormat=format, df=df)
# test correct type
data = pd.DataFrame()
times = [now for _ in range(100)]
data["c1"] = times
df = self.spark.createDataFrame(data)
df = df.withColumn("c2", current_timestamp())
df = df.withColumn("c3", current_timestamp())
r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
r1 = float(r1.split(" ")[0])
r2 = float(r2.split(" ")[0])
r3 = float(r3.split(" ")[0])
self.assertLessEqual(r1, 10.0)
self.assertLessEqual(r2, 10.0)
self.assertLessEqual(r3, 10.0)
data = pd.DataFrame()
times = [now for _ in range(100)]
for i in range(20):
times[i] = ""
data["c1"] = times
df = self.spark.createDataFrame(data)
df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
df = df.withColumn("c2", current_timestamp())
df = df.withColumn("c3", current_timestamp())
r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
r1 = float(r1.split(" ")[0])
r2 = float(r2.split(" ")[0])
r3 = float(r3.split(" ")[0])
self.assertLessEqual(r1, 10.0)
self.assertLessEqual(r2, 10.0)
self.assertLessEqual(r3, 10.0)
def test_timeformat_withdate(self):
format = "yyyy-MM-dd HH:mm:ss"
time = str(datetime.datetime.now())[11:19]
time = "1970-01-01 " + time
# test wrong type of column
data = pd.DataFrame()
times = [i for i in range(100)]
data["c1"] = times
df = self.spark.createDataFrame(data)
with self.assertRaises(SystemExit) as cm:
r1 = freshness(["c1"], timeFormat=format, df=df)
# test correct type
data = pd.DataFrame()
times = [time for _ in range(100)]
data["c1"] = times
df = self.spark.createDataFrame(data)
df = df.withColumn("c2", to_timestamp(df["c1"], format))
df = df.withColumn("c3", to_timestamp(df["c1"], format))
df = df.withColumn("c4", current_timestamp().cast("long") - to_timestamp(lit(time), format).cast("long"))
# seconds from 1970 plus 10 seconds for computation time
seconds = df.collect()[0][3] + 10
r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
r1 = float(r1.split(" ")[0])
r2 = float(r2.split(" ")[0])
r3 = float(r3.split(" ")[0])
self.assertLessEqual(r1, seconds)
self.assertLessEqual(r2, seconds)
self.assertLessEqual(r3, seconds)
data = pd.DataFrame()
times = [time for _ in range(100)]
for i in range(20):
times[i] = ""
data["c1"] = times
df = self.spark.createDataFrame(data)
df = df.withColumn("c1", replace_empty_with_null(df["c1"]))
df = df.withColumn("c2", to_timestamp(df["c1"], format))
df = df.withColumn("c3", to_timestamp(df["c1"], format))
r1, r2, r3 = freshness(["c1", "c2", "c3"], timeFormat=format, df=df)
r1 = float(r1.split(" ")[0])
r2 = float(r2.split(" ")[0])
r3 = float(r3.split(" ")[0])
self.assertLessEqual(r1, seconds)
self.assertLessEqual(r2, seconds)
self.assertLessEqual(r3, seconds)
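freshness() results come back as strings such as "42.0 seconds" or "0.0 days", which is why every test re-parses them with split(); a tiny helper (a sketch) would centralize that:
def numeric_part(result):
    # "42.0 seconds" -> 42.0; "None days" raises ValueError, which is a
    # reasonable failure mode for tests expecting a numeric result.
    return float(result.split(" ")[0])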
| 39.125523
| 113
| 0.579082
| 1,213
| 9,351
| 4.390767
| 0.10305
| 0.02929
| 0.060458
| 0.068344
| 0.839655
| 0.830642
| 0.795344
| 0.791776
| 0.763049
| 0.737326
| 0
| 0.051123
| 0.257406
| 9,351
| 238
| 114
| 39.289916
| 0.71587
| 0.024596
| 0
| 0.792746
| 0
| 0
| 0.052903
| 0
| 0
| 0
| 0
| 0
| 0.186529
| 1
| 0.036269
| false
| 0
| 0.036269
| 0
| 0.07772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a66f9996190416d43775aa4d2a5758da6dfa2f9
| 5,044
|
py
|
Python
|
tests/eve_esi_jobs/job_queue_test.py
|
DonalChilde/eve-esi
|
8050e988a5460aa3dc97e573880fcda7243026da
|
[
"MIT"
] | null | null | null |
tests/eve_esi_jobs/job_queue_test.py
|
DonalChilde/eve-esi
|
8050e988a5460aa3dc97e573880fcda7243026da
|
[
"MIT"
] | null | null | null |
tests/eve_esi_jobs/job_queue_test.py
|
DonalChilde/eve-esi
|
8050e988a5460aa3dc97e573880fcda7243026da
|
[
"MIT"
] | null | null | null |
import asyncio
from logging import Logger
from pathlib import Path
from typing import Dict
import pytest
from aiohttp import ClientSession
from rich import print
from tests.eve_esi_jobs.conftest import FileResource
from eve_esi_jobs.examples import jobs as example_jobs
from eve_esi_jobs.models import EsiJob
from eve_esi_jobs.operation_manifest import OperationManifest
# @pytest.mark.asyncio
# async def test_queue_worker(
# jobs: Dict[str, FileResource],
# operation_manifest: OperationManifest,
# test_app_dir: Path,
# logger: Logger,
# ):
# job = EsiJob.deserialize_yaml(jobs["get_industry_facilities.yaml"].data)
# output_path = test_app_dir / Path("test_queue_worker")
# job.update_attributes({"ewo_output_path": str(output_path)})
# JobPreprocessor().pre_process_job(job)
# async with ClientSession() as session:
# limiter = RateLimiter(1)
# remote_source = EsiRemoteSource(
# session=session, limiter=limiter, operation_manifest=operation_manifest
# )
# worker = JobQueueWorker(None, remote_source)
# await worker.do_job(job)
# assert job.result.response.status == 200
# assert len(job.result.data) > 10
# files = list(output_path.glob("**/*.json"))
# assert len(files) == 2
# for file in files:
# assert file.stat().st_size > 5
# @pytest.mark.asyncio
# async def test_job_runner(
# jobs: Dict[str, FileResource],
# operation_manifest: OperationManifest,
# test_app_dir: Path,
# ):
# job = EsiJob.deserialize_yaml(jobs["get_industry_facilities.yaml"].data)
# output_path = test_app_dir / Path("test_job_runner")
# job.update_attributes({"ewo_output_path": str(output_path)})
# async with ClientSession() as session:
# limiter = RateLimiter(1)
# remote_source = EsiRemoteSource(
# session=session, limiter=limiter, operation_manifest=operation_manifest
# )
# await job_runner(job, None, remote_source)
# assert job.result.response.status == 200
# assert len(job.result.data) > 10
# files = list(output_path.glob("**/*.json"))
# assert len(files) == 2
# for file in files:
# assert file.stat().st_size > 5
# @pytest.mark.asyncio
# async def test_queue_runner(
# jobs: Dict[str, FileResource],
# operation_manifest: OperationManifest,
# test_app_dir: Path,
# logger: Logger,
# ):
# logger.info("in the test")
# job_1 = EsiJob.deserialize_yaml(jobs["get_industry_facilities.yaml"].data)
# job_2 = EsiJob.deserialize_json(jobs["get_industry_systems.json"].data)
# test_jobs = [job_1, job_2]
# output_path = test_app_dir / Path("test_job_runner")
# job_preprocessor = JobPreprocessor()
# for job in test_jobs:
# job.update_attributes({"ewo_output_path": str(output_path)})
# job_preprocessor.pre_process_job(job)
# await queue_runner(jobs=test_jobs, operation_manifest=operation_manifest)
# assert job_1.result.response.status == 200
# assert len(job_1.result.data) > 10
# assert job_2.result.response.status == 200
# assert len(job_2.result.data) > 10
# files = list(output_path.glob("**/*.json"))
# assert len(files) == 4
# for file in files:
# assert file.stat().st_size > 5
# def test_do_queue_runner(
# jobs: Dict[str, FileResource],
# operation_manifest: OperationManifest,
# test_app_dir: Path,
# logger: Logger,
# ):
# job_1 = EsiJob.deserialize_yaml(jobs["get_industry_facilities.yaml"].data)
# job_2 = EsiJob.deserialize_json(jobs["get_industry_systems.json"].data)
# test_jobs = [job_1, job_2]
# output_path = test_app_dir / Path("test_job_runner")
# job_preprocessor = JobPreprocessor()
# for job in test_jobs:
# job.update_attributes({"ewo_output_path": str(output_path)})
# job_preprocessor.pre_process_job(job)
# do_queue_runner(
# jobs=test_jobs,
# operation_manifest=operation_manifest,
# observers=[LoggingObserver()],
# )
# assert job_1.result.response.status == 200
# assert job_2.result.response.status == 200
# files = list(output_path.glob("**/*.json"))
# assert len(files) == 4
# for file in files:
# assert file.stat().st_size > 5
# def test_paged_request(
# operation_manifest: OperationManifest,
# test_app_dir: Path,
# logger: Logger,
# ):
# job = example_jobs.get_contracts_public_region_id(10000002)
# output_path = test_app_dir / Path("test_paged_request")
# job_preprocessor = JobPreprocessor()
# job.update_attributes({"ewo_output_path": str(output_path)})
# job_preprocessor.pre_process_job(job)
# do_queue_runner(
# jobs=[job],
# operation_manifest=operation_manifest,
# observers=[LoggingObserver()],
# )
# assert job.result.response.status == 200
# files = list(output_path.glob("**/*.json"))
# assert len(files) == 1
# for file in files:
# assert file.stat().st_size > 5
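Every test in this module is commented out wholesale; an alternative (a sketch, using only stock pytest) is the skip marker, which keeps the tests collectible and visible in reports:
import pytest

@pytest.mark.skip(reason="queue runner API under rework")  # reason is illustrative
def test_queue_worker_placeholder():
    ...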
| 36.28777
| 85
| 0.678033
| 631
| 5,044
| 5.152139
| 0.153724
| 0.06152
| 0.03076
| 0.043064
| 0.816672
| 0.816672
| 0.816672
| 0.780683
| 0.728699
| 0.671486
| 0
| 0.015617
| 0.200238
| 5,044
| 138
| 86
| 36.550725
| 0.790283
| 0.878271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
7aab4cc8f78b88a91f8417a9fd80f487ce0b23e3
| 46
|
py
|
Python
|
yandex_maps/__init__.py
|
begyy/Yandexmaps
|
b319ac4d4a06d2cdd9b40ddaa44eff69f56610be
|
[
"MIT"
] | 8
|
2019-05-18T14:28:05.000Z
|
2022-01-28T11:19:51.000Z
|
yandex_maps/__init__.py
|
begyy/Yandexmaps
|
b319ac4d4a06d2cdd9b40ddaa44eff69f56610be
|
[
"MIT"
] | null | null | null |
yandex_maps/__init__.py
|
begyy/Yandexmaps
|
b319ac4d4a06d2cdd9b40ddaa44eff69f56610be
|
[
"MIT"
] | null | null | null |
from yandex_maps.yandex_maps import Yandexmaps
| 46
| 46
| 0.913043
| 7
| 46
| 5.714286
| 0.714286
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8f97febc03a33f4156ae3c8df9c64e70c2b6b4e7
| 16,973
|
py
|
Python
|
src/formidable/migrations/0024_historicalapplication_historicalfield_historicalresponse_historicalsection.py
|
danpercic86/formidable-backend
|
9390b094588110d224eb633ecb337497a16be59f
|
[
"Apache-2.0"
] | 2
|
2021-01-25T23:40:39.000Z
|
2021-05-24T00:16:27.000Z
|
src/formidable/migrations/0024_historicalapplication_historicalfield_historicalresponse_historicalsection.py
|
danpercic86/formidable-backend
|
9390b094588110d224eb633ecb337497a16be59f
|
[
"Apache-2.0"
] | 59
|
2021-03-01T03:49:56.000Z
|
2022-03-15T07:30:26.000Z
|
src/formidable/migrations/0024_historicalapplication_historicalfield_historicalresponse_historicalsection.py
|
danpercic86/formidable-backend
|
9390b094588110d224eb633ecb337497a16be59f
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-04 00:19
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("formidable", "0023_auto_20210904_0116"),
]
operations = [
migrations.CreateModel(
name="HistoricalSection",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
("order_index", models.PositiveIntegerField(default=0)),
(
"name",
models.CharField(
blank=True, default="", max_length=200, verbose_name="name"
),
),
(
"description",
ckeditor.fields.RichTextField(
blank=True,
default="",
max_length=1000,
verbose_name="description",
),
),
(
"button_text",
models.CharField(
default="Submit",
max_length=50,
verbose_name="submit button text",
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"form",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="section",
to="formidable.form",
verbose_name="form",
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical section",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name="HistoricalResponse",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"status",
model_utils.fields.StatusField(
choices=[("new", "new"), ("err", "has errors"), ("ok", "ok")],
default="new",
max_length=100,
no_check_for_status=True,
verbose_name="status",
),
),
(
"status_changed",
model_utils.fields.MonitorField(
default=django.utils.timezone.now,
monitor="status",
verbose_name="status changed",
),
),
("value", models.CharField(max_length=500, verbose_name="value")),
(
"errors",
models.CharField(
blank=True, default="", max_length=500, verbose_name="errors"
),
),
(
"observations",
models.CharField(
blank=True,
default="",
max_length=500,
verbose_name="observations",
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"application",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="response",
to="formidable.application",
verbose_name="application",
),
),
(
"field",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="response",
to="formidable.field",
verbose_name="field",
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical response",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name="HistoricalField",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
("name", models.CharField(max_length=100, verbose_name="name")),
(
"type",
models.CharField(
choices=[
("text", "text"),
("email", "email"),
("url", "url"),
("file", "file"),
("integer", "integer"),
("decimal", "decimal"),
("tel", "phone number"),
("date", "date"),
("time", "time"),
("datetime", "datetime"),
("radio", "radio"),
("checkbox", "checkbox"),
("select", "select"),
],
default="text",
max_length=50,
verbose_name="type",
),
),
(
"placeholder",
models.CharField(
blank=True,
default="",
max_length=200,
verbose_name="placeholder",
),
),
(
"is_required",
models.BooleanField(default=False, verbose_name="is required"),
),
(
"dependent_value",
models.CharField(
blank=True,
default="",
max_length=200,
verbose_name="with value",
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"dependent_field",
models.ForeignKey(
blank=True,
db_constraint=False,
limit_choices_to={"type__in": ["select", "checkbox", "radio"]},
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="dependent",
to="formidable.field",
verbose_name="depends on",
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"section",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="field",
to="formidable.section",
verbose_name="section",
),
),
],
options={
"verbose_name": "historical field",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name="HistoricalApplication",
fields=[
(
"id",
models.BigIntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"status",
model_utils.fields.StatusField(
choices=[("new", "new"), ("err", "has errors"), ("ok", "ok")],
default="new",
max_length=100,
no_check_for_status=True,
verbose_name="status",
),
),
(
"status_changed",
model_utils.fields.MonitorField(
default=django.utils.timezone.now,
monitor="status",
verbose_name="status changed",
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"applicant",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="application",
to=settings.AUTH_USER_MODEL,
verbose_name="applicant",
),
),
(
"form",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
related_query_name="application",
to="formidable.form",
verbose_name="form",
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical application",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
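For context, django-simple-history emits migrations like this one when HistoricalRecords is attached to a model; a minimal sketch (field list abbreviated, not the project's actual models.py):
from django.db import models
from simple_history.models import HistoricalRecords

class Section(models.Model):
    name = models.CharField(max_length=200, blank=True, default="")
    history = HistoricalRecords()  # generates HistoricalSection and a migration like the above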
| 38.31377
| 87
| 0.368291
| 1,035
| 16,973
| 5.828019
| 0.142995
| 0.069297
| 0.034483
| 0.043767
| 0.793269
| 0.762765
| 0.762765
| 0.762765
| 0.754145
| 0.754145
| 0
| 0.010482
| 0.533494
| 16,973
| 442
| 88
| 38.400452
| 0.751326
| 0.002651
| 0
| 0.720183
| 1
| 0
| 0.103923
| 0.008862
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016055
| 0
| 0.022936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8f2c0f27d1535a634b425c7dd12cbf87472c5cae
| 15,768
|
py
|
Python
|
audio_atari/audio/contrastive_loss.py
|
sahiljain11/ICML2019-TREX
|
82694f2dfe6e3bb0668948ffc531fcde20cdf45b
|
[
"MIT"
] | null | null | null |
audio_atari/audio/contrastive_loss.py
|
sahiljain11/ICML2019-TREX
|
82694f2dfe6e3bb0668948ffc531fcde20cdf45b
|
[
"MIT"
] | null | null | null |
audio_atari/audio/contrastive_loss.py
|
sahiljain11/ICML2019-TREX
|
82694f2dfe6e3bb0668948ffc531fcde20cdf45b
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
def __init__(self, batch_size, temperature=0.5):
super().__init__()
self.batch_size = batch_size
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.register_buffer("temperature", torch.tensor(temperature).to(device=device))
# self.register_buffer("negatives_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=bool)).float())
# self.register_buffer("negatives_mask", (~torch.eye(batch_size * 2, batch_size * 2, dtype=int)).float().to(device=device))
self.temperature = torch.tensor(temperature).to(device=device)
self.negatives_mask = 1-torch.eye(batch_size * 2,batch_size * 2).float().to(device=device)
def forward(self, emb_i, emb_j):
"""
emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
z_i, z_j as per SimCLR paper
https://zablo.net/blog/post/understanding-implementing-simclr-guide-eli5-pytorch/
"""
# z_i = F.normalize(emb_i, dim=1)
# z_j = F.normalize(emb_j, dim=1)
# print(emb_i, z_i)
representations = torch.cat([emb_i, emb_j], dim=0)
# print(representations.shape) # torch.Size([64, 1]) for batch size 32
# compute similarity based on difference of rewards
r1 = representations.unsqueeze(0).repeat(self.batch_size*2,1,1)
r2 = representations.unsqueeze(1).repeat(1,self.batch_size*2,1)
sim = torch.abs(r1-r2)
        # convert the distance to a similarity; 1/(1+d(p1,p2)) stays in (0, 1], unlike 1-d
similarity_matrix = 1/(1+sim)
similarity_matrix = similarity_matrix.squeeze()
# print('similarity matrix: ',similarity_matrix)
# print(similarity_matrix.is_cuda)
# print(similarity_matrix.shape) #torch.Size([64, 64])
sim_ij = torch.diag(similarity_matrix, self.batch_size)
sim_ji = torch.diag(similarity_matrix, -self.batch_size)
positives = torch.cat([sim_ij, sim_ji], dim=0)
nominator = torch.exp(positives / self.temperature)
# print('numerator:',nominator.shape)
denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)
# print('negatives mask:', self.negatives_mask)
# print('denominator sum:',torch.sum(denominator, dim=1).shape)
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
# print('loss partial: ',loss_partial)
# print('loss_partial: ',loss_partial.shape)
loss = torch.sum(loss_partial) / (2 * self.batch_size)
print('CAL loss:',loss)
return loss
class ContrastiveSingleLoss(nn.Module):
# only uses the loss for the first sample compared with all other samples
def __init__(self, batch_size, temperature=0.5):
super().__init__()
self.batch_size = batch_size
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.temperature = torch.tensor(temperature).to(device=device)
self.negatives_mask = 1-torch.eye(batch_size * 2,batch_size * 2).float().to(device=device)
def forward(self, emb_i, emb_j):
"""
emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
z_i, z_j as per SimCLR paper
https://zablo.net/blog/post/understanding-implementing-simclr-guide-eli5-pytorch/
"""
representations = torch.cat([emb_i, emb_j], dim=0) # torch.Size([64, 1]) for batch size 32
# compute similarity based on difference of rewards
r1 = representations.unsqueeze(0).repeat(self.batch_size*2,1,1)
r2 = representations.unsqueeze(1).repeat(1,self.batch_size*2,1)
sim = torch.abs(r1-r2)
        # convert the distance to a similarity; 1/(1+d(p1,p2)) stays in (0, 1], unlike 1-d
similarity_matrix = 1/(1+sim)
similarity_matrix = similarity_matrix.squeeze() #torch.Size([64, 64])
sim_ij = torch.diag(similarity_matrix, self.batch_size)
sim_ji = torch.diag(similarity_matrix, -self.batch_size)
positives = torch.cat([sim_ij, sim_ji], dim=0)
nominator = torch.exp(positives / self.temperature)
denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
# only taking loss of the first element against all other elements in the batch (Yes v/s No's or No v/s Yes's)
        loss = (loss_partial[0] + loss_partial[self.batch_size]) / 2  # only the first pair contributes, so no batch-size scaling
print('CAL Single loss:',loss)
return loss
class ContrastiveSingleProsodyLoss(nn.Module):
def __init__(self, batch_size, temperature=0.5):
super().__init__()
self.batch_size = batch_size
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.temperature = torch.tensor(temperature).to(device=device)
self.negatives_mask = 1-torch.eye(batch_size * 2,batch_size * 2).float().to(device=device)
def forward(self, emb_i, emb_j, prosody_i, prosody_j):
"""
emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
z_i, z_j as per SimCLR paper
https://zablo.net/blog/post/understanding-implementing-simclr-guide-eli5-pytorch/
"""
# print(emb_i.shape, prosody_i.shape)
representations = torch.cat([emb_i, emb_j], dim=0) # torch.Size([64, 1]) for batch size 32
eps = 0.1
# prosody_diff = abs(prosody_i-prosody_j)
# print('prosody_diff: ',prosody_diff.shape) #torch.Size([16])
# compute similarity based on difference of rewards
r1 = representations.unsqueeze(0).repeat(self.batch_size*2,1,1)
r2 = representations.unsqueeze(1).repeat(1,self.batch_size*2,1)
# print(r1.shape,r2.shape) #torch.Size([32, 32, 1]) torch.Size([32, 32, 1])
sim = torch.abs(r1-r2)
        # convert the distance to a similarity; 1/(1+d(p1,p2)) stays in (0, 1], unlike 1-d
similarity_matrix = 1/(1+sim)
similarity_matrix = similarity_matrix.squeeze()
# print(similarity_matrix)
sim_ij = torch.diag(similarity_matrix, self.batch_size)
sim_ji = torch.diag(similarity_matrix, -self.batch_size)
positives = torch.cat([sim_ij, sim_ji], dim=0)
# set temperature as the difference between overall energy of utterances.
# Ensure temp between 0 and 1 (softmax all energy differences in this batch).
# Are rewards scaled? No. And here we consider returns which could be arbitrarily long.
        # a higher temperature scales down the similarity of rewards; a lower temperature scales it up
# prosody for the similarity matrix and prosody for the positives should be computed similarly to
# similarity matrix and positive computation respectively
        softmax = nn.Softmax(dim=1)  # TODO: confirm dim=1 matches prosody_diff's shape
# prosody_diff_softmax = softmax(prosody_diff) + eps
prosody_temp = torch.cat([prosody_i, prosody_j], dim=0)
p1 = prosody_temp.unsqueeze(0).repeat(self.batch_size*2,1,1)
p2 = prosody_temp.unsqueeze(1).repeat(1,self.batch_size*2,1)
prosody_diff = torch.abs(p1-p2)
prosody_matrix = softmax(prosody_diff.squeeze()) +eps
p_ij = torch.diag(prosody_matrix, self.batch_size)
p_ji = torch.diag(prosody_matrix, -self.batch_size)
prosody_positives = torch.cat([p_ij, p_ji], dim=0)
# prosody = torch.cat([prosody_diff_softmax, prosody_diff_softmax], dim=0) +eps
# print(prosody.shape, similarity_matrix.shape)
# nominator = torch.exp(positives / self.temperature)
nominator = torch.exp(torch.div(positives, prosody_positives))
# denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)
# check shape of similarity matrix and prosody tensors
denominator = self.negatives_mask * torch.exp(torch.div(similarity_matrix,prosody_matrix))
# print(prosody)
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
        loss = (loss_partial[0] + loss_partial[self.batch_size]) / 2  # only the first pair contributes, so no batch-size scaling
print('CAL Single Prosody loss:',loss)
return loss
class ContrastivePASELoss(nn.Module):
def __init__(self, batch_size, temperature=0.5):
super().__init__()
self.batch_size = batch_size
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.temperature = torch.tensor(temperature).to(device=device)
self.negatives_mask = 1-torch.eye(batch_size * 2,batch_size * 2).float().to(device=device)
def forward(self, emb_i, emb_j, pase_i, pase_j):
"""
emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
z_i, z_j as per SimCLR paper
https://zablo.net/blog/post/understanding-implementing-simclr-guide-eli5-pytorch/
"""
representations = torch.cat([emb_i, emb_j], dim=0) # torch.Size([64, 1]) for batch size 32
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
# print('in CAL: ',pase_i.shape,pase_j.shape)
pase_cosine = cos(pase_i, pase_j) # TODO: check shape is same as batch size
#min_pase_cos, max_pase_cos = torch.min(pase_cosine), torch.max(pase_cosine)
min_cos, max_cos = -1,1
pase_cosine_scaled = (pase_cosine-min_cos)/(max_cos-min_cos)
pase_cosine_scaled_rep = torch.cat([pase_cosine_scaled, pase_cosine_scaled], dim=0)
# compute similarity based on difference of rewards
r1 = representations.unsqueeze(0).repeat(self.batch_size*2,1,1)
r2 = representations.unsqueeze(1).repeat(1,self.batch_size*2,1)
sim = torch.abs(r1-r2)
        # convert the distance to a similarity; 1/(1+d(p1,p2)) stays in (0, 1], unlike 1-d
similarity_matrix = 1/(1+sim)
similarity_matrix = similarity_matrix.squeeze()
sim_ij = torch.diag(similarity_matrix, self.batch_size)
sim_ji = torch.diag(similarity_matrix, -self.batch_size)
positives = torch.cat([sim_ij, sim_ji], dim=0)
nominator = torch.exp(positives / self.temperature)
denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
# a weighted version of similarity between utterances is used to scale each term in the numerator
# based on how close the pair actually is (from cosine similarity of their audio embeddings)
# construct a dummy example
# scale the loss by the cosine similarity (converted between 0,1 from -1,1) of the audio embeddings
loss = torch.sum(pase_cosine_scaled_rep * loss_partial) / (2 * self.batch_size)
print('CAL PASE loss:',loss)
return loss
class ContrastivePASEProsodyLoss(nn.Module):
def __init__(self, batch_size, temperature=0.5):
super().__init__()
self.batch_size = batch_size
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.temperature = torch.tensor(temperature).to(device=device)
self.negatives_mask = 1-torch.eye(batch_size * 2,batch_size * 2).float().to(device=device)
def forward(self, emb_i, emb_j, pase_i, pase_j, prosody_i, prosody_j):
"""
emb_i and emb_j are batches of embeddings, where corresponding indices are pairs
z_i, z_j as per SimCLR paper
https://zablo.net/blog/post/understanding-implementing-simclr-guide-eli5-pytorch/
"""
# TODO: Should rewards predicted by TREX be scaled for better applicability of CAL?
representations = torch.cat([emb_i, emb_j], dim=0) # torch.Size([64, 1])
eps = 0.1
prosody_diff = abs(prosody_i-prosody_j)
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
pase_cosine = cos(pase_i, pase_j) # TODO: check shape is same as batch size
min_cos, max_cos = -1,1
pase_cosine_scaled = (pase_cosine-min_cos)/(max_cos-min_cos)
pase_cosine_scaled_rep = torch.cat([pase_cosine_scaled, pase_cosine_scaled], dim=0)
# compute similarity based on difference of rewards
r1 = representations.unsqueeze(0).repeat(self.batch_size*2,1,1)
r2 = representations.unsqueeze(1).repeat(1,self.batch_size*2,1)
sim = torch.abs(r1-r2)
        # convert the distance to a similarity; 1/(1+d(p1,p2)) stays in (0, 1], unlike 1-d
similarity_matrix = 1/(1+sim)
similarity_matrix = similarity_matrix.squeeze()
sim_ij = torch.diag(similarity_matrix, self.batch_size)
sim_ji = torch.diag(similarity_matrix, -self.batch_size)
positives = torch.cat([sim_ij, sim_ji], dim=0)
# set temperature as the difference between overall energy of utterances.
# Ensure temp between 0 and 1 (softmax all energy differences in this batch).
# Are rewards scaled? No. And here we consider returns which could be arbitrarily long.
        # a higher temperature scales down the similarity of rewards; a lower temperature scales it up
        softmax = nn.Softmax(dim=1)  # TODO: confirm dim=1 matches prosody_diff's shape
# prosody_diff_softmax = softmax(prosody_diff) + eps
prosody_temp = torch.cat([prosody_i, prosody_j], dim=0)
p1 = prosody_temp.unsqueeze(0).repeat(self.batch_size*2,1,1)
p2 = prosody_temp.unsqueeze(1).repeat(1,self.batch_size*2,1)
prosody_diff = torch.abs(p1-p2)
prosody_matrix = softmax(prosody_diff.squeeze()) +eps
p_ij = torch.diag(prosody_matrix, self.batch_size)
p_ji = torch.diag(prosody_matrix, -self.batch_size)
prosody_positives = torch.cat([p_ij, p_ji], dim=0)
# prosody = torch.cat([prosody_diff_softmax, prosody_diff_softmax], dim=0) +eps
# print(prosody.shape, similarity_matrix.shape)
# nominator = torch.exp(positives / self.temperature)
nominator = torch.exp(torch.div(positives, prosody_positives))
# denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)
# check shape of similarity matrix and prosody tensors
denominator = self.negatives_mask * torch.exp(torch.div(similarity_matrix,prosody_matrix))
# nominator = torch.exp(positives / self.temperature)
# nominator = torch.exp(torch.div(positives, prosody))
# denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)
# check shape of similarity matrix and prosody tensors and along which dimension division makes most sense
# denominator = self.negatives_mask * torch.exp(torch.div(similarity_matrix,prosody))
loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
# a weighted version of similarity between utterances is used to scale each term in the numerator
# based on how close the pair actually is (from cosine similarity of their audio embeddings)
# construct a dummy example
# scale the loss by the cosine similarity (converted between 0,1 from -1,1) of the audio embeddings
loss = torch.sum(pase_cosine_scaled_rep * loss_partial) / (2 * self.batch_size)
print('CAL PASE Prosody loss:',loss)
return loss
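A minimal usage sketch (shapes inferred from forward(), not from the repo): each loss consumes two (batch_size, 1) batches whose matching rows form the positive pairs.
import torch

batch_size = 32
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loss_fn = ContrastiveLoss(batch_size, temperature=0.5)
emb_i = torch.randn(batch_size, 1, device=device, requires_grad=True)
emb_j = torch.randn(batch_size, 1, device=device, requires_grad=True)
loss = loss_fn(emb_i, emb_j)  # also prints 'CAL loss: ...'
loss.backward()               # gradients flow back into emb_i / emb_j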
| 49.429467
| 135
| 0.666286
| 2,168
| 15,768
| 4.675738
| 0.099631
| 0.062149
| 0.057709
| 0.022097
| 0.899379
| 0.876887
| 0.8759
| 0.8759
| 0.861892
| 0.858439
| 0
| 0.021661
| 0.227042
| 15,768
| 319
| 136
| 49.429467
| 0.810059
| 0.381215
| 0
| 0.879195
| 0
| 0
| 0.012662
| 0
| 0
| 0
| 0
| 0.00627
| 0
| 1
| 0.067114
| false
| 0
| 0.020134
| 0
| 0.154362
| 0.033557
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
56b2bec91f799ee8c5f3363afee3f1f74bf2420c
| 14
|
py
|
Python
|
tests/syntax/triple_equal.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 287
|
2019-04-08T13:18:29.000Z
|
2021-03-14T19:10:21.000Z
|
tests/syntax/triple_equal.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 191
|
2019-04-08T14:39:18.000Z
|
2021-03-14T22:14:56.000Z
|
tests/syntax/triple_equal.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 9
|
2019-04-08T12:54:08.000Z
|
2020-11-20T02:26:27.000Z
|
x = y === z
| 3.5
| 11
| 0.214286
| 3
| 14
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 14
| 3
| 12
| 4.666667
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
56ca4f617fc6667c384db8668b8b8555b5057693
| 13,788
|
py
|
Python
|
unittest/scripts/auto/py_shell/scripts/util_import_table_multifile_norecord.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 119
|
2016-04-14T14:16:22.000Z
|
2022-03-08T20:24:38.000Z
|
unittest/scripts/auto/py_shell/scripts/util_import_table_multifile_norecord.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 9
|
2017-04-26T20:48:42.000Z
|
2021-09-07T01:52:44.000Z
|
unittest/scripts/auto/py_shell/scripts/util_import_table_multifile_norecord.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 51
|
2016-07-20T05:06:48.000Z
|
2022-03-09T01:20:53.000Z
|
# This is a unit test file for WL13362: support for importTable from multiple compressed and uncompressed files
import os
target_port = __mysql_port
target_xport = __port
target_schema = 'wl13362'
uri = "mysql://" + __mysqluripwd
xuri = "mysqlx://" + __uripwd
if __os_type != "windows":
def filename_for_output(filename):
return filename
else:
def filename_for_output(filename):
long_path_prefix = r"\\?" "\\"
return long_path_prefix + filename.replace("/", "\\")
#@<> Setup test
shell.connect(uri)
session.run_sql('DROP SCHEMA IF EXISTS ' + target_schema)
session.run_sql('CREATE SCHEMA ' + target_schema)
session.run_sql('USE ' + target_schema)
session.run_sql('DROP TABLE IF EXISTS `lorem`')
session.run_sql("CREATE TABLE `lorem` (`id` int primary key, `part` text) ENGINE=InnoDB CHARSET=utf8mb4")
#@<> Set local_infile to true
session.run_sql('SET GLOBAL local_infile = true')
#@<> Retrieve directory content
chunked_dir = os.path.join(__import_data_path, "chunked")
dircontent = os.listdir(chunked_dir)
raw_files = sorted(list(filter(lambda x: x.endswith(".tsv"), dircontent)))
gz_files = sorted(list(filter(lambda x: x.endswith(".gz"), dircontent)))
zst_files = sorted(list(filter(lambda x: x.endswith(".zst"), dircontent)))
print("raw_files", raw_files)
print("gz_files", gz_files)
print("zst_files", zst_files)
#@<> Variations about empty file list
rc = testutil.call_mysqlsh([uri, '--schema=' + target_schema, '--', 'util', 'import-table', '', '--table=lorem'])
EXPECT_EQ(1, rc)
EXPECT_STDOUT_CONTAINS("File list cannot be empty.")
rc = testutil.call_mysqlsh([uri, '--schema=' + target_schema, '--', 'util', 'import-table', '', '', '--table=lorem'])
EXPECT_EQ(1, rc)
EXPECT_STDOUT_CONTAINS("File list cannot be empty.")
EXPECT_THROWS(lambda: util.import_table('', {'table': 'lorem'}), "RuntimeError: Util.import_table: File list cannot be empty.")
EXPECT_THROWS(lambda: util.import_table('', '', {'table': 'lorem'}), "ValueError: Util.import_table: Invalid number of arguments, expected 1 to 2 but got 3")
EXPECT_THROWS(lambda: util.import_table([], {'table': 'lorem'}), "RuntimeError: Util.import_table: File list cannot be empty.")
EXPECT_THROWS(lambda: util.import_table([''], {'table': 'lorem'}), "RuntimeError: Util.import_table: File list cannot be empty.")
EXPECT_THROWS(lambda: util.import_table(['', ''], {'table': 'lorem'}), "RuntimeError: Util.import_table: File list cannot be empty.")
#@<> Single file - cli
rc = testutil.call_mysqlsh([uri, '--schema=' + target_schema, '--', 'util', 'import-table', chunked_dir + os.path.sep + raw_files[0], '--table=lorem'])
EXPECT_EQ(0, rc)
EXPECT_STDOUT_CONTAINS(raw_files[0] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Single file
util.import_table(chunked_dir + os.path.sep + raw_files[0], {'schema': target_schema, 'table': 'lorem'})
EXPECT_STDOUT_CONTAINS(raw_files[0] + ": Records: 100 Deleted: 0 Skipped: 100 Warnings: 100")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 100 Warnings: 100")
#@<> Single file - array arg
util.import_table(chunked_dir + os.path.sep + raw_files[0], {'schema': target_schema, 'table': 'lorem'})
EXPECT_STDOUT_CONTAINS(raw_files[0] + ": Records: 100 Deleted: 0 Skipped: 100 Warnings: 100")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 100 Warnings: 100")
#@<> Wildcard to single file - cli
rc = testutil.call_mysqlsh([uri, '--', 'util', 'import-table', chunked_dir + os.path.sep + 'lorem_aa*', '--schema=' + target_schema, '--table=lorem'])
EXPECT_EQ(0, rc)
EXPECT_STDOUT_CONTAINS("lorem_aaa.tsv: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("1 file(s) (2.49 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Wildcard to single file
util.import_table(chunked_dir + os.path.sep + 'lorem_aa*', {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
EXPECT_STDOUT_CONTAINS("lorem_aaa.tsv: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("1 file(s) (2.49 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Wildcard to single file - array arg
util.import_table([chunked_dir + os.path.sep + 'lorem_aa*'], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
EXPECT_STDOUT_CONTAINS("lorem_aaa.tsv: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("1 file(s) (2.49 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Expand wildcard to 0 files - cli
rc = testutil.call_mysqlsh([uri, '--', 'util', 'import-table', chunked_dir + os.path.sep + 'lorem_xx*', '--schema=' + target_schema, '--table=lorem'])
EXPECT_EQ(0, rc)
EXPECT_STDOUT_CONTAINS("0 file(s) (0 bytes) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 0 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Expand wildcard to 0 files
util.import_table(chunked_dir + os.path.sep + 'lorem_xx*', {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
EXPECT_STDOUT_CONTAINS("0 file(s) (0 bytes) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 0 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Expand wildcard to 0 files - array arg
util.import_table([chunked_dir + os.path.sep + 'lorem_xx*'], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
EXPECT_STDOUT_CONTAINS("0 file(s) (0 bytes) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 0 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Import single non-existing file
EXPECT_THROWS(lambda: util.import_table([chunked_dir + os.path.sep + 'lorem_xxx_xxx.tsv'], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True}),
"Cannot open file '"+ filename_for_output(chunked_dir + os.path.sep + 'lorem_xxx_xxx.tsv') +"': No such file or directory"
)
#@<> Import multiple non-existing files
EXPECT_THROWS(lambda: util.import_table([chunked_dir + os.path.sep + 'lorem_xxx_xxx.tsv', 'lorem_xxx_ccc.tsv'], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True}),
"File " + filename_for_output(chunked_dir + os.path.sep + 'lorem_xxx_xxx.tsv') + " does not exists."
)
EXPECT_STDOUT_CONTAINS("ERROR: File " + filename_for_output(chunked_dir + os.path.sep + 'lorem_xxx_xxx.tsv') + " does not exists.")
EXPECT_STDOUT_CONTAINS("ERROR: File " + filename_for_output(os.path.join(os.path.abspath(os.path.curdir), "lorem_xxx_ccc.tsv")) + " does not exists.")
EXPECT_STDOUT_CONTAINS("0 file(s) (0 bytes) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 0 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Wildcard files from non-existing directory
EXPECT_THROWS(lambda: util.import_table([os.path.join(chunked_dir, "nonexisting", 'lorem_*.tsv')], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True}),
"Util.import_table: Directory " + filename_for_output(os.path.join(chunked_dir, "nonexisting")) + " does not exists."
)
EXPECT_STDOUT_CONTAINS("ERROR: Directory " + filename_for_output(os.path.join(chunked_dir, "nonexisting")) + " does not exists.")
#@<> Select single file from non-existing directory
EXPECT_THROWS(lambda: util.import_table([os.path.join(chunked_dir, "nonexisting", 'lorem_abc.tsv')], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True}),
"Util.import_table: Cannot open file '" + filename_for_output(os.path.join(chunked_dir, "nonexisting", 'lorem_abc.tsv')) + "': No such file or directory"
)
#@<> Select multiple files from non-existing directory
EXPECT_THROWS(lambda: util.import_table([os.path.join(chunked_dir, "nonexisting", 'lorem_abc.tsv'), os.path.join(chunked_dir, "nonexisting", 'lorem_abq.tsv')], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True}),
"File " + filename_for_output(os.path.join(chunked_dir, "nonexisting", 'lorem_abc.tsv')) + " does not exists."
)
EXPECT_STDOUT_CONTAINS("ERROR: File " + filename_for_output(os.path.join(chunked_dir, "nonexisting", 'lorem_abc.tsv')) + " does not exists.")
EXPECT_STDOUT_CONTAINS("ERROR: File " + filename_for_output(os.path.join(chunked_dir, "nonexisting", 'lorem_abq.tsv')) + " does not exists.")
#@<> Select multiple files with single wildcard from directory
util.import_table([os.path.join(chunked_dir, 'lorem_a*.tsv')], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
for name in [f for f in raw_files if f.startswith("lorem_a") and f.endswith(".tsv")]:
EXPECT_STDOUT_CONTAINS(name + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("6 file(s) (14.75 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 600 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Select multiple files with multiple wildcards from directory
util.import_table([os.path.join(chunked_dir, 'lorem_a*.tsv'), os.path.join(chunked_dir, 'lorem_b*.tsv.gz'), os.path.join(chunked_dir, 'lorem_c*.tsv.zst')], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
for name in [f for f in dircontent if (f.startswith("lorem_a") and f.endswith(".tsv")) or (f.startswith("lorem_b") and f.endswith(".tsv.gz")) or (f.startswith("lorem_c") and f.endswith(".tsv.zst"))]:
EXPECT_STDOUT_CONTAINS(name + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("17 file(s) (42.16 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 1700 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Single compressed file
util.import_table([os.path.join(chunked_dir, zst_files[0])], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
EXPECT_STDOUT_CONTAINS(zst_files[0] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("File '" + filename_for_output(os.path.join(chunked_dir, zst_files[0])) + "' (1.26 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Multiple compressed files
util.import_table([os.path.join(chunked_dir, zst_files[0]), os.path.join(chunked_dir, zst_files[1]), os.path.join(chunked_dir, gz_files[0])], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True})
EXPECT_STDOUT_CONTAINS(zst_files[0] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS(zst_files[1] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS(gz_files[0] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("3 file(s) (7.48 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 300 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Mixed file list input
EXPECT_THROWS(lambda: util.import_table([os.path.join(chunked_dir, "nonexisting_a.csv"), os.path.join(chunked_dir, "lorem_a*"), os.path.join(chunked_dir, "lorem_b*"), '', os.path.join(chunked_dir, zst_files[0]), os.path.join(chunked_dir, zst_files[1]), os.path.join(chunked_dir, gz_files[0])], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True}),
"File " + filename_for_output(os.path.join(chunked_dir, "nonexisting_a.csv")) + " does not exists."
)
EXPECT_STDOUT_CONTAINS("ERROR: File " + filename_for_output(os.path.join(chunked_dir, "nonexisting_a.csv")) + " does not exists.")
EXPECT_STDOUT_CONTAINS(zst_files[0] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS(zst_files[1] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS(gz_files[0] + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
for name in [f for f in dircontent if f.startswith("lorem_a") or f.startswith("lorem_b")]:
EXPECT_STDOUT_CONTAINS(name + ": Records: 100 Deleted: 0 Skipped: 0 Warnings: 0")
EXPECT_STDOUT_CONTAINS("19 file(s) (47.15 KB) was imported in ")
EXPECT_STDOUT_CONTAINS("Total rows affected in " + target_schema + ".lorem: Records: 1900 Deleted: 0 Skipped: 0 Warnings: 0")
#@<> Multifile import does not support bytesPerChunk option
EXPECT_THROWS(lambda: util.import_table([os.path.join(chunked_dir, "nonexisting_a.csv"), os.path.join(chunked_dir, "lorem_a*"), '', os.path.join(chunked_dir, zst_files[0]), os.path.join(chunked_dir, zst_files[1]), os.path.join(chunked_dir, gz_files[0])], {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True, 'bytesPerChunk': '1M'}),
"Util.import_table: The 'bytesPerChunk' option cannot be used when loading from multiple files."
)
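#@<> bytesPerChunk with a single file path (editor's sketch, not part of the original suite): the restriction above is specific to multi-file imports, so a lone path may still be chunked. Reuses raw_files[0] from the setup; summary assertions are omitted because the exact sizes depend on the fixture.
util.import_table(os.path.join(chunked_dir, raw_files[0]), {'schema': target_schema, 'table': 'lorem', 'replaceDuplicates': True, 'bytesPerChunk': '1M'})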
#@<> Missing target table + single path with wildcard
EXPECT_THROWS(lambda: util.import_table(os.path.join(chunked_dir, "lorem*.tsv"), {'schema': target_schema}),
"Util.import_table: Target table is not set. The target table for the import operation must be provided in the options."
)
#@<> Missing target table + multiple expanded paths
EXPECT_THROWS(lambda: util.import_table([os.path.join(chunked_dir, raw_files[0]), os.path.join(chunked_dir, raw_files[1])], {'schema': target_schema}),
"Util.import_table: Target table is not set. The target table for the import operation must be provided in the options."
)
#@<> Cleanup
session.close()
| 70.346939
| 366
| 0.723673
| 2,004
| 13,788
| 4.780439
| 0.101796
| 0.033194
| 0.106472
| 0.062109
| 0.848225
| 0.830271
| 0.813361
| 0.797808
| 0.777766
| 0.76096
| 0
| 0.023862
| 0.118581
| 13,788
| 195
| 367
| 70.707692
| 0.76442
| 0.078474
| 0
| 0.340741
| 0
| 0.022222
| 0.416739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014815
| false
| 0
| 0.355556
| 0.007407
| 0.385185
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 7
| 56edf46d536e7790e0537e964073edc04cf0426e
| 11,408
| py
| Python
| core/migrations/0001_initial.py
| Almeida92/projeto_tecweb_django
| 1eb2032abcce4a98e8cad02c37262d71b4c3982b
| ["Apache-2.0"] | null | null | null |
| core/migrations/0001_initial.py
| Almeida92/projeto_tecweb_django
| 1eb2032abcce4a98e8cad02c37262d71b4c3982b
| ["Apache-2.0"] | null | null | null |
| core/migrations/0001_initial.py
| Almeida92/projeto_tecweb_django
| 1eb2032abcce4a98e8cad02c37262d71b4c3982b
| ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-29 21:14
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Aluno',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ra', models.IntegerField(verbose_name='RA Aluno')),
('nome', models.CharField(max_length=120, verbose_name='Nome')),
('email', models.CharField(max_length=80, verbose_name='E-Mail')),
('celular', models.CharField(max_length=11, verbose_name='Celular')),
],
),
migrations.CreateModel(
name='ArquivoQuestao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='ArquivoResposta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Aluno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Aluno')),
],
),
migrations.CreateModel(
name='Curso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=50, verbose_name='Nome')),
('carga_horaria', models.IntegerField(verbose_name='Carga Horária')),
('professor', models.CharField(max_length=50, verbose_name='Coordenador')),
('tipo', models.CharField(max_length=50)),
('img', models.CharField(blank=True, max_length=255)),
('descricao', models.TextField(blank=True, verbose_name='Descrição')),
('ativo', models.BooleanField(default=True, verbose_name='Ativo?')),
],
),
migrations.CreateModel(
name='Disciplina',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=240, verbose_name='Nome')),
('carga_horaria', models.IntegerField(verbose_name='Carga Horária')),
('teoria', models.DecimalField(decimal_places=2, max_digits=3, verbose_name='Teoria')),
('pratica', models.DecimalField(decimal_places=2, max_digits=3, verbose_name='Pratica')),
('ementa', models.TextField(verbose_name='Ementa')),
('competencias', models.TextField(verbose_name='Competencias')),
('habilidades', models.TextField(verbose_name='Habilidades')),
('conteudo', models.TextField(verbose_name='Conteudo')),
('bibliografia_basica', models.TextField(verbose_name='Bibliografia Basica')),
('bibliografia_complementar', models.TextField(verbose_name='Bibliografia Complementar')),
],
),
migrations.CreateModel(
name='DisciplinaOfertada',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano', models.IntegerField(verbose_name='Ano')),
('semestre', models.IntegerField(verbose_name='Semestre')),
('disciplina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina')),
],
),
migrations.CreateModel(
name='GradeCurricular',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano', models.IntegerField(verbose_name='Ano')),
('semestre', models.IntegerField(verbose_name='Semestre')),
('curso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Curso')),
],
),
migrations.CreateModel(
name='Matricula',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Periodo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('numero', models.IntegerField(verbose_name='Numero')),
('curso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Curso')),
],
),
migrations.CreateModel(
name='PeriodoDisciplina',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('disciplina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina')),
('periodo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Periodo')),
],
),
migrations.CreateModel(
name='Professor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ra', models.IntegerField(verbose_name='RA')),
('apelido', models.CharField(max_length=30, unique=True, verbose_name='Apelido')),
('nome', models.CharField(max_length=120, verbose_name='Nome')),
('email', models.CharField(max_length=80, verbose_name='E-Mail')),
('celular', models.CharField(max_length=11, verbose_name='Celular')),
],
),
migrations.CreateModel(
name='Questao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data_limite', models.DateField(verbose_name='Data Limite')),
('descricao', models.TextField(verbose_name='Descricao')),
('data', models.DateField(verbose_name='Data')),
('disciplina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina')),
('disciplinaofertada', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.DisciplinaOfertada')),
],
),
migrations.CreateModel(
name='Resposta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data_avaliacao', models.DateField(default=datetime.date.today, verbose_name='Data Avaliação')),
('nota', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=2, verbose_name='Nota')),
('data_envio', models.DateField(default=datetime.date.today, verbose_name='Data Envio')),
('aluno', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.Aluno')),
('disciplina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina')),
('disciplinaofertada', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.DisciplinaOfertada')),
('questao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Questao')),
],
),
migrations.CreateModel(
name='Turma',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('turno', models.CharField(max_length=15, verbose_name='Turno')),
('disciplina', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina')),
('discplinaofertada', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.DisciplinaOfertada')),
('professor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Professor')),
],
),
migrations.AddField(
model_name='resposta',
name='turma',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Turma'),
),
migrations.AddField(
model_name='questao',
name='turma',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Turma'),
),
migrations.AddField(
model_name='matricula',
name='Turma',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Turma'),
),
migrations.AddField(
model_name='matricula',
name='aluno',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Aluno'),
),
migrations.AddField(
model_name='matricula',
name='disciplinaofertada',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.DisciplinaOfertada'),
),
migrations.AddField(
model_name='arquivoresposta',
name='disciplina',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina'),
),
migrations.AddField(
model_name='arquivoresposta',
name='disciplinaofertada',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.DisciplinaOfertada'),
),
migrations.AddField(
model_name='arquivoresposta',
name='questao',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Questao'),
),
migrations.AddField(
model_name='arquivoresposta',
name='turma',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Turma'),
),
migrations.AddField(
model_name='arquivoquestao',
name='disciplina',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Disciplina'),
),
migrations.AddField(
model_name='arquivoquestao',
name='disciplinaofertada',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.DisciplinaOfertada'),
),
migrations.AddField(
model_name='arquivoquestao',
name='questao',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Questao'),
),
migrations.AddField(
model_name='arquivoquestao',
name='turma',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Turma'),
),
migrations.AddField(
model_name='aluno',
name='id_curso',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Curso'),
),
]
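# Editor's usage sketch (assumes core/models.py defines the classes this
# migration creates; not part of the generated file). The FK added last wires
# Aluno to Curso, so a minimal object graph could be built in
# `python manage.py shell` like:
#
#   from core.models import Curso, Aluno
#   curso = Curso.objects.create(nome='Engenharia', carga_horaria=3600,
#                                professor='Dr. Silva', tipo='Graduacao')
#   aluno = Aluno.objects.create(ra=12345, nome='Maria Souza',
#                                email='maria@example.com',
#                                celular='11999990000', id_curso=curso)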
| 50.255507
| 133
| 0.596687
| 1,104
| 11,408
| 6.026268
| 0.11413
| 0.082669
| 0.063129
| 0.099203
| 0.784158
| 0.754998
| 0.738163
| 0.732301
| 0.732301
| 0.716068
| 0
| 0.006521
| 0.260694
| 11,408
| 226
| 134
| 50.477876
| 0.78231
| 0.005961
| 0
| 0.701835
| 1
| 0
| 0.143512
| 0.014378
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022936
| 0
| 0.041284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 711f09f089531628164cf9b6d9f37a36ab38bf77
| 2,394
| py
| Python
| parcial.py
| val182527/Algoritmos
| 1e2d4fb9213e5f6b33a83dd8ab31357d2da9281d
| ["MIT"] | null | null | null |
| parcial.py
| val182527/Algoritmos
| 1e2d4fb9213e5f6b33a83dd8ab31357d2da9281d
| ["MIT"] | null | null | null |
| parcial.py
| val182527/Algoritmos
| 1e2d4fb9213e5f6b33a83dd8ab31357d2da9281d
| ["MIT"] | null | null | null |
# Reads T rounds of "move1 move2" (papel / piedra / tijera / lagarto / Holk)
# and prints the outcome of each case: "¡LaVidaEsDura!" when the first move
# wins, "¡Siempre hay un proximo semestre!" when the second wins, and
# "¡Otra vez!" on a tie.
T = int(input())
lista = []
for i in range(T):
    lista.append(input())
for x in range(0,len(lista)):
if lista[x] == "papel tijera" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "papel piedra" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "papel lagarto" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "papel Holk" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "papel papel" :
print ("Caso #", x+1,": ¡Otra vez!")
elif lista[x] == "tijera papel" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "tijera piedra" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "tijera lagarto" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "tijera Holk" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "tijera tijera" :
print ("Caso #", x+1,": ¡Otra vez!")
elif lista[x] == "piedra papel" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "piedra tijera" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "piedra lagarto" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "piedra Holk" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "piedra piedra" :
print ("Caso #", x+1,": ¡Otra vez!")
elif lista[x] == "lagarto papel" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "lagarto tijera" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "lagarto piedra" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "lagarto Holk" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "lagarto lagarto" :
print ("Caso #", x+1,": ¡Otra vez!")
elif lista[x] == "Holk papel" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "Holk tijera" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "Holk piedra" :
print ("Caso #", x+1,": ¡LaVidaEsDura!")
elif lista[x] == "Holk lagarto" :
print ("Caso #", x+1,": ¡Siempre hay un proximo semestre!")
elif lista[x] == "Holk Holk" :
print ("Caso #", x+1,": ¡Otra vez!")
| 42
| 64
| 0.540936
| 342
| 2,394
| 3.859649
| 0.096491
| 0.113636
| 0.189394
| 0.208333
| 0.92197
| 0.92197
| 0.900758
| 0.886364
| 0.886364
| 0.886364
| 0
| 0.014286
| 0.239766
| 2,394
| 57
| 65
| 42
| 0.697253
| 0
| 0
| 0.446429
| 0
| 0
| 0.438221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.446429
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 9
| 8536ac7acb4cd035022aea90706ff82ccbeac0df
| 657,519
| py
| Python
| operators/keda/python/pulumi_pulumi_kubernetes_crds_operators_keda/keda/v1alpha1/_inputs.py
| pulumi/pulumi-kubernetes-crds
| 372c4c0182f6b899af82d6edaad521aa14f22150
| ["Apache-2.0"] | null | null | null |
| operators/keda/python/pulumi_pulumi_kubernetes_crds_operators_keda/keda/v1alpha1/_inputs.py
| pulumi/pulumi-kubernetes-crds
| 372c4c0182f6b899af82d6edaad521aa14f22150
| ["Apache-2.0"] | 2
| 2020-09-18T17:12:23.000Z
| 2020-12-30T19:40:56.000Z
| operators/keda/python/pulumi_pulumi_kubernetes_crds_operators_keda/keda/v1alpha1/_inputs.py
| pulumi/pulumi-kubernetes-crds
| 372c4c0182f6b899af82d6edaad521aa14f22150
| ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'KedaControllerSpecArgs',
'KedaControllerStatusArgs',
'ScaledObjectSpecArgs',
'ScaledObjectSpecJobTargetRefArgs',
'ScaledObjectSpecJobTargetRefSelectorArgs',
'ScaledObjectSpecJobTargetRefSelectorMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromConfigMapRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromConfigMapKeyRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromResourceFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromSecretKeyRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecycleArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersPortsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersResourcesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextCapabilitiesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextSeLinuxOptionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeDevicesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeMountsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigOptionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecHostAliasesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecImagePullSecretsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromConfigMapRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromConfigMapKeyRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromResourceFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromSecretKeyRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecycleArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersPortsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeExecArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersResourcesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextCapabilitiesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextSeLinuxOptionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeDevicesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeMountsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecReadinessGatesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSeLinuxOptionsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSysctlsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecTolerationsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesAwsElasticBlockStoreArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureDiskArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureFileArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapItemsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiNodePublishSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsResourceFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesEmptyDirArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesFcArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlockerArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesGcePersistentDiskArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesGitRepoArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesGlusterfsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesHostPathArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesNfsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesPersistentVolumeClaimArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesPhotonPersistentDiskArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesPortworxVolumeArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapItemsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsResourceFieldRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretItemsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesServiceAccountTokenArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesQuobyteArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretItemsArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosSecretRefArgs',
'ScaledObjectSpecJobTargetRefTemplateSpecVolumesVsphereVolumeArgs',
'ScaledObjectSpecScaleTargetRefArgs',
'ScaledObjectSpecTriggersArgs',
'ScaledObjectSpecTriggersAuthenticationRefArgs',
'ScaledObjectStatusArgs',
'TriggerAuthenticationSpecArgs',
'TriggerAuthenticationSpecEnvArgs',
'TriggerAuthenticationSpecHashiCorpVaultArgs',
'TriggerAuthenticationSpecHashiCorpVaultCredentialArgs',
'TriggerAuthenticationSpecHashiCorpVaultSecretsArgs',
'TriggerAuthenticationSpecPodIdentityArgs',
'TriggerAuthenticationSpecSecretTargetRefArgs',
]
@pulumi.input_type
class KedaControllerSpecArgs:
def __init__(__self__, *,
log_level: Optional[pulumi.Input[str]] = None,
log_level_metrics: Optional[pulumi.Input[str]] = None,
log_time_format: Optional[pulumi.Input[str]] = None,
watch_namespace: Optional[pulumi.Input[str]] = None):
"""
KedaControllerSpec defines the desired state of KedaController
"""
if log_level is not None:
pulumi.set(__self__, "log_level", log_level)
if log_level_metrics is not None:
pulumi.set(__self__, "log_level_metrics", log_level_metrics)
if log_time_format is not None:
pulumi.set(__self__, "log_time_format", log_time_format)
if watch_namespace is not None:
pulumi.set(__self__, "watch_namespace", watch_namespace)
@property
@pulumi.getter(name="logLevel")
def log_level(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "log_level")
@log_level.setter
def log_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_level", value)
@property
@pulumi.getter(name="logLevelMetrics")
def log_level_metrics(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "log_level_metrics")
@log_level_metrics.setter
def log_level_metrics(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_level_metrics", value)
@property
@pulumi.getter(name="logTimeFormat")
def log_time_format(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "log_time_format")
@log_time_format.setter
def log_time_format(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_time_format", value)
@property
@pulumi.getter(name="watchNamespace")
def watch_namespace(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "watch_namespace")
@watch_namespace.setter
def watch_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "watch_namespace", value)
@pulumi.input_type
class KedaControllerStatusArgs:
def __init__(__self__, *,
configmadatasum: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None,
secretdatasum: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
KedaControllerStatus defines the observed state of KedaController
"""
if configmadatasum is not None:
pulumi.set(__self__, "configmadatasum", configmadatasum)
if phase is not None:
pulumi.set(__self__, "phase", phase)
if reason is not None:
pulumi.set(__self__, "reason", reason)
if secretdatasum is not None:
pulumi.set(__self__, "secretdatasum", secretdatasum)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def configmadatasum(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "configmadatasum")
@configmadatasum.setter
def configmadatasum(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configmadatasum", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@property
@pulumi.getter
def secretdatasum(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "secretdatasum")
@secretdatasum.setter
def secretdatasum(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secretdatasum", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class ScaledObjectSpecArgs:
def __init__(__self__, *,
triggers: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecTriggersArgs']]],
cooldown_period: Optional[pulumi.Input[int]] = None,
job_target_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefArgs']] = None,
max_replica_count: Optional[pulumi.Input[int]] = None,
min_replica_count: Optional[pulumi.Input[int]] = None,
polling_interval: Optional[pulumi.Input[int]] = None,
scale_target_ref: Optional[pulumi.Input['ScaledObjectSpecScaleTargetRefArgs']] = None,
scale_type: Optional[pulumi.Input[str]] = None):
"""
ScaledObjectSpec is the spec for a ScaledObject resource
:param pulumi.Input['ScaledObjectSpecJobTargetRefArgs'] job_target_ref: JobSpec describes what the job execution will look like.
:param pulumi.Input['ScaledObjectSpecScaleTargetRefArgs'] scale_target_ref: ObjectReference holds a reference to the deployment this ScaledObject applies to
:param pulumi.Input[str] scale_type: ScaledObjectScaleType distinguishes between Deployment-based scaling and K8s Jobs
"""
pulumi.set(__self__, "triggers", triggers)
if cooldown_period is not None:
pulumi.set(__self__, "cooldown_period", cooldown_period)
if job_target_ref is not None:
pulumi.set(__self__, "job_target_ref", job_target_ref)
if max_replica_count is not None:
pulumi.set(__self__, "max_replica_count", max_replica_count)
if min_replica_count is not None:
pulumi.set(__self__, "min_replica_count", min_replica_count)
if polling_interval is not None:
pulumi.set(__self__, "polling_interval", polling_interval)
if scale_target_ref is not None:
pulumi.set(__self__, "scale_target_ref", scale_target_ref)
if scale_type is not None:
pulumi.set(__self__, "scale_type", scale_type)
@property
@pulumi.getter
def triggers(self) -> pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecTriggersArgs']]]:
return pulumi.get(self, "triggers")
@triggers.setter
def triggers(self, value: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecTriggersArgs']]]):
pulumi.set(self, "triggers", value)
@property
@pulumi.getter(name="cooldownPeriod")
def cooldown_period(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "cooldown_period")
@cooldown_period.setter
def cooldown_period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cooldown_period", value)
@property
@pulumi.getter(name="jobTargetRef")
def job_target_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefArgs']]:
"""
JobSpec describes what the job execution will look like.
"""
return pulumi.get(self, "job_target_ref")
@job_target_ref.setter
def job_target_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefArgs']]):
pulumi.set(self, "job_target_ref", value)
@property
@pulumi.getter(name="maxReplicaCount")
def max_replica_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_replica_count")
@max_replica_count.setter
def max_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_replica_count", value)
@property
@pulumi.getter(name="minReplicaCount")
def min_replica_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_replica_count")
@min_replica_count.setter
def min_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_replica_count", value)
@property
@pulumi.getter(name="pollingInterval")
def polling_interval(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "polling_interval")
@polling_interval.setter
def polling_interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "polling_interval", value)
@property
@pulumi.getter(name="scaleTargetRef")
def scale_target_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecScaleTargetRefArgs']]:
"""
ObjectReference holds a reference to the deployment this ScaledObject applies to
"""
return pulumi.get(self, "scale_target_ref")
@scale_target_ref.setter
def scale_target_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecScaleTargetRefArgs']]):
pulumi.set(self, "scale_target_ref", value)
@property
@pulumi.getter(name="scaleType")
def scale_type(self) -> Optional[pulumi.Input[str]]:
"""
ScaledObjectScaleType distinguishes between Deployment-based scaling and K8s Jobs
"""
return pulumi.get(self, "scale_type")
@scale_type.setter
def scale_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scale_type", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefArgs:
def __init__(__self__, *,
template: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateArgs'],
active_deadline_seconds: Optional[pulumi.Input[int]] = None,
backoff_limit: Optional[pulumi.Input[int]] = None,
completions: Optional[pulumi.Input[int]] = None,
manual_selector: Optional[pulumi.Input[bool]] = None,
parallelism: Optional[pulumi.Input[int]] = None,
selector: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorArgs']] = None,
ttl_seconds_after_finished: Optional[pulumi.Input[int]] = None):
"""
JobSpec describes what the job execution will look like.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateArgs'] template: Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input[int] active_deadline_seconds: Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; the value must be a positive integer
:param pulumi.Input[int] backoff_limit: Specifies the number of retries before marking this job failed. Defaults to 6
:param pulumi.Input[int] completions: Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input[bool] manual_selector: manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
:param pulumi.Input[int] parallelism: Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input['ScaledObjectSpecJobTargetRefSelectorArgs'] selector: A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:param pulumi.Input[int] ttl_seconds_after_finished: ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.
"""
pulumi.set(__self__, "template", template)
if active_deadline_seconds is not None:
pulumi.set(__self__, "active_deadline_seconds", active_deadline_seconds)
if backoff_limit is not None:
pulumi.set(__self__, "backoff_limit", backoff_limit)
if completions is not None:
pulumi.set(__self__, "completions", completions)
if manual_selector is not None:
pulumi.set(__self__, "manual_selector", manual_selector)
if parallelism is not None:
pulumi.set(__self__, "parallelism", parallelism)
if selector is not None:
pulumi.set(__self__, "selector", selector)
if ttl_seconds_after_finished is not None:
pulumi.set(__self__, "ttl_seconds_after_finished", ttl_seconds_after_finished)
@property
@pulumi.getter
def template(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateArgs']:
"""
Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "template")
@template.setter
def template(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateArgs']):
pulumi.set(self, "template", value)
@property
@pulumi.getter(name="activeDeadlineSeconds")
def active_deadline_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; the value must be a positive integer
"""
return pulumi.get(self, "active_deadline_seconds")
@active_deadline_seconds.setter
def active_deadline_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "active_deadline_seconds", value)
@property
@pulumi.getter(name="backoffLimit")
def backoff_limit(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of retries before marking this job failed. Defaults to 6
"""
return pulumi.get(self, "backoff_limit")
@backoff_limit.setter
def backoff_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "backoff_limit", value)
@property
@pulumi.getter
def completions(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "completions")
@completions.setter
def completions(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "completions", value)
@property
@pulumi.getter(name="manualSelector")
def manual_selector(self) -> Optional[pulumi.Input[bool]]:
"""
manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
"""
return pulumi.get(self, "manual_selector")
@manual_selector.setter
def manual_selector(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "manual_selector", value)
@property
@pulumi.getter
def parallelism(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
"""
return pulumi.get(self, "parallelism")
@parallelism.setter
def parallelism(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "parallelism", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorArgs']]:
"""
A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorArgs']]):
pulumi.set(self, "selector", value)
@property
@pulumi.getter(name="ttlSecondsAfterFinished")
def ttl_seconds_after_finished(self) -> Optional[pulumi.Input[int]]:
"""
ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.
"""
return pulumi.get(self, "ttl_seconds_after_finished")
@ttl_seconds_after_finished.setter
def ttl_seconds_after_finished(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl_seconds_after_finished", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateArgs:
def __init__(__self__, *,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
spec: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecArgs']] = None):
"""
Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
:param pulumi.Input[Mapping[str, Any]] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecArgs'] spec: Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
"""
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecArgs']]:
"""
Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecArgs']]):
pulumi.set(self, "spec", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecArgs:
def __init__(__self__, *,
containers: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersArgs']]],
active_deadline_seconds: Optional[pulumi.Input[int]] = None,
affinity: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityArgs']] = None,
automount_service_account_token: Optional[pulumi.Input[bool]] = None,
dns_config: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigArgs']] = None,
dns_policy: Optional[pulumi.Input[str]] = None,
enable_service_links: Optional[pulumi.Input[bool]] = None,
host_aliases: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecHostAliasesArgs']]]] = None,
host_ipc: Optional[pulumi.Input[bool]] = None,
host_network: Optional[pulumi.Input[bool]] = None,
host_pid: Optional[pulumi.Input[bool]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecImagePullSecretsArgs']]]] = None,
init_containers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersArgs']]]] = None,
node_name: Optional[pulumi.Input[str]] = None,
node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
priority: Optional[pulumi.Input[int]] = None,
priority_class_name: Optional[pulumi.Input[str]] = None,
readiness_gates: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecReadinessGatesArgs']]]] = None,
restart_policy: Optional[pulumi.Input[str]] = None,
runtime_class_name: Optional[pulumi.Input[str]] = None,
scheduler_name: Optional[pulumi.Input[str]] = None,
security_context: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextArgs']] = None,
service_account: Optional[pulumi.Input[str]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
share_process_namespace: Optional[pulumi.Input[bool]] = None,
subdomain: Optional[pulumi.Input[str]] = None,
termination_grace_period_seconds: Optional[pulumi.Input[int]] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecTolerationsArgs']]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesArgs']]]] = None):
"""
Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersArgs']]] containers: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.
:param pulumi.Input[int] active_deadline_seconds: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityArgs'] affinity: If specified, the pod's scheduling constraints.
:param pulumi.Input[bool] automount_service_account_token: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigArgs'] dns_config: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
:param pulumi.Input[str] dns_policy: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to explicitly set the DNS policy to 'ClusterFirstWithHostNet'.
:param pulumi.Input[bool] enable_service_links: EnableServiceLinks indicates whether information about services should be injected into the pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecHostAliasesArgs']]] host_aliases: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.
:param pulumi.Input[bool] host_ipc: Use the host's ipc namespace. Optional: Defaults to false.
:param pulumi.Input[bool] host_network: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Defaults to false.
:param pulumi.Input[bool] host_pid: Use the host's pid namespace. Optional: Defaults to false.
:param pulumi.Input[str] hostname: Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecImagePullSecretsArgs']]] image_pull_secrets: ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersArgs']]] init_containers: List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
:param pulumi.Input[str] node_name: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_selector: NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
:param pulumi.Input[int] priority: The priority value. Various system components use this field to find the priority of the pod. When the Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.
:param pulumi.Input[str] priority_class_name: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecReadinessGatesArgs']]] readiness_gates: If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True". More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
:param pulumi.Input[str] restart_policy: Restart policy for all containers within the pod. One of Always, OnFailure, Never. Defaults to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
:param pulumi.Input[str] runtime_class_name: RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.
:param pulumi.Input[str] scheduler_name: If specified, the pod will be dispatched by the specified scheduler. If not specified, the pod will be dispatched by the default scheduler.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextArgs'] security_context: SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:param pulumi.Input[str] service_account: DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.
:param pulumi.Input[str] service_account_name: ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
:param pulumi.Input[bool] share_process_namespace: Share a single process namespace between all of the containers in a pod. When this is set, containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Defaults to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
:param pulumi.Input[str] subdomain: If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domain name at all.
:param pulumi.Input[int] termination_grace_period_seconds: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in the delete request. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecTolerationsArgs']]] tolerations: If specified, the pod's tolerations.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesArgs']]] volumes: List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes
"""
pulumi.set(__self__, "containers", containers)
if active_deadline_seconds is not None:
pulumi.set(__self__, "active_deadline_seconds", active_deadline_seconds)
if affinity is not None:
pulumi.set(__self__, "affinity", affinity)
if automount_service_account_token is not None:
pulumi.set(__self__, "automount_service_account_token", automount_service_account_token)
if dns_config is not None:
pulumi.set(__self__, "dns_config", dns_config)
if dns_policy is not None:
pulumi.set(__self__, "dns_policy", dns_policy)
if enable_service_links is not None:
pulumi.set(__self__, "enable_service_links", enable_service_links)
if host_aliases is not None:
pulumi.set(__self__, "host_aliases", host_aliases)
if host_ipc is not None:
pulumi.set(__self__, "host_ipc", host_ipc)
if host_network is not None:
pulumi.set(__self__, "host_network", host_network)
if host_pid is not None:
pulumi.set(__self__, "host_pid", host_pid)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if image_pull_secrets is not None:
pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
if init_containers is not None:
pulumi.set(__self__, "init_containers", init_containers)
if node_name is not None:
pulumi.set(__self__, "node_name", node_name)
if node_selector is not None:
pulumi.set(__self__, "node_selector", node_selector)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if priority_class_name is not None:
pulumi.set(__self__, "priority_class_name", priority_class_name)
if readiness_gates is not None:
pulumi.set(__self__, "readiness_gates", readiness_gates)
if restart_policy is not None:
pulumi.set(__self__, "restart_policy", restart_policy)
if runtime_class_name is not None:
pulumi.set(__self__, "runtime_class_name", runtime_class_name)
if scheduler_name is not None:
pulumi.set(__self__, "scheduler_name", scheduler_name)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if share_process_namespace is not None:
pulumi.set(__self__, "share_process_namespace", share_process_namespace)
if subdomain is not None:
pulumi.set(__self__, "subdomain", subdomain)
if termination_grace_period_seconds is not None:
pulumi.set(__self__, "termination_grace_period_seconds", termination_grace_period_seconds)
if tolerations is not None:
pulumi.set(__self__, "tolerations", tolerations)
if volumes is not None:
pulumi.set(__self__, "volumes", volumes)
@property
@pulumi.getter
def containers(self) -> pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersArgs']]]:
"""
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.
"""
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersArgs']]]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter(name="activeDeadlineSeconds")
def active_deadline_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
"""
return pulumi.get(self, "active_deadline_seconds")
@active_deadline_seconds.setter
def active_deadline_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "active_deadline_seconds", value)
@property
@pulumi.getter
def affinity(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityArgs']]:
"""
If specified, the pod's scheduling constraints.
"""
return pulumi.get(self, "affinity")
@affinity.setter
def affinity(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityArgs']]):
pulumi.set(self, "affinity", value)
@property
@pulumi.getter(name="automountServiceAccountToken")
def automount_service_account_token(self) -> Optional[pulumi.Input[bool]]:
"""
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
"""
return pulumi.get(self, "automount_service_account_token")
@automount_service_account_token.setter
def automount_service_account_token(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automount_service_account_token", value)
@property
@pulumi.getter(name="dnsConfig")
def dns_config(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigArgs']]:
"""
Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
"""
return pulumi.get(self, "dns_config")
@dns_config.setter
def dns_config(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigArgs']]):
pulumi.set(self, "dns_config", value)
@property
@pulumi.getter(name="dnsPolicy")
def dns_policy(self) -> Optional[pulumi.Input[str]]:
"""
Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to explicitly set the DNS policy to 'ClusterFirstWithHostNet'.
"""
return pulumi.get(self, "dns_policy")
@dns_policy.setter
def dns_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_policy", value)
@property
@pulumi.getter(name="enableServiceLinks")
def enable_service_links(self) -> Optional[pulumi.Input[bool]]:
"""
EnableServiceLinks indicates whether information about services should be injected into the pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.
"""
return pulumi.get(self, "enable_service_links")
@enable_service_links.setter
def enable_service_links(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_service_links", value)
@property
@pulumi.getter(name="hostAliases")
def host_aliases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecHostAliasesArgs']]]]:
"""
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.
"""
return pulumi.get(self, "host_aliases")
@host_aliases.setter
def host_aliases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecHostAliasesArgs']]]]):
pulumi.set(self, "host_aliases", value)
@property
@pulumi.getter(name="hostIPC")
def host_ipc(self) -> Optional[pulumi.Input[bool]]:
"""
Use the host's ipc namespace. Optional: Defaults to false.
"""
return pulumi.get(self, "host_ipc")
@host_ipc.setter
def host_ipc(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_ipc", value)
@property
@pulumi.getter(name="hostNetwork")
def host_network(self) -> Optional[pulumi.Input[bool]]:
"""
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Defaults to false.
"""
return pulumi.get(self, "host_network")
@host_network.setter
def host_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_network", value)
@property
@pulumi.getter(name="hostPID")
def host_pid(self) -> Optional[pulumi.Input[bool]]:
"""
Use the host's pid namespace. Optional: Defaults to false.
"""
return pulumi.get(self, "host_pid")
@host_pid.setter
def host_pid(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_pid", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="imagePullSecrets")
def image_pull_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecImagePullSecretsArgs']]]]:
"""
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
"""
return pulumi.get(self, "image_pull_secrets")
@image_pull_secrets.setter
def image_pull_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecImagePullSecretsArgs']]]]):
pulumi.set(self, "image_pull_secrets", value)
@property
@pulumi.getter(name="initContainers")
def init_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersArgs']]]]:
"""
List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
"""
return pulumi.get(self, "init_containers")
@init_containers.setter
def init_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersArgs']]]]):
pulumi.set(self, "init_containers", value)
@property
@pulumi.getter(name="nodeName")
def node_name(self) -> Optional[pulumi.Input[str]]:
"""
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
"""
return pulumi.get(self, "node_name")
@node_name.setter
def node_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_name", value)
@property
@pulumi.getter(name="nodeSelector")
def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
NodeSelector is a selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
"""
return pulumi.get(self, "node_selector")
@node_selector.setter
def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "node_selector", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
The priority value. Various system components use this field to find the priority of the pod. When the Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="priorityClassName")
def priority_class_name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.
"""
return pulumi.get(self, "priority_class_name")
@priority_class_name.setter
def priority_class_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority_class_name", value)
@property
@pulumi.getter(name="readinessGates")
def readiness_gates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecReadinessGatesArgs']]]]:
"""
If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True". More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
"""
return pulumi.get(self, "readiness_gates")
@readiness_gates.setter
def readiness_gates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecReadinessGatesArgs']]]]):
pulumi.set(self, "readiness_gates", value)
@property
@pulumi.getter(name="restartPolicy")
def restart_policy(self) -> Optional[pulumi.Input[str]]:
"""
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Defaults to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
"""
return pulumi.get(self, "restart_policy")
@restart_policy.setter
def restart_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restart_policy", value)
@property
@pulumi.getter(name="runtimeClassName")
def runtime_class_name(self) -> Optional[pulumi.Input[str]]:
"""
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.
"""
return pulumi.get(self, "runtime_class_name")
@runtime_class_name.setter
def runtime_class_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "runtime_class_name", value)
@property
@pulumi.getter(name="schedulerName")
def scheduler_name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, the pod will be dispatched by the specified scheduler. If not specified, the pod will be dispatched by the default scheduler.
"""
return pulumi.get(self, "scheduler_name")
@scheduler_name.setter
def scheduler_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheduler_name", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextArgs']]:
"""
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input[str]]:
"""
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.
"""
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
"""
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
"""
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter(name="shareProcessNamespace")
def share_process_namespace(self) -> Optional[pulumi.Input[bool]]:
"""
Share a single process namespace between all of the containers in a pod. When this is set, containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Defaults to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
"""
return pulumi.get(self, "share_process_namespace")
@share_process_namespace.setter
def share_process_namespace(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "share_process_namespace", value)
@property
@pulumi.getter
def subdomain(self) -> Optional[pulumi.Input[str]]:
"""
If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domain name at all.
"""
return pulumi.get(self, "subdomain")
@subdomain.setter
def subdomain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subdomain", value)
@property
@pulumi.getter(name="terminationGracePeriodSeconds")
def termination_grace_period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in the delete request. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
"""
return pulumi.get(self, "termination_grace_period_seconds")
@termination_grace_period_seconds.setter
def termination_grace_period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "termination_grace_period_seconds", value)
@property
@pulumi.getter
def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecTolerationsArgs']]]]:
"""
If specified, the pod's tolerations.
"""
return pulumi.get(self, "tolerations")
@tolerations.setter
def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecTolerationsArgs']]]]):
pulumi.set(self, "tolerations", value)
@property
@pulumi.getter
def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesArgs']]]]:
"""
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes
"""
return pulumi.get(self, "volumes")
@volumes.setter
def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesArgs']]]]):
pulumi.set(self, "volumes", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityArgs:
def __init__(__self__, *,
node_affinity: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityArgs']] = None,
pod_affinity: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs']] = None,
pod_anti_affinity: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs']] = None):
"""
If specified, the pod's scheduling constraints.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityArgs'] node_affinity: Describes node affinity scheduling rules for the pod.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs'] pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs'] pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
"""
if node_affinity is not None:
pulumi.set(__self__, "node_affinity", node_affinity)
if pod_affinity is not None:
pulumi.set(__self__, "pod_affinity", pod_affinity)
if pod_anti_affinity is not None:
pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)
@property
@pulumi.getter(name="nodeAffinity")
def node_affinity(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityArgs']]:
"""
Describes node affinity scheduling rules for the pod.
"""
return pulumi.get(self, "node_affinity")
@node_affinity.setter
def node_affinity(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityArgs']]):
pulumi.set(self, "node_affinity", value)
@property
@pulumi.getter(name="podAffinity")
def pod_affinity(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs']]:
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
"""
return pulumi.get(self, "pod_affinity")
@pod_affinity.setter
def pod_affinity(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs']]):
pulumi.set(self, "pod_affinity", value)
@property
@pulumi.getter(name="podAntiAffinity")
def pod_anti_affinity(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs']]:
"""
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
"""
return pulumi.get(self, "pod_anti_affinity")
@pod_anti_affinity.setter
def pod_anti_affinity(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs']]):
pulumi.set(self, "pod_anti_affinity", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityArgs:
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None,
required_during_scheduling_ignored_during_execution: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']] = None):
"""
Describes node affinity scheduling rules for the pod.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@preferred_during_scheduling_ignored_during_execution.setter
def preferred_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "preferred_during_scheduling_ignored_during_execution", value)
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]:
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
@required_during_scheduling_ignored_during_execution.setter
def required_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]):
pulumi.set(self, "required_during_scheduling_ignored_during_execution", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
preference: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs'],
weight: pulumi.Input[int]):
"""
An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs'] preference: A node selector term, associated with the corresponding weight.
:param pulumi.Input[int] weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
"""
pulumi.set(__self__, "preference", preference)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def preference(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs']:
"""
A node selector term, associated with the corresponding weight.
"""
return pulumi.get(self, "preference")
@preference.setter
def preference(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs']):
pulumi.set(self, "preference", value)
@property
@pulumi.getter
def weight(self) -> pulumi.Input[int]:
"""
Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: pulumi.Input[int]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]]] = None,
match_fields: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]]] = None):
"""
A node selector term, associated with the corresponding weight.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]] match_expressions: A list of node selector requirements by node's labels.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]] match_fields: A list of node selector requirements by node's fields.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_fields is not None:
pulumi.set(__self__, "match_fields", match_fields)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]]]:
"""
A list of node selector requirements by node's labels.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchFields")
def match_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]]]:
"""
A list of node selector requirements by node's fields.
"""
return pulumi.get(self, "match_fields")
@match_fields.setter
def match_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]]]):
pulumi.set(self, "match_fields", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
node_selector_terms: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]]):
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
"""
pulumi.set(__self__, "node_selector_terms", node_selector_terms)
@property
@pulumi.getter(name="nodeSelectorTerms")
def node_selector_terms(self) -> pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]]:
"""
Required. A list of node selector terms. The terms are ORed.
"""
return pulumi.get(self, "node_selector_terms")
@node_selector_terms.setter
def node_selector_terms(self, value: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]]):
pulumi.set(self, "node_selector_terms", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]]] = None,
match_fields: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]]] = None):
"""
A null or empty node selector term matches no objects; the requirements within a single term are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]] match_expressions: A list of node selector requirements by node's labels.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]] match_fields: A list of node selector requirements by node's fields.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_fields is not None:
pulumi.set(__self__, "match_fields", match_fields)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]]]:
"""
A list of node selector requirements by node's labels.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchFields")
def match_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]]]:
"""
A list of node selector requirements by node's fields.
"""
return pulumi.get(self, "match_fields")
@match_fields.setter
def match_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]]]):
pulumi.set(self, "match_fields", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs:
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None,
required_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None):
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@preferred_during_scheduling_ignored_during_execution.setter
def preferred_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "preferred_during_scheduling_ignored_during_execution", value)
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
@required_during_scheduling_ignored_during_execution.setter
def required_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "required_during_scheduling_ignored_during_execution", value)
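# Illustrative usage (not part of the generated code): both fields are
# optional lists. required_... terms are hard constraints intersected at
# scheduling time, while preferred_... terms only weight the node score.
# The weighted_term variable is hypothetical; see the examples after the
# classes below.
#
#   pod_affinity = ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityArgs(
#       preferred_during_scheduling_ignored_during_execution=[weighted_term],
#   )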
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
pod_affinity_term: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'],
weight: pulumi.Input[int]):
"""
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'] pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[int] weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="podAffinityTerm")
def pod_affinity_term(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']:
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
return pulumi.get(self, "pod_affinity_term")
@pod_affinity_term.setter
def pod_affinity_term(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']):
pulumi.set(self, "pod_affinity_term", value)
@property
@pulumi.getter
def weight(self) -> pulumi.Input[int]:
"""
weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: pulumi.Input[int]):
pulumi.set(self, "weight", value)
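# Illustrative usage (not part of the generated code): a weighted entry pairs
# a pod affinity term with a weight in the range 1-100; higher weights make
# nodes satisfying the term score higher. co_locate_term is hypothetical and
# sketched after the next class.
#
#   weighted_term = ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs(
#       pod_affinity_term=co_locate_term,
#       weight=80,
#   )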
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
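# Illustrative usage (not part of the generated code): topology_key must be
# non-empty; "kubernetes.io/hostname" co-locates matching pods on the same
# node, while a zone label key would co-locate them within a zone.
# cache_selector is hypothetical and sketched after the next class.
#
#   co_locate_term = ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs(
#       topology_key="kubernetes.io/hostname",
#       label_selector=cache_selector,
#   )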
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
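# Illustrative usage (not part of the generated code): per the docstring
# above, a match_labels entry {"app": "cache"} is shorthand for a
# match_expressions requirement with key "app", operator "In", and values
# ["cache"]. The label is hypothetical.
#
#   cache_selector = ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs(
#       match_labels={"app": "cache"},
#   )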
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
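# Illustrative usage (not part of the generated code): with the Exists or
# DoesNotExist operators the values list must be empty (here simply omitted),
# per the docstring above; the key is hypothetical.
#
#   has_tier = ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs(
#       key="tier",
#       operator="Exists",
#   )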
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs:
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None,
required_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None):
"""
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]] required_during_scheduling_ignored_during_execution: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@preferred_during_scheduling_ignored_during_execution.setter
def preferred_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "preferred_during_scheduling_ignored_during_execution", value)
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
@required_during_scheduling_ignored_during_execution.setter
def required_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "required_during_scheduling_ignored_during_execution", value)
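# Illustrative usage (not part of the generated code): anti-affinity mirrors
# the pod affinity shape and is commonly used to spread replicas, e.g. a
# required term keyed on "kubernetes.io/hostname" keeps pods matching the
# selector off nodes already running such pods. anti_term is hypothetical.
#
#   pod_anti_affinity = ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityArgs(
#       required_during_scheduling_ignored_during_execution=[anti_term],
#   )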
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
pod_affinity_term: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'],
weight: pulumi.Input[int]):
"""
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'] pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[int] weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="podAffinityTerm")
def pod_affinity_term(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']:
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
return pulumi.get(self, "pod_affinity_term")
@pod_affinity_term.setter
def pod_affinity_term(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']):
pulumi.set(self, "pod_affinity_term", value)
@property
@pulumi.getter
def weight(self) -> pulumi.Input[int]:
"""
weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: pulumi.Input[int]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvArgs']]]] = None,
env_from: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
image_pull_policy: Optional[pulumi.Input[str]] = None,
lifecycle: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecycleArgs']] = None,
liveness_probe: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersPortsArgs']]]] = None,
readiness_probe: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeArgs']] = None,
resources: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersResourcesArgs']] = None,
security_context: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextArgs']] = None,
stdin: Optional[pulumi.Input[bool]] = None,
stdin_once: Optional[pulumi.Input[bool]] = None,
termination_message_path: Optional[pulumi.Input[str]] = None,
termination_message_policy: Optional[pulumi.Input[str]] = None,
tty: Optional[pulumi.Input[bool]] = None,
volume_devices: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeDevicesArgs']]]] = None,
volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeMountsArgs']]]] = None,
working_dir: Optional[pulumi.Input[str]] = None):
"""
A single application container that you want to run within a pod.
:param pulumi.Input[str] name: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint. The Docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Entrypoint array. Not executed within a shell. The Docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvArgs']]] env: List of environment variables to set in the container. Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromArgs']]] env_from: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
:param pulumi.Input[str] image: Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images. This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
:param pulumi.Input[str] image_pull_policy: Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecycleArgs'] lifecycle: Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeArgs'] liveness_probe: Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersPortsArgs']]] ports: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeArgs'] readiness_probe: Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersResourcesArgs'] resources: Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextArgs'] security_context: Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] stdin: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param pulumi.Input[bool] stdin_once: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:param pulumi.Input[str] termination_message_path: Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
:param pulumi.Input[str] termination_message_policy: Indicates how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
:param pulumi.Input[bool] tty: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeDevicesArgs']]] volume_devices: volumeDevices is the list of block devices to be used by the container. This is a beta feature.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeMountsArgs']]] volume_mounts: Pod volumes to mount into the container's filesystem. Cannot be updated.
:param pulumi.Input[str] working_dir: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
pulumi.set(__self__, "name", name)
if args is not None:
pulumi.set(__self__, "args", args)
if command is not None:
pulumi.set(__self__, "command", command)
if env is not None:
pulumi.set(__self__, "env", env)
if env_from is not None:
pulumi.set(__self__, "env_from", env_from)
if image is not None:
pulumi.set(__self__, "image", image)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
if lifecycle is not None:
pulumi.set(__self__, "lifecycle", lifecycle)
if liveness_probe is not None:
pulumi.set(__self__, "liveness_probe", liveness_probe)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if readiness_probe is not None:
pulumi.set(__self__, "readiness_probe", readiness_probe)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if stdin_once is not None:
pulumi.set(__self__, "stdin_once", stdin_once)
if termination_message_path is not None:
pulumi.set(__self__, "termination_message_path", termination_message_path)
if termination_message_policy is not None:
pulumi.set(__self__, "termination_message_policy", termination_message_policy)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if volume_devices is not None:
pulumi.set(__self__, "volume_devices", volume_devices)
if volume_mounts is not None:
pulumi.set(__self__, "volume_mounts", volume_mounts)
if working_dir is not None:
pulumi.set(__self__, "working_dir", working_dir)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "args")
@args.setter
def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "args", value)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvArgs']]]]:
"""
List of environment variables to set in the container. Cannot be updated.
"""
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="envFrom")
def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromArgs']]]]:
"""
List of sources to populate environment variables in the container. Each key defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
"""
return pulumi.get(self, "env_from")
@env_from.setter
def env_from(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromArgs']]]]):
pulumi.set(self, "env_from", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[pulumi.Input[str]]:
"""
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
"""
return pulumi.get(self, "image_pull_policy")
@image_pull_policy.setter
def image_pull_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_pull_policy", value)
@property
@pulumi.getter
def lifecycle(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecycleArgs']]:
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
"""
return pulumi.get(self, "lifecycle")
@lifecycle.setter
def lifecycle(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecycleArgs']]):
pulumi.set(self, "lifecycle", value)
@property
@pulumi.getter(name="livenessProbe")
def liveness_probe(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeArgs']]:
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "liveness_probe")
@liveness_probe.setter
def liveness_probe(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeArgs']]):
pulumi.set(self, "liveness_probe", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersPortsArgs']]]]:
"""
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="readinessProbe")
def readiness_probe(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeArgs']]:
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "readiness_probe")
@readiness_probe.setter
def readiness_probe(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeArgs']]):
pulumi.set(self, "readiness_probe", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersResourcesArgs']]:
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersResourcesArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextArgs']]:
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter(name="stdinOnce")
def stdin_once(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
"""
return pulumi.get(self, "stdin_once")
@stdin_once.setter
def stdin_once(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin_once", value)
@property
@pulumi.getter(name="terminationMessagePath")
def termination_message_path(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. The message written is intended to be a brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
"""
return pulumi.get(self, "termination_message_path")
@termination_message_path.setter
def termination_message_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_path", value)
@property
@pulumi.getter(name="terminationMessagePolicy")
def termination_message_policy(self) -> Optional[pulumi.Input[str]]:
"""
Indicates how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
"""
return pulumi.get(self, "termination_message_policy")
@termination_message_policy.setter
def termination_message_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_policy", value)
@property
@pulumi.getter
def tty(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
"""
return pulumi.get(self, "tty")
@tty.setter
def tty(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "tty", value)
@property
@pulumi.getter(name="volumeDevices")
def volume_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeDevicesArgs']]]]:
"""
volumeDevices is the list of block devices to be used by the container. This is a beta feature.
"""
return pulumi.get(self, "volume_devices")
@volume_devices.setter
def volume_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeDevicesArgs']]]]):
pulumi.set(self, "volume_devices", value)
@property
@pulumi.getter(name="volumeMounts")
def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeMountsArgs']]]]:
"""
Pod volumes to mount into the container's filesystem. Cannot be updated.
"""
return pulumi.get(self, "volume_mounts")
@volume_mounts.setter
def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeMountsArgs']]]]):
pulumi.set(self, "volume_mounts", value)
@property
@pulumi.getter(name="workingDir")
def working_dir(self) -> Optional[pulumi.Input[str]]:
"""
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
return pulumi.get(self, "working_dir")
@working_dir.setter
def working_dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "working_dir", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
:param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromArgs:
def __init__(__self__, *,
config_map_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromConfigMapRefArgs']] = None,
prefix: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromSecretRefArgs']] = None):
"""
EnvFromSource represents the source of a set of ConfigMaps
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from
:param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromSecretRefArgs'] secret_ref: The Secret to select from
"""
if config_map_ref is not None:
pulumi.set(__self__, "config_map_ref", config_map_ref)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="configMapRef")
def config_map_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromConfigMapRefArgs']]:
"""
The ConfigMap to select from
"""
return pulumi.get(self, "config_map_ref")
@config_map_ref.setter
def config_map_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromConfigMapRefArgs']]):
pulumi.set(self, "config_map_ref", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromSecretRefArgs']]:
"""
The Secret to select from
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromConfigMapRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The ConfigMap to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the ConfigMap must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvFromSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The Secret to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input[str]] = None):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input[str] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecycleArgs:
def __init__(__self__, *,
post_start: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartArgs']] = None,
pre_stop: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopArgs']] = None):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartArgs'] post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopArgs'] pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartArgs']]:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@post_start.setter
def post_start(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartArgs']]):
pulumi.set(self, "post_start", value)
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopArgs']]:
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
@pre_stop.setter
def pre_stop(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopArgs']]):
pulumi.set(self, "pre_stop", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartExecArgs']] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketArgs']] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. An exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. An exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartHttpGetPortArgs:
def __init__(__self__):
pass
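# Note: this *PortArgs class (like its PostStart/PreStop Tcp/Http siblings
# below) is generated with no fields, presumably because the upstream CRD
# models the port as an int-or-string union the code generator could not
# express; the port number or IANA_SVC_NAME would otherwise be carried here.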
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopExecArgs']] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketArgs']] = None):
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. An exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. An exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopHttpGetPortArgs:
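# Generated placeholder: the Kubernetes "port" field here is an int-or-string
# value ("Name or number of the port"), for which the CRD schema evidently
# yields no typed properties; the same applies to the other empty *PortArgs
# classes in this module.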
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported by Kubernetes.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeExecArgs'] exec_: One and only one of exec, httpGet, or tcpSocket should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeExecArgs']]:
"""
One and only one of exec, httpGet, or tcpSocket should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of exec, httpGet, or tcpSocket should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersPortsArgs:
def __init__(__self__, *,
container_port: pulumi.Input[int],
host_ip: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None):
"""
ContainerPort represents a network port in a single container.
:param pulumi.Input[int] container_port: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
:param pulumi.Input[str] host_ip: What host IP to bind the external port to.
:param pulumi.Input[int] host_port: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
:param pulumi.Input[str] name: If specified, this must be an IANA_SVC_NAME and unique within the pod; each named port in a pod must have a unique name. Services can refer to the port by this name.
:param pulumi.Input[str] protocol: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
pulumi.set(__self__, "container_port", container_port)
if host_ip is not None:
pulumi.set(__self__, "host_ip", host_ip)
if host_port is not None:
pulumi.set(__self__, "host_port", host_port)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> pulumi.Input[int]:
"""
Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
"""
return pulumi.get(self, "container_port")
@container_port.setter
def container_port(self, value: pulumi.Input[int]):
pulumi.set(self, "container_port", value)
@property
@pulumi.getter(name="hostIP")
def host_ip(self) -> Optional[pulumi.Input[str]]:
"""
What host IP to bind the external port to.
"""
return pulumi.get(self, "host_ip")
@host_ip.setter
def host_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_ip", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input[int]]:
"""
Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, this must be an IANA_SVC_NAME and unique within the pod; each named port in a pod must have a unique name. Services can refer to the port by this name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeExecArgs'] exec_: One and only one of exec, httpGet, or tcpSocket should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeExecArgs']]:
"""
One and only one of exec, httpGet, or tcpSocket should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of exec, httpGet, or tcpSocket should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersReadinessProbeTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextArgs:
def __init__(__self__, *,
allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
capabilities: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextCapabilitiesArgs']] = None,
privileged: Optional[pulumi.Input[bool]] = None,
proc_mount: Optional[pulumi.Input[str]] = None,
read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextSeLinuxOptionsArgs']] = None):
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is (1) run as Privileged or (2) has CAP_SYS_ADMIN.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextCapabilitiesArgs'] capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[bool] privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param pulumi.Input[str] proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param pulumi.Input[bool] read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is (1) run as Privileged or (2) has CAP_SYS_ADMIN.
"""
return pulumi.get(self, "allow_privilege_escalation")
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_privilege_escalation", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextCapabilitiesArgs']]:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextCapabilitiesArgs']]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def privileged(self) -> Optional[pulumi.Input[bool]]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@privileged.setter
def privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "privileged", value)
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[pulumi.Input[str]]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@proc_mount.setter
def proc_mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proc_mount", value)
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only_root_filesystem", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextCapabilitiesArgs:
def __init__(__self__, *,
add: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
drop: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[Sequence[pulumi.Input[str]]] add: Added capabilities
:param pulumi.Input[Sequence[pulumi.Input[str]]] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@add.setter
def add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "add", value)
@property
@pulumi.getter
def drop(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
@drop.setter
def drop(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "drop", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeDevicesArgs:
def __init__(__self__, *,
device_path: pulumi.Input[str],
name: pulumi.Input[str]):
"""
volumeDevice describes a mapping of a raw block device within a container.
:param pulumi.Input[str] device_path: devicePath is the path inside of the container that the device will be mapped to.
:param pulumi.Input[str] name: name must match the name of a persistentVolumeClaim in the pod
"""
pulumi.set(__self__, "device_path", device_path)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="devicePath")
def device_path(self) -> pulumi.Input[str]:
"""
devicePath is the path inside of the container that the device will be mapped to.
"""
return pulumi.get(self, "device_path")
@device_path.setter
def device_path(self, value: pulumi.Input[str]):
pulumi.set(self, "device_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name must match the name of a persistentVolumeClaim in the pod
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecContainersVolumeMountsArgs:
def __init__(__self__, *,
mount_path: pulumi.Input[str],
name: pulumi.Input[str],
mount_propagation: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
sub_path: Optional[pulumi.Input[str]] = None,
sub_path_expr: Optional[pulumi.Input[str]] = None):
"""
VolumeMount describes a mounting of a Volume within a container.
:param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
:param pulumi.Input[str] name: This must match the Name of a Volume.
:param pulumi.Input[str] mount_propagation: mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
:param pulumi.Input[bool] read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param pulumi.Input[str] sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param pulumi.Input[str] sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath, but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> pulumi.Input[str]:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@mount_path.setter
def mount_path(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[pulumi.Input[str]]:
"""
mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@mount_propagation.setter
def mount_propagation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_propagation", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[pulumi.Input[str]]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@sub_path.setter
def sub_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path", value)
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[pulumi.Input[str]]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath, but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.
"""
return pulumi.get(self, "sub_path_expr")
@sub_path_expr.setter
def sub_path_expr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path_expr", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigArgs:
def __init__(__self__, *,
nameservers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigOptionsArgs']]]] = None,
searches: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] nameservers: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigOptionsArgs']]] options: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] searches: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.
"""
if nameservers is not None:
pulumi.set(__self__, "nameservers", nameservers)
if options is not None:
pulumi.set(__self__, "options", options)
if searches is not None:
pulumi.set(__self__, "searches", searches)
@property
@pulumi.getter
def nameservers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.
"""
return pulumi.get(self, "nameservers")
@nameservers.setter
def nameservers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "nameservers", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigOptionsArgs']]]]:
"""
A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigOptionsArgs']]]]):
pulumi.set(self, "options", value)
@property
@pulumi.getter
def searches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.
"""
return pulumi.get(self, "searches")
@searches.setter
def searches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "searches", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecDnsConfigOptionsArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
PodDNSConfigOption defines DNS resolver options of a pod.
:param pulumi.Input[str] name: Required.
:param pulumi.Input[str] value: Value of the DNS resolver option, if any.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Required.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Value of the DNS resolver option, if any.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecHostAliasesArgs:
def __init__(__self__, *,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ip: Optional[pulumi.Input[str]] = None):
"""
HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
:param pulumi.Input[Sequence[pulumi.Input[str]]] hostnames: Hostnames for the above IP address.
:param pulumi.Input[str] ip: IP address of the host file entry.
"""
if hostnames is not None:
pulumi.set(__self__, "hostnames", hostnames)
if ip is not None:
pulumi.set(__self__, "ip", ip)
@property
@pulumi.getter
def hostnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Hostnames for the above IP address.
"""
return pulumi.get(self, "hostnames")
@hostnames.setter
def hostnames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "hostnames", value)
@property
@pulumi.getter
def ip(self) -> Optional[pulumi.Input[str]]:
"""
IP address of the host file entry.
"""
return pulumi.get(self, "ip")
@ip.setter
def ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecImagePullSecretsArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvArgs']]]] = None,
env_from: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
image_pull_policy: Optional[pulumi.Input[str]] = None,
lifecycle: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecycleArgs']] = None,
liveness_probe: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersPortsArgs']]]] = None,
readiness_probe: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs']] = None,
resources: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersResourcesArgs']] = None,
security_context: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextArgs']] = None,
stdin: Optional[pulumi.Input[bool]] = None,
stdin_once: Optional[pulumi.Input[bool]] = None,
termination_message_path: Optional[pulumi.Input[str]] = None,
termination_message_policy: Optional[pulumi.Input[str]] = None,
tty: Optional[pulumi.Input[bool]] = None,
volume_devices: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeDevicesArgs']]]] = None,
volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeMountsArgs']]]] = None,
working_dir: Optional[pulumi.Input[str]] = None):
"""
A single application container that you want to run within a pod.
:param pulumi.Input[str] name: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvArgs']]] env: List of environment variables to set in the container. Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromArgs']]] env_from: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
:param pulumi.Input[str] image: Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
:param pulumi.Input[str] image_pull_policy: Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecycleArgs'] lifecycle: Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs'] liveness_probe: Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersPortsArgs']]] ports: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs'] readiness_probe: Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersResourcesArgs'] resources: Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextArgs'] security_context: Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] stdin: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param pulumi.Input[bool] stdin_once: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:param pulumi.Input[str] termination_message_path: Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. The message written is intended to be a brief final status, such as an assertion failure message. It will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
:param pulumi.Input[str] termination_message_policy: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
:param pulumi.Input[bool] tty: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeDevicesArgs']]] volume_devices: volumeDevices is the list of block devices to be used by the container. This is a beta feature.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeMountsArgs']]] volume_mounts: Pod volumes to mount into the container's filesystem. Cannot be updated.
:param pulumi.Input[str] working_dir: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
pulumi.set(__self__, "name", name)
if args is not None:
pulumi.set(__self__, "args", args)
if command is not None:
pulumi.set(__self__, "command", command)
if env is not None:
pulumi.set(__self__, "env", env)
if env_from is not None:
pulumi.set(__self__, "env_from", env_from)
if image is not None:
pulumi.set(__self__, "image", image)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
if lifecycle is not None:
pulumi.set(__self__, "lifecycle", lifecycle)
if liveness_probe is not None:
pulumi.set(__self__, "liveness_probe", liveness_probe)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if readiness_probe is not None:
pulumi.set(__self__, "readiness_probe", readiness_probe)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if stdin_once is not None:
pulumi.set(__self__, "stdin_once", stdin_once)
if termination_message_path is not None:
pulumi.set(__self__, "termination_message_path", termination_message_path)
if termination_message_policy is not None:
pulumi.set(__self__, "termination_message_policy", termination_message_policy)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if volume_devices is not None:
pulumi.set(__self__, "volume_devices", volume_devices)
if volume_mounts is not None:
pulumi.set(__self__, "volume_mounts", volume_mounts)
if working_dir is not None:
pulumi.set(__self__, "working_dir", working_dir)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "args")
@args.setter
def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "args", value)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvArgs']]]]:
"""
List of environment variables to set in the container. Cannot be updated.
"""
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="envFrom")
def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromArgs']]]]:
"""
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
"""
return pulumi.get(self, "env_from")
@env_from.setter
def env_from(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromArgs']]]]):
pulumi.set(self, "env_from", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[pulumi.Input[str]]:
"""
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
"""
return pulumi.get(self, "image_pull_policy")
@image_pull_policy.setter
def image_pull_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_pull_policy", value)
@property
@pulumi.getter
def lifecycle(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecycleArgs']]:
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
"""
return pulumi.get(self, "lifecycle")
@lifecycle.setter
def lifecycle(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecycleArgs']]):
pulumi.set(self, "lifecycle", value)
@property
@pulumi.getter(name="livenessProbe")
def liveness_probe(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs']]:
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "liveness_probe")
@liveness_probe.setter
def liveness_probe(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs']]):
pulumi.set(self, "liveness_probe", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersPortsArgs']]]]:
"""
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="readinessProbe")
def readiness_probe(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs']]:
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "readiness_probe")
@readiness_probe.setter
def readiness_probe(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs']]):
pulumi.set(self, "readiness_probe", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersResourcesArgs']]:
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersResourcesArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextArgs']]:
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter(name="stdinOnce")
def stdin_once(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
"""
return pulumi.get(self, "stdin_once")
@stdin_once.setter
def stdin_once(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin_once", value)
@property
@pulumi.getter(name="terminationMessagePath")
def termination_message_path(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. The message written is intended to be a brief final status, such as an assertion failure message. It will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
"""
return pulumi.get(self, "termination_message_path")
@termination_message_path.setter
def termination_message_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_path", value)
@property
@pulumi.getter(name="terminationMessagePolicy")
def termination_message_policy(self) -> Optional[pulumi.Input[str]]:
"""
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
"""
return pulumi.get(self, "termination_message_policy")
@termination_message_policy.setter
def termination_message_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_policy", value)
@property
@pulumi.getter
def tty(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
"""
return pulumi.get(self, "tty")
@tty.setter
def tty(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "tty", value)
@property
@pulumi.getter(name="volumeDevices")
def volume_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeDevicesArgs']]]]:
"""
volumeDevices is the list of block devices to be used by the container. This is a beta feature.
"""
return pulumi.get(self, "volume_devices")
@volume_devices.setter
def volume_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeDevicesArgs']]]]):
pulumi.set(self, "volume_devices", value)
@property
@pulumi.getter(name="volumeMounts")
def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeMountsArgs']]]]:
"""
Pod volumes to mount into the container's filesystem. Cannot be updated.
"""
return pulumi.get(self, "volume_mounts")
@volume_mounts.setter
def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeMountsArgs']]]]):
pulumi.set(self, "volume_mounts", value)
@property
@pulumi.getter(name="workingDir")
def working_dir(self) -> Optional[pulumi.Input[str]]:
"""
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
return pulumi.get(self, "working_dir")
@working_dir.setter
def working_dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "working_dir", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
:param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromArgs:
def __init__(__self__, *,
config_map_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromConfigMapRefArgs']] = None,
prefix: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromSecretRefArgs']] = None):
"""
EnvFromSource represents the source of a set of ConfigMaps or Secrets
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from
:param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromSecretRefArgs'] secret_ref: The Secret to select from
"""
if config_map_ref is not None:
pulumi.set(__self__, "config_map_ref", config_map_ref)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="configMapRef")
def config_map_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromConfigMapRefArgs']]:
"""
The ConfigMap to select from
"""
return pulumi.get(self, "config_map_ref")
@config_map_ref.setter
def config_map_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromConfigMapRefArgs']]):
pulumi.set(self, "config_map_ref", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromSecretRefArgs']]:
"""
The Secret to select from
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromConfigMapRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The ConfigMap to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the ConfigMap must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvFromSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The Secret to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input[str]] = None):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input[str] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecycleArgs:
def __init__(__self__, *,
post_start: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartArgs']] = None,
pre_stop: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopArgs']] = None):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartArgs'] post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopArgs'] pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartArgs']]:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@post_start.setter
def post_start(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartArgs']]):
pulumi.set(self, "post_start", value)
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopArgs']]:
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
@pre_stop.setter
def pre_stop(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopArgs']]):
pulumi.set(self, "pre_stop", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartExecArgs']] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs']] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs:
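# Generated placeholder with no fields; this likely corresponds to the
# Kubernetes int-or-string port value, so instances are constructed with no
# arguments. The same applies to the other *PortArgs classes in this module.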
def __init__(__self__):
pass
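# Illustrative usage (a hedged sketch, not emitted by the code generator): a
# postStart httpGet handler with a custom Host header. The *HttpGetPortArgs
# placeholder above takes no fields, so it is simply instantiated; the values
# "/healthz" and "example.local" are hypothetical.
#
#   post_start_http_get = ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetArgs(
#       port=ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetPortArgs(),
#       path="/healthz",
#       scheme="HTTP",
#       http_headers=[
#           ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs(
#               name="Host",
#               value="example.local",
#           ),
#       ],
#   )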
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs:
def __init__(__self__):
pass
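# Illustrative usage (hedged sketch): a postStart tcpSocket handler. As with
# httpGet, the generated *TcpSocketPortArgs placeholder carries no fields; in
# raw Kubernetes manifests this field is an int-or-string port.
#
#   post_start_tcp = ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketArgs(
#       port=ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs(),
#       host=None,  # defaults to the pod IP when omitted
#   )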
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopExecArgs']] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketArgs']] = None):
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, not run inside a shell, so traditional shell constructs ('|', etc.) won't work; to use a shell, you need to call out to that shell explicitly. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, not run inside a shell, so traditional shell constructs ('|', etc.) won't work; to use a shell, you need to call out to that shell explicitly. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP lifecycle hooks are not yet supported.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, not run inside a shell, so traditional shell constructs ('|', etc.) won't work; to use a shell, you need to call out to that shell explicitly. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, not run inside a shell, so traditional shell constructs ('|', etc.) won't work; to use a shell, you need to call out to that shell explicitly. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs:
def __init__(__self__):
pass
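# Illustrative usage (hedged sketch): an httpGet liveness probe with explicit
# timing. The numeric values are hypothetical; per the docstrings above,
# success_threshold must stay at 1 for liveness probes.
#
#   liveness = ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeArgs(
#       http_get=ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetArgs(
#           port=ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeHttpGetPortArgs(),
#           path="/healthz",
#       ),
#       initial_delay_seconds=10,
#       period_seconds=10,
#       timeout_seconds=1,
#       failure_threshold=3,
#   )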
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersPortsArgs:
def __init__(__self__, *,
container_port: pulumi.Input[int],
host_ip: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None):
"""
ContainerPort represents a network port in a single container.
:param pulumi.Input[int] container_port: Port number to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
:param pulumi.Input[str] host_ip: The host IP to bind the external port to.
:param pulumi.Input[int] host_port: Port number to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
:param pulumi.Input[str] name: If specified, this must be an IANA_SVC_NAME and unique within the pod; it names the port so that it can be referred to by services.
:param pulumi.Input[str] protocol: Protocol for the port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
pulumi.set(__self__, "container_port", container_port)
if host_ip is not None:
pulumi.set(__self__, "host_ip", host_ip)
if host_port is not None:
pulumi.set(__self__, "host_port", host_port)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> pulumi.Input[int]:
"""
Port number to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
"""
return pulumi.get(self, "container_port")
@container_port.setter
def container_port(self, value: pulumi.Input[int]):
pulumi.set(self, "container_port", value)
@property
@pulumi.getter(name="hostIP")
def host_ip(self) -> Optional[pulumi.Input[str]]:
"""
The host IP to bind the external port to.
"""
return pulumi.get(self, "host_ip")
@host_ip.setter
def host_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_ip", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input[int]]:
"""
Port number to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, this must be an IANA_SVC_NAME and unique within the pod; it names the port so that it can be referred to by services.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol for the port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness probes. Minimum value is 1.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness probes. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, not run inside a shell, so traditional shell constructs ('|', etc.) won't work; to use a shell, you need to call out to that shell explicitly. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, not run inside a shell, so traditional shell constructs ('|', etc.) won't work; to use a shell, you need to call out to that shell explicitly. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs:
def __init__(__self__):
pass
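# Illustrative usage (hedged sketch): a tcpSocket readiness probe. Unlike
# liveness probes, readiness probes may set success_threshold above 1; the
# values here are hypothetical.
#
#   readiness = ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeArgs(
#       tcp_socket=ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketArgs(
#           port=ScaledObjectSpecJobTargetRefTemplateSpecInitContainersReadinessProbeTcpSocketPortArgs(),
#       ),
#       period_seconds=5,
#       success_threshold=2,
#   )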
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextArgs:
def __init__(__self__, *,
allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
capabilities: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextCapabilitiesArgs']] = None,
privileged: Optional[pulumi.Input[bool]] = None,
proc_mount: Optional[pulumi.Input[str]] = None,
read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextSeLinuxOptionsArgs']] = None):
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container 1) is run as privileged or 2) has CAP_SYS_ADMIN.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextCapabilitiesArgs'] capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[bool] privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param pulumi.Input[str] proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param pulumi.Input[bool] read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container 1) is run as privileged or 2) has CAP_SYS_ADMIN.
"""
return pulumi.get(self, "allow_privilege_escalation")
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_privilege_escalation", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextCapabilitiesArgs']]:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextCapabilitiesArgs']]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def privileged(self) -> Optional[pulumi.Input[bool]]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@privileged.setter
def privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "privileged", value)
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[pulumi.Input[str]]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@proc_mount.setter
def proc_mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proc_mount", value)
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only_root_filesystem", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextCapabilitiesArgs:
def __init__(__self__, *,
add: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
drop: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[Sequence[pulumi.Input[str]]] add: Added capabilities
:param pulumi.Input[Sequence[pulumi.Input[str]]] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@add.setter
def add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "add", value)
@property
@pulumi.getter
def drop(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
@drop.setter
def drop(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "drop", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeDevicesArgs:
def __init__(__self__, *,
device_path: pulumi.Input[str],
name: pulumi.Input[str]):
"""
volumeDevice describes a mapping of a raw block device within a container.
:param pulumi.Input[str] device_path: devicePath is the path inside of the container that the device will be mapped to.
:param pulumi.Input[str] name: name must match the name of a persistentVolumeClaim in the pod
"""
pulumi.set(__self__, "device_path", device_path)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="devicePath")
def device_path(self) -> pulumi.Input[str]:
"""
devicePath is the path inside of the container that the device will be mapped to.
"""
return pulumi.get(self, "device_path")
@device_path.setter
def device_path(self, value: pulumi.Input[str]):
pulumi.set(self, "device_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name must match the name of a persistentVolumeClaim in the pod
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecInitContainersVolumeMountsArgs:
def __init__(__self__, *,
mount_path: pulumi.Input[str],
name: pulumi.Input[str],
mount_propagation: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
sub_path: Optional[pulumi.Input[str]] = None,
sub_path_expr: Optional[pulumi.Input[str]] = None):
"""
VolumeMount describes a mounting of a Volume within a container.
:param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
:param pulumi.Input[str] name: This must match the Name of a Volume.
:param pulumi.Input[str] mount_propagation: mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
:param pulumi.Input[bool] read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param pulumi.Input[str] sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param pulumi.Input[str] sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> pulumi.Input[str]:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@mount_path.setter
def mount_path(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[pulumi.Input[str]]:
"""
mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@mount_propagation.setter
def mount_propagation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_propagation", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[pulumi.Input[str]]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@sub_path.setter
def sub_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path", value)
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[pulumi.Input[str]]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.
"""
return pulumi.get(self, "sub_path_expr")
@sub_path_expr.setter
def sub_path_expr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path_expr", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecReadinessGatesArgs:
def __init__(__self__, *,
condition_type: pulumi.Input[str]):
"""
PodReadinessGate contains the reference to a pod condition
:param pulumi.Input[str] condition_type: ConditionType refers to a condition in the pod's condition list with matching type.
"""
pulumi.set(__self__, "condition_type", condition_type)
@property
@pulumi.getter(name="conditionType")
def condition_type(self) -> pulumi.Input[str]:
"""
ConditionType refers to a condition in the pod's condition list with matching type.
"""
return pulumi.get(self, "condition_type")
@condition_type.setter
def condition_type(self, value: pulumi.Input[str]):
pulumi.set(self, "condition_type", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextArgs:
def __init__(__self__, *,
fs_group: Optional[pulumi.Input[int]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSeLinuxOptionsArgs']] = None,
supplemental_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
sysctls: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSysctlsArgs']]]] = None):
"""
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:param pulumi.Input[int] fs_group: A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
If unset, the Kubelet will not modify the ownership and permissions of any volume.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input[Sequence[pulumi.Input[int]]] supplemental_groups: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSysctlsArgs']]] sysctls: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.
"""
if fs_group is not None:
pulumi.set(__self__, "fs_group", fs_group)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if supplemental_groups is not None:
pulumi.set(__self__, "supplemental_groups", supplemental_groups)
if sysctls is not None:
pulumi.set(__self__, "sysctls", sysctls)
@property
@pulumi.getter(name="fsGroup")
def fs_group(self) -> Optional[pulumi.Input[int]]:
"""
A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
If unset, the Kubelet will not modify the ownership and permissions of any volume.
"""
return pulumi.get(self, "fs_group")
@fs_group.setter
def fs_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fs_group", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@property
@pulumi.getter(name="supplementalGroups")
def supplemental_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.
"""
return pulumi.get(self, "supplemental_groups")
@supplemental_groups.setter
def supplemental_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "supplemental_groups", value)
@property
@pulumi.getter
def sysctls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSysctlsArgs']]]]:
"""
Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.
"""
return pulumi.get(self, "sysctls")
@sysctls.setter
def sysctls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSysctlsArgs']]]]):
pulumi.set(self, "sysctls", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecSecurityContextSysctlsArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
Sysctl defines a kernel parameter to be set
:param pulumi.Input[str] name: Name of a property to set
:param pulumi.Input[str] value: Value of a property to set
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of a property to set
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Value of a property to set
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecTolerationsArgs:
def __init__(__self__, *,
effect: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
operator: Optional[pulumi.Input[str]] = None,
toleration_seconds: Optional[pulumi.Input[int]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
:param pulumi.Input[str] effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
:param pulumi.Input[str] key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
:param pulumi.Input[str] operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
:param pulumi.Input[int] toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
:param pulumi.Input[str] value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
if effect is not None:
pulumi.set(__self__, "effect", effect)
if key is not None:
pulumi.set(__self__, "key", key)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if toleration_seconds is not None:
pulumi.set(__self__, "toleration_seconds", toleration_seconds)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def effect(self) -> Optional[pulumi.Input[str]]:
"""
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
"""
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input[str]]:
"""
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter(name="tolerationSeconds")
def toleration_seconds(self) -> Optional[pulumi.Input[int]]:
"""
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
"""
return pulumi.get(self, "toleration_seconds")
@toleration_seconds.setter
def toleration_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "toleration_seconds", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
aws_elastic_block_store: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAwsElasticBlockStoreArgs']] = None,
azure_disk: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureDiskArgs']] = None,
azure_file: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureFileArgs']] = None,
cephfs: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsArgs']] = None,
cinder: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderArgs']] = None,
config_map: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapArgs']] = None,
csi: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiArgs']] = None,
downward_api: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIArgs']] = None,
empty_dir: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesEmptyDirArgs']] = None,
fc: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFcArgs']] = None,
flex_volume: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeArgs']] = None,
flocker: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlockerArgs']] = None,
gce_persistent_disk: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGcePersistentDiskArgs']] = None,
git_repo: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGitRepoArgs']] = None,
glusterfs: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGlusterfsArgs']] = None,
host_path: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesHostPathArgs']] = None,
iscsi: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiArgs']] = None,
nfs: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesNfsArgs']] = None,
persistent_volume_claim: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPersistentVolumeClaimArgs']] = None,
photon_persistent_disk: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPhotonPersistentDiskArgs']] = None,
portworx_volume: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPortworxVolumeArgs']] = None,
projected: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedArgs']] = None,
quobyte: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesQuobyteArgs']] = None,
rbd: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdArgs']] = None,
scale_io: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOArgs']] = None,
secret: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretArgs']] = None,
storageos: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosArgs']] = None,
vsphere_volume: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesVsphereVolumeArgs']] = None):
"""
Volume represents a named volume in a pod that may be accessed by any container in the pod.
:param pulumi.Input[str] name: Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAwsElasticBlockStoreArgs'] aws_elastic_block_store: AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureDiskArgs'] azure_disk: AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureFileArgs'] azure_file: AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsArgs'] cephfs: CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderArgs'] cinder: Cinder represents a cinder volume attached and mounted on the kubelet's host machine. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapArgs'] config_map: ConfigMap represents a configMap that should populate this volume
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiArgs'] csi: CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIArgs'] downward_api: DownwardAPI represents downward API about the pod that should populate this volume
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesEmptyDirArgs'] empty_dir: EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFcArgs'] fc: FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeArgs'] flex_volume: FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlockerArgs'] flocker: Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGcePersistentDiskArgs'] gce_persistent_disk: GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGitRepoArgs'] git_repo: GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGlusterfsArgs'] glusterfs: Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesHostPathArgs'] host_path: HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiArgs'] iscsi: ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesNfsArgs'] nfs: NFS represents an NFS mount on the host that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPersistentVolumeClaimArgs'] persistent_volume_claim: PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPhotonPersistentDiskArgs'] photon_persistent_disk: PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPortworxVolumeArgs'] portworx_volume: PortworxVolume represents a portworx volume attached and mounted on the kubelet's host machine
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedArgs'] projected: Items for all-in-one resources: secrets, configmaps, and downward API
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesQuobyteArgs'] quobyte: Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdArgs'] rbd: RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOArgs'] scale_io: ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretArgs'] secret: Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosArgs'] storageos: StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesVsphereVolumeArgs'] vsphere_volume: VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
"""
pulumi.set(__self__, "name", name)
if aws_elastic_block_store is not None:
pulumi.set(__self__, "aws_elastic_block_store", aws_elastic_block_store)
if azure_disk is not None:
pulumi.set(__self__, "azure_disk", azure_disk)
if azure_file is not None:
pulumi.set(__self__, "azure_file", azure_file)
if cephfs is not None:
pulumi.set(__self__, "cephfs", cephfs)
if cinder is not None:
pulumi.set(__self__, "cinder", cinder)
if config_map is not None:
pulumi.set(__self__, "config_map", config_map)
if csi is not None:
pulumi.set(__self__, "csi", csi)
if downward_api is not None:
pulumi.set(__self__, "downward_api", downward_api)
if empty_dir is not None:
pulumi.set(__self__, "empty_dir", empty_dir)
if fc is not None:
pulumi.set(__self__, "fc", fc)
if flex_volume is not None:
pulumi.set(__self__, "flex_volume", flex_volume)
if flocker is not None:
pulumi.set(__self__, "flocker", flocker)
if gce_persistent_disk is not None:
pulumi.set(__self__, "gce_persistent_disk", gce_persistent_disk)
if git_repo is not None:
pulumi.set(__self__, "git_repo", git_repo)
if glusterfs is not None:
pulumi.set(__self__, "glusterfs", glusterfs)
if host_path is not None:
pulumi.set(__self__, "host_path", host_path)
if iscsi is not None:
pulumi.set(__self__, "iscsi", iscsi)
if nfs is not None:
pulumi.set(__self__, "nfs", nfs)
if persistent_volume_claim is not None:
pulumi.set(__self__, "persistent_volume_claim", persistent_volume_claim)
if photon_persistent_disk is not None:
pulumi.set(__self__, "photon_persistent_disk", photon_persistent_disk)
if portworx_volume is not None:
pulumi.set(__self__, "portworx_volume", portworx_volume)
if projected is not None:
pulumi.set(__self__, "projected", projected)
if quobyte is not None:
pulumi.set(__self__, "quobyte", quobyte)
if rbd is not None:
pulumi.set(__self__, "rbd", rbd)
if scale_io is not None:
pulumi.set(__self__, "scale_io", scale_io)
if secret is not None:
pulumi.set(__self__, "secret", secret)
if storageos is not None:
pulumi.set(__self__, "storageos", storageos)
if vsphere_volume is not None:
pulumi.set(__self__, "vsphere_volume", vsphere_volume)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="awsElasticBlockStore")
def aws_elastic_block_store(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAwsElasticBlockStoreArgs']]:
"""
AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
"""
return pulumi.get(self, "aws_elastic_block_store")
@aws_elastic_block_store.setter
def aws_elastic_block_store(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAwsElasticBlockStoreArgs']]):
pulumi.set(self, "aws_elastic_block_store", value)
@property
@pulumi.getter(name="azureDisk")
def azure_disk(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureDiskArgs']]:
"""
AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
"""
return pulumi.get(self, "azure_disk")
@azure_disk.setter
def azure_disk(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureDiskArgs']]):
pulumi.set(self, "azure_disk", value)
@property
@pulumi.getter(name="azureFile")
def azure_file(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureFileArgs']]:
"""
AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
"""
return pulumi.get(self, "azure_file")
@azure_file.setter
def azure_file(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureFileArgs']]):
pulumi.set(self, "azure_file", value)
@property
@pulumi.getter
def cephfs(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsArgs']]:
"""
CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
"""
return pulumi.get(self, "cephfs")
@cephfs.setter
def cephfs(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsArgs']]):
pulumi.set(self, "cephfs", value)
@property
@pulumi.getter
def cinder(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderArgs']]:
"""
Cinder represents a cinder volume attached and mounted on the kubelet's host machine. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
"""
return pulumi.get(self, "cinder")
@cinder.setter
def cinder(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderArgs']]):
pulumi.set(self, "cinder", value)
@property
@pulumi.getter(name="configMap")
def config_map(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapArgs']]:
"""
ConfigMap represents a configMap that should populate this volume
"""
return pulumi.get(self, "config_map")
@config_map.setter
def config_map(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapArgs']]):
pulumi.set(self, "config_map", value)
@property
@pulumi.getter
def csi(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiArgs']]:
"""
CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
"""
return pulumi.get(self, "csi")
@csi.setter
def csi(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiArgs']]):
pulumi.set(self, "csi", value)
@property
@pulumi.getter(name="downwardAPI")
def downward_api(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIArgs']]:
"""
DownwardAPI represents downward API about the pod that should populate this volume
"""
return pulumi.get(self, "downward_api")
@downward_api.setter
def downward_api(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIArgs']]):
pulumi.set(self, "downward_api", value)
@property
@pulumi.getter(name="emptyDir")
def empty_dir(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesEmptyDirArgs']]:
"""
EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
"""
return pulumi.get(self, "empty_dir")
@empty_dir.setter
def empty_dir(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesEmptyDirArgs']]):
pulumi.set(self, "empty_dir", value)
@property
@pulumi.getter
def fc(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFcArgs']]:
"""
FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
"""
return pulumi.get(self, "fc")
@fc.setter
def fc(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFcArgs']]):
pulumi.set(self, "fc", value)
@property
@pulumi.getter(name="flexVolume")
def flex_volume(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeArgs']]:
"""
FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
"""
return pulumi.get(self, "flex_volume")
@flex_volume.setter
def flex_volume(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeArgs']]):
pulumi.set(self, "flex_volume", value)
@property
@pulumi.getter
def flocker(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlockerArgs']]:
"""
Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
"""
return pulumi.get(self, "flocker")
@flocker.setter
def flocker(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlockerArgs']]):
pulumi.set(self, "flocker", value)
@property
@pulumi.getter(name="gcePersistentDisk")
def gce_persistent_disk(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGcePersistentDiskArgs']]:
"""
GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
return pulumi.get(self, "gce_persistent_disk")
@gce_persistent_disk.setter
def gce_persistent_disk(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGcePersistentDiskArgs']]):
pulumi.set(self, "gce_persistent_disk", value)
@property
@pulumi.getter(name="gitRepo")
def git_repo(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGitRepoArgs']]:
"""
GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
"""
return pulumi.get(self, "git_repo")
@git_repo.setter
def git_repo(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGitRepoArgs']]):
pulumi.set(self, "git_repo", value)
@property
@pulumi.getter
def glusterfs(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGlusterfsArgs']]:
"""
Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
"""
return pulumi.get(self, "glusterfs")
@glusterfs.setter
def glusterfs(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesGlusterfsArgs']]):
pulumi.set(self, "glusterfs", value)
@property
@pulumi.getter(name="hostPath")
def host_path(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesHostPathArgs']]:
"""
HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.
"""
return pulumi.get(self, "host_path")
@host_path.setter
def host_path(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesHostPathArgs']]):
pulumi.set(self, "host_path", value)
@property
@pulumi.getter
def iscsi(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiArgs']]:
"""
ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
"""
return pulumi.get(self, "iscsi")
@iscsi.setter
def iscsi(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiArgs']]):
pulumi.set(self, "iscsi", value)
@property
@pulumi.getter
def nfs(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesNfsArgs']]:
"""
NFS represents an NFS mount on the host that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
"""
return pulumi.get(self, "nfs")
@nfs.setter
def nfs(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesNfsArgs']]):
pulumi.set(self, "nfs", value)
@property
@pulumi.getter(name="persistentVolumeClaim")
def persistent_volume_claim(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPersistentVolumeClaimArgs']]:
"""
PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
"""
return pulumi.get(self, "persistent_volume_claim")
@persistent_volume_claim.setter
def persistent_volume_claim(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPersistentVolumeClaimArgs']]):
pulumi.set(self, "persistent_volume_claim", value)
@property
@pulumi.getter(name="photonPersistentDisk")
def photon_persistent_disk(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPhotonPersistentDiskArgs']]:
"""
PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
"""
return pulumi.get(self, "photon_persistent_disk")
@photon_persistent_disk.setter
def photon_persistent_disk(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPhotonPersistentDiskArgs']]):
pulumi.set(self, "photon_persistent_disk", value)
@property
@pulumi.getter(name="portworxVolume")
def portworx_volume(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPortworxVolumeArgs']]:
"""
PortworxVolume represents a portworx volume attached and mounted on the kubelet's host machine
"""
return pulumi.get(self, "portworx_volume")
@portworx_volume.setter
def portworx_volume(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesPortworxVolumeArgs']]):
pulumi.set(self, "portworx_volume", value)
@property
@pulumi.getter
def projected(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedArgs']]:
"""
Items for all-in-one resources: secrets, configmaps, and downward API
"""
return pulumi.get(self, "projected")
@projected.setter
def projected(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedArgs']]):
pulumi.set(self, "projected", value)
@property
@pulumi.getter
def quobyte(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesQuobyteArgs']]:
"""
Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
"""
return pulumi.get(self, "quobyte")
@quobyte.setter
def quobyte(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesQuobyteArgs']]):
pulumi.set(self, "quobyte", value)
@property
@pulumi.getter
def rbd(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdArgs']]:
"""
RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
"""
return pulumi.get(self, "rbd")
@rbd.setter
def rbd(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdArgs']]):
pulumi.set(self, "rbd", value)
@property
@pulumi.getter(name="scaleIO")
def scale_io(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOArgs']]:
"""
ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
"""
return pulumi.get(self, "scale_io")
@scale_io.setter
def scale_io(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOArgs']]):
pulumi.set(self, "scale_io", value)
@property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretArgs']]:
"""
Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
"""
return pulumi.get(self, "secret")
@secret.setter
def secret(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretArgs']]):
pulumi.set(self, "secret", value)
@property
@pulumi.getter
def storageos(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosArgs']]:
"""
StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
"""
return pulumi.get(self, "storageos")
@storageos.setter
def storageos(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosArgs']]):
pulumi.set(self, "storageos", value)
@property
@pulumi.getter(name="vsphereVolume")
def vsphere_volume(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesVsphereVolumeArgs']]:
"""
VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
"""
return pulumi.get(self, "vsphere_volume")
@vsphere_volume.setter
def vsphere_volume(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesVsphereVolumeArgs']]):
pulumi.set(self, "vsphere_volume", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesAwsElasticBlockStoreArgs:
def __init__(__self__, *,
volume_id: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
partition: Optional[pulumi.Input[int]] = None,
read_only: Optional[pulumi.Input[bool]] = None):
"""
AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:param pulumi.Input[str] volume_id: Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:param pulumi.Input[str] fs_type: Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine
:param pulumi.Input[int] partition: The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
:param pulumi.Input[bool] read_only: Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
"""
pulumi.set(__self__, "volume_id", volume_id)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if partition is not None:
pulumi.set(__self__, "partition", partition)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="volumeID")
def volume_id(self) -> pulumi.Input[str]:
"""
Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_id", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter
def partition(self) -> Optional[pulumi.Input[int]]:
"""
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
"""
return pulumi.get(self, "partition")
@partition.setter
def partition(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "partition", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureDiskArgs:
def __init__(__self__, *,
disk_name: pulumi.Input[str],
disk_uri: pulumi.Input[str],
caching_mode: Optional[pulumi.Input[str]] = None,
fs_type: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None):
"""
AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
:param pulumi.Input[str] disk_name: The Name of the data disk in the blob storage
:param pulumi.Input[str] disk_uri: The URI of the data disk in the blob storage
:param pulumi.Input[str] caching_mode: Host Caching mode: None, Read Only, Read Write.
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
:param pulumi.Input[str] kind: Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (Azure managed data disk; only in a managed availability set). Defaults to Shared.
:param pulumi.Input[bool] read_only: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
pulumi.set(__self__, "disk_name", disk_name)
pulumi.set(__self__, "disk_uri", disk_uri)
if caching_mode is not None:
pulumi.set(__self__, "caching_mode", caching_mode)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="diskName")
def disk_name(self) -> pulumi.Input[str]:
"""
The Name of the data disk in the blob storage
"""
return pulumi.get(self, "disk_name")
@disk_name.setter
def disk_name(self, value: pulumi.Input[str]):
pulumi.set(self, "disk_name", value)
@property
@pulumi.getter(name="diskURI")
def disk_uri(self) -> pulumi.Input[str]:
"""
The URI of the data disk in the blob storage
"""
return pulumi.get(self, "disk_uri")
@disk_uri.setter
def disk_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "disk_uri", value)
@property
@pulumi.getter(name="cachingMode")
def caching_mode(self) -> Optional[pulumi.Input[str]]:
"""
Host Caching mode: None, Read Only, Read Write.
"""
return pulumi.get(self, "caching_mode")
@caching_mode.setter
def caching_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "caching_mode", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (Azure managed data disk; only in a managed availability set). Defaults to Shared.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesAzureFileArgs:
def __init__(__self__, *,
secret_name: pulumi.Input[str],
share_name: pulumi.Input[str],
read_only: Optional[pulumi.Input[bool]] = None):
"""
AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
:param pulumi.Input[str] secret_name: the name of the secret that contains the Azure Storage Account Name and Key
:param pulumi.Input[str] share_name: Share Name
:param pulumi.Input[bool] read_only: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
pulumi.set(__self__, "secret_name", secret_name)
pulumi.set(__self__, "share_name", share_name)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> pulumi.Input[str]:
"""
the name of the secret that contains the Azure Storage Account Name and Key
"""
return pulumi.get(self, "secret_name")
@secret_name.setter
def secret_name(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_name", value)
@property
@pulumi.getter(name="shareName")
def share_name(self) -> pulumi.Input[str]:
"""
Share Name
"""
return pulumi.get(self, "share_name")
@share_name.setter
def share_name(self, value: pulumi.Input[str]):
pulumi.set(self, "share_name", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsArgs:
def __init__(__self__, *,
monitors: pulumi.Input[Sequence[pulumi.Input[str]]],
path: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
secret_file: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsSecretRefArgs']] = None,
user: Optional[pulumi.Input[str]] = None):
"""
CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
:param pulumi.Input[Sequence[pulumi.Input[str]]] monitors: Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
:param pulumi.Input[str] path: Optional: Used as the mounted root, rather than the full Ceph tree, default is /
:param pulumi.Input[bool] read_only: Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
:param pulumi.Input[str] secret_file: Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsSecretRefArgs'] secret_ref: Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
:param pulumi.Input[str] user: Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
"""
pulumi.set(__self__, "monitors", monitors)
if path is not None:
pulumi.set(__self__, "path", path)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_file is not None:
pulumi.set(__self__, "secret_file", secret_file)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def monitors(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
"""
return pulumi.get(self, "monitors")
@monitors.setter
def monitors(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "monitors", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Used as the mounted root, rather than the full Ceph tree, default is /
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="secretFile")
def secret_file(self) -> Optional[pulumi.Input[str]]:
"""
Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
"""
return pulumi.get(self, "secret_file")
@secret_file.setter
def secret_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_file", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsSecretRefArgs']]:
"""
Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesCephfsSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderArgs:
def __init__(__self__, *,
volume_id: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderSecretRefArgs']] = None):
"""
Cinder represents a cinder volume attached and mounted on the kubelet's host machine. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param pulumi.Input[str] volume_id: volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param pulumi.Input[bool] read_only: Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderSecretRefArgs'] secret_ref: Optional: points to a secret object containing parameters used to connect to OpenStack.
"""
pulumi.set(__self__, "volume_id", volume_id)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="volumeID")
def volume_id(self) -> pulumi.Input[str]:
"""
volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_id", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderSecretRefArgs']]:
"""
Optional: points to a secret object containing parameters used to connect to OpenStack.
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesCinderSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
Optional: points to a secret object containing parameters used to connect to OpenStack.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapArgs:
def __init__(__self__, *,
default_mode: Optional[pulumi.Input[int]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapItemsArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
ConfigMap represents a configMap that should populate this volume
:param pulumi.Input[int] default_mode: Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapItemsArgs']]] items: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its keys must be defined
"""
if default_mode is not None:
pulumi.set(__self__, "default_mode", default_mode)
if items is not None:
pulumi.set(__self__, "items", items)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter(name="defaultMode")
def default_mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "default_mode")
@default_mode.setter
def default_mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_mode", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapItemsArgs']]]]:
"""
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapItemsArgs']]]]):
pulumi.set(self, "items", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its keys must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesConfigMapItemsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
path: pulumi.Input[str],
mode: Optional[pulumi.Input[int]] = None):
"""
Maps a string key to a path within a volume.
:param pulumi.Input[str] key: The key to project.
:param pulumi.Input[str] path: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
:param pulumi.Input[int] mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "path", path)
if mode is not None:
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to project.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiArgs:
def __init__(__self__, *,
driver: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
node_publish_secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiNodePublishSecretRefArgs']] = None,
read_only: Optional[pulumi.Input[bool]] = None,
volume_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
:param pulumi.Input[str] driver: Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.
:param pulumi.Input[str] fs_type: Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiNodePublishSecretRefArgs'] node_publish_secret_ref: NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.
:param pulumi.Input[bool] read_only: Specifies a read-only configuration for the volume. Defaults to false (read/write).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] volume_attributes: VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.
"""
pulumi.set(__self__, "driver", driver)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if node_publish_secret_ref is not None:
pulumi.set(__self__, "node_publish_secret_ref", node_publish_secret_ref)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if volume_attributes is not None:
pulumi.set(__self__, "volume_attributes", volume_attributes)
@property
@pulumi.getter
def driver(self) -> pulumi.Input[str]:
"""
Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.
"""
return pulumi.get(self, "driver")
@driver.setter
def driver(self, value: pulumi.Input[str]):
pulumi.set(self, "driver", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="nodePublishSecretRef")
def node_publish_secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiNodePublishSecretRefArgs']]:
"""
NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.
"""
return pulumi.get(self, "node_publish_secret_ref")
@node_publish_secret_ref.setter
def node_publish_secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiNodePublishSecretRefArgs']]):
pulumi.set(self, "node_publish_secret_ref", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies a read-only configuration for the volume. Defaults to false (read/write).
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="volumeAttributes")
def volume_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.
"""
return pulumi.get(self, "volume_attributes")
@volume_attributes.setter
def volume_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "volume_attributes", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesCsiNodePublishSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIArgs:
def __init__(__self__, *,
default_mode: Optional[pulumi.Input[int]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsArgs']]]] = None):
"""
DownwardAPI represents downward API about the pod that should populate this volume
:param pulumi.Input[int] default_mode: Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsArgs']]] items: Items is a list of downward API volume file
"""
if default_mode is not None:
pulumi.set(__self__, "default_mode", default_mode)
if items is not None:
pulumi.set(__self__, "items", items)
@property
@pulumi.getter(name="defaultMode")
def default_mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "default_mode")
@default_mode.setter
def default_mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_mode", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsArgs']]]]:
"""
Items is a list of downward API volume file
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsArgs']]]]):
pulumi.set(self, "items", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsFieldRefArgs']] = None,
mode: Optional[pulumi.Input[int]] = None,
resource_field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsResourceFieldRefArgs']] = None):
"""
DownwardAPIVolumeFile represents information to create the file containing the pod field
:param pulumi.Input[str] path: Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsFieldRefArgs'] field_ref: Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
:param pulumi.Input[int] mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
"""
pulumi.set(__self__, "path", path)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsFieldRefArgs']]:
"""
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesDownwardAPIItemsResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input[str]] = None):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input[str] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesEmptyDirArgs:
def __init__(__self__, *,
medium: Optional[pulumi.Input[str]] = None,
size_limit: Optional[pulumi.Input[str]] = None):
"""
EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
:param pulumi.Input[str] medium: What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
:param pulumi.Input[str] size_limit: Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
"""
if medium is not None:
pulumi.set(__self__, "medium", medium)
if size_limit is not None:
pulumi.set(__self__, "size_limit", size_limit)
@property
@pulumi.getter
def medium(self) -> Optional[pulumi.Input[str]]:
"""
What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
"""
return pulumi.get(self, "medium")
@medium.setter
def medium(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "medium", value)
@property
@pulumi.getter(name="sizeLimit")
def size_limit(self) -> Optional[pulumi.Input[str]]:
"""
Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
"""
return pulumi.get(self, "size_limit")
@size_limit.setter
def size_limit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size_limit", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesFcArgs:
def __init__(__self__, *,
fs_type: Optional[pulumi.Input[str]] = None,
lun: Optional[pulumi.Input[int]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
target_wwns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wwids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine
:param pulumi.Input[int] lun: Optional: FC target lun number
:param pulumi.Input[bool] read_only: Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_wwns: Optional: FC target worldwide names (WWNs)
:param pulumi.Input[Sequence[pulumi.Input[str]]] wwids: Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
"""
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if lun is not None:
pulumi.set(__self__, "lun", lun)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if target_wwns is not None:
pulumi.set(__self__, "target_wwns", target_wwns)
if wwids is not None:
pulumi.set(__self__, "wwids", wwids)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter
def lun(self) -> Optional[pulumi.Input[int]]:
"""
Optional: FC target lun number
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lun", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="targetWWNs")
def target_wwns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Optional: FC target worldwide names (WWNs)
"""
return pulumi.get(self, "target_wwns")
@target_wwns.setter
def target_wwns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "target_wwns", value)
@property
@pulumi.getter
def wwids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
"""
return pulumi.get(self, "wwids")
@wwids.setter
def wwids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "wwids", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeArgs:
def __init__(__self__, *,
driver: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeSecretRefArgs']] = None):
"""
FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
:param pulumi.Input[str] driver: Driver is the name of the driver to use for this volume.
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: Optional: Extra command options if any.
:param pulumi.Input[bool] read_only: Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeSecretRefArgs'] secret_ref: Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
"""
pulumi.set(__self__, "driver", driver)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if options is not None:
pulumi.set(__self__, "options", options)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter
def driver(self) -> pulumi.Input[str]:
"""
Driver is the name of the driver to use for this volume.
"""
return pulumi.get(self, "driver")
@driver.setter
def driver(self, value: pulumi.Input[str]):
pulumi.set(self, "driver", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional: Extra command options if any.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "options", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeSecretRefArgs']]:
"""
Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlexVolumeSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesFlockerArgs:
def __init__(__self__, *,
dataset_name: Optional[pulumi.Input[str]] = None,
dataset_uuid: Optional[pulumi.Input[str]] = None):
"""
Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running
:param pulumi.Input[str] dataset_name: Name of the dataset stored as metadata -> name on the dataset for Flocker; should be considered deprecated
:param pulumi.Input[str] dataset_uuid: UUID of the dataset. This is the unique identifier of a Flocker dataset
"""
if dataset_name is not None:
pulumi.set(__self__, "dataset_name", dataset_name)
if dataset_uuid is not None:
pulumi.set(__self__, "dataset_uuid", dataset_uuid)
@property
@pulumi.getter(name="datasetName")
def dataset_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the dataset stored as metadata -> name on the dataset for Flocker; should be considered deprecated
"""
return pulumi.get(self, "dataset_name")
@dataset_name.setter
def dataset_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dataset_name", value)
@property
@pulumi.getter(name="datasetUUID")
def dataset_uuid(self) -> Optional[pulumi.Input[str]]:
"""
UUID of the dataset. This is the unique identifier of a Flocker dataset
"""
return pulumi.get(self, "dataset_uuid")
@dataset_uuid.setter
def dataset_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dataset_uuid", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesGcePersistentDiskArgs:
def __init__(__self__, *,
pd_name: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
partition: Optional[pulumi.Input[int]] = None,
read_only: Optional[pulumi.Input[bool]] = None):
"""
GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param pulumi.Input[str] pd_name: Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param pulumi.Input[str] fs_type: Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine
:param pulumi.Input[int] partition: The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param pulumi.Input[bool] read_only: ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
pulumi.set(__self__, "pd_name", pd_name)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if partition is not None:
pulumi.set(__self__, "partition", partition)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="pdName")
def pd_name(self) -> pulumi.Input[str]:
"""
Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
return pulumi.get(self, "pd_name")
@pd_name.setter
def pd_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pd_name", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter
def partition(self) -> Optional[pulumi.Input[int]]:
"""
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
return pulumi.get(self, "partition")
@partition.setter
def partition(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "partition", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesGitRepoArgs:
def __init__(__self__, *,
repository: pulumi.Input[str],
directory: Optional[pulumi.Input[str]] = None,
revision: Optional[pulumi.Input[str]] = None):
"""
GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.
:param pulumi.Input[str] repository: Repository URL
:param pulumi.Input[str] directory: Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.
:param pulumi.Input[str] revision: Commit hash for the specified revision.
"""
pulumi.set(__self__, "repository", repository)
if directory is not None:
pulumi.set(__self__, "directory", directory)
if revision is not None:
pulumi.set(__self__, "revision", revision)
@property
@pulumi.getter
def repository(self) -> pulumi.Input[str]:
"""
Repository URL
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: pulumi.Input[str]):
pulumi.set(self, "repository", value)
@property
@pulumi.getter
def directory(self) -> Optional[pulumi.Input[str]]:
"""
Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.
"""
return pulumi.get(self, "directory")
@directory.setter
def directory(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory", value)
@property
@pulumi.getter
def revision(self) -> Optional[pulumi.Input[str]]:
"""
Commit hash for the specified revision.
"""
return pulumi.get(self, "revision")
@revision.setter
def revision(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revision", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesGlusterfsArgs:
def __init__(__self__, *,
endpoints: pulumi.Input[str],
path: pulumi.Input[str],
read_only: Optional[pulumi.Input[bool]] = None):
"""
Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
:param pulumi.Input[str] endpoints: EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:param pulumi.Input[str] path: Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
:param pulumi.Input[bool] read_only: ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
"""
pulumi.set(__self__, "endpoints", endpoints)
pulumi.set(__self__, "path", path)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter
def endpoints(self) -> pulumi.Input[str]:
"""
EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: pulumi.Input[str]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesHostPathArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
type: Optional[pulumi.Input[str]] = None):
"""
HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.
:param pulumi.Input[str] path: Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
:param pulumi.Input[str] type: Type for HostPath Volume. Defaults to "". More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
"""
pulumi.set(__self__, "path", path)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type for HostPath Volume. Defaults to "". More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiArgs:
def __init__(__self__, *,
iqn: pulumi.Input[str],
lun: pulumi.Input[int],
target_portal: pulumi.Input[str],
chap_auth_discovery: Optional[pulumi.Input[bool]] = None,
chap_auth_session: Optional[pulumi.Input[bool]] = None,
fs_type: Optional[pulumi.Input[str]] = None,
initiator_name: Optional[pulumi.Input[str]] = None,
iscsi_interface: Optional[pulumi.Input[str]] = None,
portals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiSecretRefArgs']] = None):
"""
ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
:param pulumi.Input[str] iqn: Target iSCSI Qualified Name.
:param pulumi.Input[int] lun: iSCSI Target Lun number.
:param pulumi.Input[str] target_portal: iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
:param pulumi.Input[bool] chap_auth_discovery: whether to support iSCSI Discovery CHAP authentication
:param pulumi.Input[bool] chap_auth_session: whether to support iSCSI Session CHAP authentication
:param pulumi.Input[str] fs_type: Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine
:param pulumi.Input[str] initiator_name: Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface <target portal>:<volume name> will be created for the connection.
:param pulumi.Input[str] iscsi_interface: iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).
:param pulumi.Input[Sequence[pulumi.Input[str]]] portals: iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
:param pulumi.Input[bool] read_only: ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiSecretRefArgs'] secret_ref: CHAP Secret for iSCSI target and initiator authentication
"""
pulumi.set(__self__, "iqn", iqn)
pulumi.set(__self__, "lun", lun)
pulumi.set(__self__, "target_portal", target_portal)
if chap_auth_discovery is not None:
pulumi.set(__self__, "chap_auth_discovery", chap_auth_discovery)
if chap_auth_session is not None:
pulumi.set(__self__, "chap_auth_session", chap_auth_session)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if initiator_name is not None:
pulumi.set(__self__, "initiator_name", initiator_name)
if iscsi_interface is not None:
pulumi.set(__self__, "iscsi_interface", iscsi_interface)
if portals is not None:
pulumi.set(__self__, "portals", portals)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter
def iqn(self) -> pulumi.Input[str]:
"""
Target iSCSI Qualified Name.
"""
return pulumi.get(self, "iqn")
@iqn.setter
def iqn(self, value: pulumi.Input[str]):
pulumi.set(self, "iqn", value)
@property
@pulumi.getter
def lun(self) -> pulumi.Input[int]:
"""
iSCSI Target Lun number.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: pulumi.Input[int]):
pulumi.set(self, "lun", value)
@property
@pulumi.getter(name="targetPortal")
def target_portal(self) -> pulumi.Input[str]:
"""
iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
"""
return pulumi.get(self, "target_portal")
@target_portal.setter
def target_portal(self, value: pulumi.Input[str]):
pulumi.set(self, "target_portal", value)
@property
@pulumi.getter(name="chapAuthDiscovery")
def chap_auth_discovery(self) -> Optional[pulumi.Input[bool]]:
"""
whether to support iSCSI Discovery CHAP authentication
"""
return pulumi.get(self, "chap_auth_discovery")
@chap_auth_discovery.setter
def chap_auth_discovery(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "chap_auth_discovery", value)
@property
@pulumi.getter(name="chapAuthSession")
def chap_auth_session(self) -> Optional[pulumi.Input[bool]]:
"""
whether to support iSCSI Session CHAP authentication
"""
return pulumi.get(self, "chap_auth_session")
@chap_auth_session.setter
def chap_auth_session(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "chap_auth_session", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="initiatorName")
def initiator_name(self) -> Optional[pulumi.Input[str]]:
"""
Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface <target portal>:<volume name> will be created for the connection.
"""
return pulumi.get(self, "initiator_name")
@initiator_name.setter
def initiator_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "initiator_name", value)
@property
@pulumi.getter(name="iscsiInterface")
def iscsi_interface(self) -> Optional[pulumi.Input[str]]:
"""
iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).
"""
return pulumi.get(self, "iscsi_interface")
@iscsi_interface.setter
def iscsi_interface(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iscsi_interface", value)
@property
@pulumi.getter
def portals(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
"""
return pulumi.get(self, "portals")
@portals.setter
def portals(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "portals", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiSecretRefArgs']]:
"""
CHAP Secret for iSCSI target and initiator authentication
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesIscsiSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
CHAP Secret for iSCSI target and initiator authentication
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesNfsArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
server: pulumi.Input[str],
read_only: Optional[pulumi.Input[bool]] = None):
"""
NFS represents an NFS mount on the host that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param pulumi.Input[str] path: Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param pulumi.Input[str] server: Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param pulumi.Input[bool] read_only: ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
"""
pulumi.set(__self__, "path", path)
pulumi.set(__self__, "server", server)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def server(self) -> pulumi.Input[str]:
"""
Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: pulumi.Input[str]):
pulumi.set(self, "server", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesPersistentVolumeClaimArgs:
def __init__(__self__, *,
claim_name: pulumi.Input[str],
read_only: Optional[pulumi.Input[bool]] = None):
"""
PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
:param pulumi.Input[str] claim_name: ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
:param pulumi.Input[bool] read_only: Will force the ReadOnly setting in VolumeMounts. Default false.
"""
pulumi.set(__self__, "claim_name", claim_name)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="claimName")
def claim_name(self) -> pulumi.Input[str]:
"""
ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
"""
return pulumi.get(self, "claim_name")
@claim_name.setter
def claim_name(self, value: pulumi.Input[str]):
pulumi.set(self, "claim_name", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Will force the ReadOnly setting in VolumeMounts. Default false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesPhotonPersistentDiskArgs:
def __init__(__self__, *,
pd_id: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None):
"""
PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine
:param pulumi.Input[str] pd_id: ID that identifies Photon Controller persistent disk
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
"""
pulumi.set(__self__, "pd_id", pd_id)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
@property
@pulumi.getter(name="pdID")
def pd_id(self) -> pulumi.Input[str]:
"""
ID that identifies Photon Controller persistent disk
"""
return pulumi.get(self, "pd_id")
@pd_id.setter
def pd_id(self, value: pulumi.Input[str]):
pulumi.set(self, "pd_id", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesPortworxVolumeArgs:
def __init__(__self__, *,
volume_id: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None):
"""
PortworxVolume represents a Portworx volume attached and mounted on the kubelet's host machine
:param pulumi.Input[str] volume_id: VolumeID uniquely identifies a Portworx volume
:param pulumi.Input[str] fs_type: FSType represents the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
:param pulumi.Input[bool] read_only: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
pulumi.set(__self__, "volume_id", volume_id)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="volumeID")
def volume_id(self) -> pulumi.Input[str]:
"""
VolumeID uniquely identifies a Portworx volume
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_id", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
FSType represents the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedArgs:
def __init__(__self__, *,
sources: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesArgs']]],
default_mode: Optional[pulumi.Input[int]] = None):
"""
Items for all-in-one resources: secrets, configmaps, and downward API
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesArgs']]] sources: list of volume projections
:param pulumi.Input[int] default_mode: Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
pulumi.set(__self__, "sources", sources)
if default_mode is not None:
pulumi.set(__self__, "default_mode", default_mode)
@property
@pulumi.getter
def sources(self) -> pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesArgs']]]:
"""
list of volume projections
"""
return pulumi.get(self, "sources")
@sources.setter
def sources(self, value: pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesArgs']]]):
pulumi.set(self, "sources", value)
@property
@pulumi.getter(name="defaultMode")
def default_mode(self) -> Optional[pulumi.Input[int]]:
"""
Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "default_mode")
@default_mode.setter
def default_mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_mode", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesArgs:
def __init__(__self__, *,
config_map: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapArgs']] = None,
downward_api: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIArgs']] = None,
secret: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretArgs']] = None,
service_account_token: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesServiceAccountTokenArgs']] = None):
"""
Projection that may be projected along with other supported volume types
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapArgs'] config_map: information about the configMap data to project
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIArgs'] downward_api: information about the downwardAPI data to project
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretArgs'] secret: information about the secret data to project
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesServiceAccountTokenArgs'] service_account_token: information about the serviceAccountToken data to project
"""
if config_map is not None:
pulumi.set(__self__, "config_map", config_map)
if downward_api is not None:
pulumi.set(__self__, "downward_api", downward_api)
if secret is not None:
pulumi.set(__self__, "secret", secret)
if service_account_token is not None:
pulumi.set(__self__, "service_account_token", service_account_token)
@property
@pulumi.getter(name="configMap")
def config_map(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapArgs']]:
"""
information about the configMap data to project
"""
return pulumi.get(self, "config_map")
@config_map.setter
def config_map(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapArgs']]):
pulumi.set(self, "config_map", value)
@property
@pulumi.getter(name="downwardAPI")
def downward_api(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIArgs']]:
"""
information about the downwardAPI data to project
"""
return pulumi.get(self, "downward_api")
@downward_api.setter
def downward_api(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIArgs']]):
pulumi.set(self, "downward_api", value)
@property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretArgs']]:
"""
information about the secret data to project
"""
return pulumi.get(self, "secret")
@secret.setter
def secret(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretArgs']]):
pulumi.set(self, "secret", value)
@property
@pulumi.getter(name="serviceAccountToken")
def service_account_token(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesServiceAccountTokenArgs']]:
"""
information about the serviceAccountToken data to project
"""
return pulumi.get(self, "service_account_token")
@service_account_token.setter
def service_account_token(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesServiceAccountTokenArgs']]):
pulumi.set(self, "service_account_token", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapArgs:
def __init__(__self__, *,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapItemsArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
information about the configMap data to project
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapItemsArgs']]] items: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its keys must be defined
"""
if items is not None:
pulumi.set(__self__, "items", items)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapItemsArgs']]]]:
"""
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapItemsArgs']]]]):
pulumi.set(self, "items", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its keys must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesConfigMapItemsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
path: pulumi.Input[str],
mode: Optional[pulumi.Input[int]] = None):
"""
Maps a string key to a path within a volume.
:param pulumi.Input[str] key: The key to project.
:param pulumi.Input[str] path: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
:param pulumi.Input[int] mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "path", path)
if mode is not None:
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to project.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIArgs:
def __init__(__self__, *,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsArgs']]]] = None):
"""
information about the downwardAPI data to project
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsArgs']]] items: Items is a list of DownwardAPIVolume files
"""
if items is not None:
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsArgs']]]]:
"""
Items is a list of DownwardAPIVolume files
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsArgs']]]]):
pulumi.set(self, "items", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsFieldRefArgs']] = None,
mode: Optional[pulumi.Input[int]] = None,
resource_field_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsResourceFieldRefArgs']] = None):
"""
DownwardAPIVolumeFile represents information to create the file containing the pod field
:param pulumi.Input[str] path: Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsFieldRefArgs'] field_ref: Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
:param pulumi.Input[int] mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
"""
pulumi.set(__self__, "path", path)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsFieldRefArgs']]:
"""
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesDownwardAPIItemsResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input[str]] = None):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input[str] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretArgs:
def __init__(__self__, *,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretItemsArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
information about the secret data to project
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretItemsArgs']]] items: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
if items is not None:
pulumi.set(__self__, "items", items)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretItemsArgs']]]]:
"""
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretItemsArgs']]]]):
pulumi.set(self, "items", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesSecretItemsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
path: pulumi.Input[str],
mode: Optional[pulumi.Input[int]] = None):
"""
Maps a string key to a path within a volume.
:param pulumi.Input[str] key: The key to project.
:param pulumi.Input[str] path: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
:param pulumi.Input[int] mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "path", path)
if mode is not None:
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to project.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesProjectedSourcesServiceAccountTokenArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
audience: Optional[pulumi.Input[str]] = None,
expiration_seconds: Optional[pulumi.Input[int]] = None):
"""
information about the serviceAccountToken data to project
:param pulumi.Input[str] path: Path is the path relative to the mount point of the file to project the token into.
:param pulumi.Input[str] audience: Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.
:param pulumi.Input[int] expiration_seconds: ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.
"""
pulumi.set(__self__, "path", path)
if audience is not None:
pulumi.set(__self__, "audience", audience)
if expiration_seconds is not None:
pulumi.set(__self__, "expiration_seconds", expiration_seconds)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
Path is the path relative to the mount point of the file to project the token into.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def audience(self) -> Optional[pulumi.Input[str]]:
"""
Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.
"""
return pulumi.get(self, "audience")
@audience.setter
def audience(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "audience", value)
@property
@pulumi.getter(name="expirationSeconds")
def expiration_seconds(self) -> Optional[pulumi.Input[int]]:
"""
ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.
"""
return pulumi.get(self, "expiration_seconds")
@expiration_seconds.setter
def expiration_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "expiration_seconds", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesQuobyteArgs:
def __init__(__self__, *,
registry: pulumi.Input[str],
volume: pulumi.Input[str],
group: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
tenant: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
:param pulumi.Input[str] registry: Registry represents one or more Quobyte Registry services, specified as a string of host:port pairs (multiple entries separated by commas), which acts as the central registry for volumes
:param pulumi.Input[str] volume: Volume is a string that references an already created Quobyte volume by name.
:param pulumi.Input[str] group: Group to map volume access to. Default is no group.
:param pulumi.Input[bool] read_only: ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
:param pulumi.Input[str] tenant: Tenant owning the given Quobyte volume in the backend. Used with dynamically provisioned Quobyte volumes; the value is set by the plugin.
:param pulumi.Input[str] user: User to map volume access to. Defaults to the serviceaccount user.
"""
pulumi.set(__self__, "registry", registry)
pulumi.set(__self__, "volume", volume)
if group is not None:
pulumi.set(__self__, "group", group)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if tenant is not None:
pulumi.set(__self__, "tenant", tenant)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def registry(self) -> pulumi.Input[str]:
"""
Registry represents one or more Quobyte Registry services, specified as a string of host:port pairs (multiple entries separated by commas), which acts as the central registry for volumes
"""
return pulumi.get(self, "registry")
@registry.setter
def registry(self, value: pulumi.Input[str]):
pulumi.set(self, "registry", value)
@property
@pulumi.getter
def volume(self) -> pulumi.Input[str]:
"""
Volume is a string that references an already created Quobyte volume by name.
"""
return pulumi.get(self, "volume")
@volume.setter
def volume(self, value: pulumi.Input[str]):
pulumi.set(self, "volume", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
"""
Group to map volume access to. Default is no group.
"""
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter
def tenant(self) -> Optional[pulumi.Input[str]]:
"""
Tenant owning the given Quobyte volume in the backend. Used with dynamically provisioned Quobyte volumes; the value is set by the plugin.
"""
return pulumi.get(self, "tenant")
@tenant.setter
def tenant(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User to map volume access to. Defaults to the serviceaccount user.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdArgs:
def __init__(__self__, *,
image: pulumi.Input[str],
monitors: pulumi.Input[Sequence[pulumi.Input[str]]],
fs_type: Optional[pulumi.Input[str]] = None,
keyring: Optional[pulumi.Input[str]] = None,
pool: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdSecretRefArgs']] = None,
user: Optional[pulumi.Input[str]] = None):
"""
RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
:param pulumi.Input[str] image: The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input[Sequence[pulumi.Input[str]]] monitors: A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input[str] fs_type: Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine
:param pulumi.Input[str] keyring: Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input[str] pool: The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input[bool] read_only: ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdSecretRefArgs'] secret_ref: SecretRef is the name of the authentication secret for RBDUser. If provided, it overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input[str] user: The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
pulumi.set(__self__, "image", image)
pulumi.set(__self__, "monitors", monitors)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if keyring is not None:
pulumi.set(__self__, "keyring", keyring)
if pool is not None:
pulumi.set(__self__, "pool", pool)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def image(self) -> pulumi.Input[str]:
"""
The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: pulumi.Input[str]):
pulumi.set(self, "image", value)
@property
@pulumi.getter
def monitors(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "monitors")
@monitors.setter
def monitors(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "monitors", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter
def keyring(self) -> Optional[pulumi.Input[str]]:
"""
Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "keyring")
@keyring.setter
def keyring(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "keyring", value)
@property
@pulumi.getter
def pool(self) -> Optional[pulumi.Input[str]]:
"""
The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "pool")
@pool.setter
def pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pool", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdSecretRefArgs']]:
"""
SecretRef is the name of the authentication secret for RBDUser. If provided, it overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesRbdSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
SecretRef is the name of the authentication secret for RBDUser. If provided, it overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOArgs:
def __init__(__self__, *,
gateway: pulumi.Input[str],
secret_ref: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOSecretRefArgs'],
system: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
protection_domain: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
ssl_enabled: Optional[pulumi.Input[bool]] = None,
storage_mode: Optional[pulumi.Input[str]] = None,
storage_pool: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None):
"""
ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
:param pulumi.Input[str] gateway: The host address of the ScaleIO API Gateway.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOSecretRefArgs'] secret_ref: SecretRef references the secret for the ScaleIO user and other sensitive information. If this is not provided, the login operation will fail.
:param pulumi.Input[str] system: The name of the storage system as configured in ScaleIO.
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".
:param pulumi.Input[str] protection_domain: The name of the ScaleIO Protection Domain for the configured storage.
:param pulumi.Input[bool] read_only: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param pulumi.Input[bool] ssl_enabled: Flag to enable/disable SSL communication with the Gateway. Defaults to false.
:param pulumi.Input[str] storage_mode: Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.
:param pulumi.Input[str] storage_pool: The ScaleIO Storage Pool associated with the protection domain.
:param pulumi.Input[str] volume_name: The name of a volume already created in the ScaleIO system that is associated with this volume source.
"""
pulumi.set(__self__, "gateway", gateway)
pulumi.set(__self__, "secret_ref", secret_ref)
pulumi.set(__self__, "system", system)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if protection_domain is not None:
pulumi.set(__self__, "protection_domain", protection_domain)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if ssl_enabled is not None:
pulumi.set(__self__, "ssl_enabled", ssl_enabled)
if storage_mode is not None:
pulumi.set(__self__, "storage_mode", storage_mode)
if storage_pool is not None:
pulumi.set(__self__, "storage_pool", storage_pool)
if volume_name is not None:
pulumi.set(__self__, "volume_name", volume_name)
@property
@pulumi.getter
def gateway(self) -> pulumi.Input[str]:
"""
The host address of the ScaleIO API Gateway.
"""
return pulumi.get(self, "gateway")
@gateway.setter
def gateway(self, value: pulumi.Input[str]):
pulumi.set(self, "gateway", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOSecretRefArgs']:
"""
SecretRef references the secret for the ScaleIO user and other sensitive information. If this is not provided, the login operation will fail.
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOSecretRefArgs']):
pulumi.set(self, "secret_ref", value)
@property
@pulumi.getter
def system(self) -> pulumi.Input[str]:
"""
The name of the storage system as configured in ScaleIO.
"""
return pulumi.get(self, "system")
@system.setter
def system(self, value: pulumi.Input[str]):
pulumi.set(self, "system", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="protectionDomain")
def protection_domain(self) -> Optional[pulumi.Input[str]]:
"""
The name of the ScaleIO Protection Domain for the configured storage.
"""
return pulumi.get(self, "protection_domain")
@protection_domain.setter
def protection_domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protection_domain", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="sslEnabled")
def ssl_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to enable/disable SSL communication with Gateway, default false
"""
return pulumi.get(self, "ssl_enabled")
@ssl_enabled.setter
def ssl_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ssl_enabled", value)
@property
@pulumi.getter(name="storageMode")
def storage_mode(self) -> Optional[pulumi.Input[str]]:
"""
Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.
"""
return pulumi.get(self, "storage_mode")
@storage_mode.setter
def storage_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_mode", value)
@property
@pulumi.getter(name="storagePool")
def storage_pool(self) -> Optional[pulumi.Input[str]]:
"""
The ScaleIO Storage Pool associated with the protection domain.
"""
return pulumi.get(self, "storage_pool")
@storage_pool.setter
def storage_pool(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_pool", value)
@property
@pulumi.getter(name="volumeName")
def volume_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of a volume already created in the ScaleIO system that is associated with this volume source.
"""
return pulumi.get(self, "volume_name")
@volume_name.setter
def volume_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesScaleIOSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
SecretRef references the secret holding the ScaleIO user and other sensitive information. If this is not provided, the Login operation will fail.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretArgs:
def __init__(__self__, *,
default_mode: Optional[pulumi.Input[int]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretItemsArgs']]]] = None,
optional: Optional[pulumi.Input[bool]] = None,
secret_name: Optional[pulumi.Input[str]] = None):
"""
Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
:param pulumi.Input[int] default_mode: Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretItemsArgs']]] items: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:param pulumi.Input[bool] optional: Specify whether the Secret or its keys must be defined
:param pulumi.Input[str] secret_name: Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
"""
if default_mode is not None:
pulumi.set(__self__, "default_mode", default_mode)
if items is not None:
pulumi.set(__self__, "items", items)
if optional is not None:
pulumi.set(__self__, "optional", optional)
if secret_name is not None:
pulumi.set(__self__, "secret_name", secret_name)
@property
@pulumi.getter(name="defaultMode")
def default_mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "default_mode")
@default_mode.setter
def default_mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_mode", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretItemsArgs']]]]:
"""
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretItemsArgs']]]]):
pulumi.set(self, "items", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its keys must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
"""
return pulumi.get(self, "secret_name")
@secret_name.setter
def secret_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesSecretItemsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
path: pulumi.Input[str],
mode: Optional[pulumi.Input[int]] = None):
"""
Maps a string key to a path within a volume.
:param pulumi.Input[str] key: The key to project.
:param pulumi.Input[str] path: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
:param pulumi.Input[int] mode: Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "path", path)
if mode is not None:
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to project.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosArgs:
def __init__(__self__, *,
fs_type: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
secret_ref: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosSecretRefArgs']] = None,
volume_name: Optional[pulumi.Input[str]] = None,
volume_namespace: Optional[pulumi.Input[str]] = None):
"""
StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
:param pulumi.Input[bool] read_only: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosSecretRefArgs'] secret_ref: SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.
:param pulumi.Input[str] volume_name: VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.
:param pulumi.Input[str] volume_namespace: VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.
"""
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
if volume_name is not None:
pulumi.set(__self__, "volume_name", volume_name)
if volume_namespace is not None:
pulumi.set(__self__, "volume_namespace", volume_namespace)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosSecretRefArgs']]:
"""
SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@property
@pulumi.getter(name="volumeName")
def volume_name(self) -> Optional[pulumi.Input[str]]:
"""
VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.
"""
return pulumi.get(self, "volume_name")
@volume_name.setter
def volume_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_name", value)
@property
@pulumi.getter(name="volumeNamespace")
def volume_namespace(self) -> Optional[pulumi.Input[str]]:
"""
VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.
"""
return pulumi.get(self, "volume_namespace")
@volume_namespace.setter
def volume_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_namespace", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesStorageosSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecJobTargetRefTemplateSpecVolumesVsphereVolumeArgs:
def __init__(__self__, *,
volume_path: pulumi.Input[str],
fs_type: Optional[pulumi.Input[str]] = None,
storage_policy_id: Optional[pulumi.Input[str]] = None,
storage_policy_name: Optional[pulumi.Input[str]] = None):
"""
VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
:param pulumi.Input[str] volume_path: Path that identifies vSphere volume vmdk
:param pulumi.Input[str] fs_type: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
:param pulumi.Input[str] storage_policy_id: Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
:param pulumi.Input[str] storage_policy_name: Storage Policy Based Management (SPBM) profile name.
"""
pulumi.set(__self__, "volume_path", volume_path)
if fs_type is not None:
pulumi.set(__self__, "fs_type", fs_type)
if storage_policy_id is not None:
pulumi.set(__self__, "storage_policy_id", storage_policy_id)
if storage_policy_name is not None:
pulumi.set(__self__, "storage_policy_name", storage_policy_name)
@property
@pulumi.getter(name="volumePath")
def volume_path(self) -> pulumi.Input[str]:
"""
Path that identifies vSphere volume vmdk
"""
return pulumi.get(self, "volume_path")
@volume_path.setter
def volume_path(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_path", value)
@property
@pulumi.getter(name="fsType")
def fs_type(self) -> Optional[pulumi.Input[str]]:
"""
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
"""
return pulumi.get(self, "fs_type")
@fs_type.setter
def fs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_type", value)
@property
@pulumi.getter(name="storagePolicyID")
def storage_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
"""
return pulumi.get(self, "storage_policy_id")
@storage_policy_id.setter
def storage_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_policy_id", value)
@property
@pulumi.getter(name="storagePolicyName")
def storage_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Storage Policy Based Management (SPBM) profile name.
"""
return pulumi.get(self, "storage_policy_name")
@storage_policy_name.setter
def storage_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_policy_name", value)
@pulumi.input_type
class ScaledObjectSpecScaleTargetRefArgs:
def __init__(__self__, *,
deployment_name: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None):
"""
ObjectReference holds a reference to the deployment this ScaledObject applies to
"""
pulumi.set(__self__, "deployment_name", deployment_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
@property
@pulumi.getter(name="deploymentName")
def deployment_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "deployment_name")
@deployment_name.setter
def deployment_name(self, value: pulumi.Input[str]):
pulumi.set(self, "deployment_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@pulumi.input_type
class ScaledObjectSpecTriggersArgs:
def __init__(__self__, *,
metadata: pulumi.Input[Mapping[str, pulumi.Input[str]]],
type: pulumi.Input[str],
authentication_ref: Optional[pulumi.Input['ScaledObjectSpecTriggersAuthenticationRefArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
ScaleTriggers reference the scaler that will be used
:param pulumi.Input['ScaledObjectSpecTriggersAuthenticationRefArgs'] authentication_ref: ScaledObjectAuthRef points to the TriggerAuthentication object that is used to authenticate the scaler with the environment
"""
pulumi.set(__self__, "metadata", metadata)
pulumi.set(__self__, "type", type)
if authentication_ref is not None:
pulumi.set(__self__, "authentication_ref", authentication_ref)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def metadata(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="authenticationRef")
def authentication_ref(self) -> Optional[pulumi.Input['ScaledObjectSpecTriggersAuthenticationRefArgs']]:
"""
ScaledObjectAuthRef points to the TriggerAuthentication object that is used to authenticate the scaler with the environment
"""
return pulumi.get(self, "authentication_ref")
@authentication_ref.setter
def authentication_ref(self, value: Optional[pulumi.Input['ScaledObjectSpecTriggersAuthenticationRefArgs']]):
pulumi.set(self, "authentication_ref", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectSpecTriggersAuthenticationRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
ScaledObjectAuthRef points to the TriggerAuthentication object that is used to authenticate the scaler with the environment
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ScaledObjectStatusArgs:
def __init__(__self__, *,
external_metric_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
last_active_time: Optional[pulumi.Input[str]] = None):
"""
ScaledObjectStatus is the status for a ScaledObject resource
"""
if external_metric_names is not None:
pulumi.set(__self__, "external_metric_names", external_metric_names)
if last_active_time is not None:
pulumi.set(__self__, "last_active_time", last_active_time)
@property
@pulumi.getter(name="externalMetricNames")
def external_metric_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "external_metric_names")
@external_metric_names.setter
def external_metric_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "external_metric_names", value)
@property
@pulumi.getter(name="lastActiveTime")
def last_active_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_active_time")
@last_active_time.setter
def last_active_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_active_time", value)
@pulumi.input_type
class TriggerAuthenticationSpecArgs:
def __init__(__self__, *,
env: Optional[pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecEnvArgs']]]] = None,
hashi_corp_vault: Optional[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultArgs']] = None,
pod_identity: Optional[pulumi.Input['TriggerAuthenticationSpecPodIdentityArgs']] = None,
secret_target_ref: Optional[pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecSecretTargetRefArgs']]]] = None):
"""
TriggerAuthenticationSpec defines the various ways to authenticate
:param pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultArgs'] hashi_corp_vault: HashiCorpVault is used to authenticate using Hashicorp Vault
:param pulumi.Input['TriggerAuthenticationSpecPodIdentityArgs'] pod_identity: AuthPodIdentity allows users to select the platform native identity mechanism
"""
if env is not None:
pulumi.set(__self__, "env", env)
if hashi_corp_vault is not None:
pulumi.set(__self__, "hashi_corp_vault", hashi_corp_vault)
if pod_identity is not None:
pulumi.set(__self__, "pod_identity", pod_identity)
if secret_target_ref is not None:
pulumi.set(__self__, "secret_target_ref", secret_target_ref)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecEnvArgs']]]]:
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="hashiCorpVault")
def hashi_corp_vault(self) -> Optional[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultArgs']]:
"""
HashiCorpVault is used to authenticate using Hashicorp Vault
"""
return pulumi.get(self, "hashi_corp_vault")
@hashi_corp_vault.setter
def hashi_corp_vault(self, value: Optional[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultArgs']]):
pulumi.set(self, "hashi_corp_vault", value)
@property
@pulumi.getter(name="podIdentity")
def pod_identity(self) -> Optional[pulumi.Input['TriggerAuthenticationSpecPodIdentityArgs']]:
"""
AuthPodIdentity allows users to select the platform native identity mechanism
"""
return pulumi.get(self, "pod_identity")
@pod_identity.setter
def pod_identity(self, value: Optional[pulumi.Input['TriggerAuthenticationSpecPodIdentityArgs']]):
pulumi.set(self, "pod_identity", value)
@property
@pulumi.getter(name="secretTargetRef")
def secret_target_ref(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecSecretTargetRefArgs']]]]:
return pulumi.get(self, "secret_target_ref")
@secret_target_ref.setter
def secret_target_ref(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecSecretTargetRefArgs']]]]):
pulumi.set(self, "secret_target_ref", value)
@pulumi.input_type
class TriggerAuthenticationSpecEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameter: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None):
"""
AuthEnvironment is used to authenticate using environment variables in the destination deployment spec
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "parameter", parameter)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameter(self) -> pulumi.Input[str]:
return pulumi.get(self, "parameter")
@parameter.setter
def parameter(self, value: pulumi.Input[str]):
pulumi.set(self, "parameter", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@pulumi.input_type
class TriggerAuthenticationSpecHashiCorpVaultArgs:
def __init__(__self__, *,
address: pulumi.Input[str],
authentication: pulumi.Input[str],
secrets: pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultSecretsArgs']]],
credential: Optional[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultCredentialArgs']] = None,
mount: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None):
"""
HashiCorpVault is used to authenticate using Hashicorp Vault
:param pulumi.Input[str] authentication: VaultAuthentication contains the list of Hashicorp Vault authentication methods
:param pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultCredentialArgs'] credential: Credential defines the Hashicorp Vault credentials depending on the authentication method
"""
pulumi.set(__self__, "address", address)
pulumi.set(__self__, "authentication", authentication)
pulumi.set(__self__, "secrets", secrets)
if credential is not None:
pulumi.set(__self__, "credential", credential)
if mount is not None:
pulumi.set(__self__, "mount", mount)
if role is not None:
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def address(self) -> pulumi.Input[str]:
return pulumi.get(self, "address")
@address.setter
def address(self, value: pulumi.Input[str]):
pulumi.set(self, "address", value)
@property
@pulumi.getter
def authentication(self) -> pulumi.Input[str]:
"""
VaultAuthentication contains the list of Hashicorp Vault authentication methods
"""
return pulumi.get(self, "authentication")
@authentication.setter
def authentication(self, value: pulumi.Input[str]):
pulumi.set(self, "authentication", value)
@property
@pulumi.getter
def secrets(self) -> pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultSecretsArgs']]]:
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: pulumi.Input[Sequence[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultSecretsArgs']]]):
pulumi.set(self, "secrets", value)
@property
@pulumi.getter
def credential(self) -> Optional[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultCredentialArgs']]:
"""
Credential defines the Hashicorp Vault credentials depending on the authentication method
"""
return pulumi.get(self, "credential")
@credential.setter
def credential(self, value: Optional[pulumi.Input['TriggerAuthenticationSpecHashiCorpVaultCredentialArgs']]):
pulumi.set(self, "credential", value)
@property
@pulumi.getter
def mount(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "mount")
@mount.setter
def mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@pulumi.input_type
class TriggerAuthenticationSpecHashiCorpVaultCredentialArgs:
def __init__(__self__, *,
service_account: Optional[pulumi.Input[str]] = None,
token: Optional[pulumi.Input[str]] = None):
"""
Credential defines the Hashicorp Vault credentials depending on the authentication method
"""
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if token is not None:
pulumi.set(__self__, "token", token)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter
def token(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "token")
@token.setter
def token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token", value)
@pulumi.input_type
class TriggerAuthenticationSpecHashiCorpVaultSecretsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
parameter: pulumi.Input[str],
path: pulumi.Input[str]):
"""
VaultSecret defines the mapping from the path of a secret in Vault to the trigger parameter
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "parameter", parameter)
pulumi.set(__self__, "path", path)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def parameter(self) -> pulumi.Input[str]:
return pulumi.get(self, "parameter")
@parameter.setter
def parameter(self, value: pulumi.Input[str]):
pulumi.set(self, "parameter", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@pulumi.input_type
class TriggerAuthenticationSpecPodIdentityArgs:
def __init__(__self__, *,
provider: pulumi.Input[str]):
"""
AuthPodIdentity allows users to select the platform native identity mechanism
:param pulumi.Input[str] provider: PodIdentityProvider contains the list of providers
"""
pulumi.set(__self__, "provider", provider)
@property
@pulumi.getter
def provider(self) -> pulumi.Input[str]:
"""
PodIdentityProvider contains the list of providers
"""
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input[str]):
pulumi.set(self, "provider", value)
@pulumi.input_type
class TriggerAuthenticationSpecSecretTargetRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: pulumi.Input[str],
parameter: pulumi.Input[str]):
"""
AuthSecretTargetRef is used to authenticate using a reference to a secret
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "parameter", parameter)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameter(self) -> pulumi.Input[str]:
return pulumi.get(self, "parameter")
@parameter.setter
def parameter(self, value: pulumi.Input[str]):
pulumi.set(self, "parameter", value)
| 54.358383
| 983
| 0.711481
| 75,511
| 657,519
| 6.077485
| 0.021348
| 0.077469
| 0.063717
| 0.027532
| 0.809382
| 0.785787
| 0.755683
| 0.711292
| 0.700547
| 0.689323
| 0
| 0.001538
| 0.197263
| 657,519
| 12,095
| 984
| 54.362877
| 0.867927
| 0.386144
| 0
| 0.703678
| 1
| 0
| 0.215752
| 0.163605
| 0
| 0
| 0
| 0.006366
| 0
| 1
| 0.208943
| false
| 0.002188
| 0.000684
| 0.00547
| 0.327636
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 854af890bfcc46c7b91a9c2b49eb6ab544c9c285
| 154
| py
| Python
| redshells/model/__init__.py
| hirosassa/redshells
| 7824381a7d1f042405014b4572a5d5824338fc74
| ["MIT"]
| 42
| 2019-01-02T01:31:39.000Z
| 2022-01-29T08:56:12.000Z
| redshells/model/__init__.py
| hirosassa/redshells
| 7824381a7d1f042405014b4572a5d5824338fc74
| ["MIT"]
| 29
| 2019-03-28T02:33:01.000Z
| 2021-09-27T00:45:25.000Z
| redshells/model/__init__.py
| hirosassa/redshells
| 7824381a7d1f042405014b4572a5d5824338fc74
| ["MIT"]
| 17
| 2019-02-21T03:08:20.000Z
| 2022-02-17T23:27:48.000Z
|
from redshells.model.lda_model import LdaModel
from redshells.model.scdv import SCDV
from redshells.model.tfidf import Tfidf
import redshells.model.utils
| 30.8
| 46
| 0.857143
| 23
| 154
| 5.695652
| 0.391304
| 0.427481
| 0.412214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 154
| 4
| 47
| 38.5
| 0.935714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| a413196c3d19f44bd38995fa9edc85d27f55cca8
| 128
| py
| Python
| nn/__init__.py
| koonn/deep-learning-from-scratch
| c2b5ad88fd6738fcfe0239e8259827b6f99f66ec
| ["MIT"]
| null
| null
| null
| nn/__init__.py
| koonn/deep-learning-from-scratch
| c2b5ad88fd6738fcfe0239e8259827b6f99f66ec
| ["MIT"]
| null
| null
| null
| nn/__init__.py
| koonn/deep-learning-from-scratch
| c2b5ad88fd6738fcfe0239e8259827b6f99f66ec
| ["MIT"]
| null
| null
| null
|
from .functions import *
from .loss_functions import *
from .network import *
from .train import *
from .visualization import *
| 21.333333
| 29
| 0.765625
| 16
| 128
| 6.0625
| 0.4375
| 0.412371
| 0.391753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 128
| 5
| 30
| 25.6
| 0.898148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| a41b7fabd73c60511a1ecedfbd8f3ce43b64ed59
| 206,408
| py
| Python
| rbb_client/src/rbb_client/apis/basic_api.py
| SK4P3/rbb_core
| 618617270314af5335de30179072244e1f440c4c
| ["MIT"]
| 55
| 2019-05-09T06:43:05.000Z
| 2021-12-08T05:56:43.000Z
| rbb_client/src/rbb_client/apis/basic_api.py
| SK4P3/rbb_core
| 618617270314af5335de30179072244e1f440c4c
| ["MIT"]
| 5
| 2019-09-08T15:33:28.000Z
| 2021-04-17T17:30:53.000Z
| rbb_client/src/rbb_client/apis/basic_api.py
| SK4P3/rbb_core
| 618617270314af5335de30179072244e1f440c4c
| ["MIT"]
| 16
| 2019-08-08T07:15:35.000Z
| 2021-12-07T15:34:41.000Z
|
# coding: utf-8
"""
BasicApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class BasicApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def authorize_step_get(self, store_name, step, **kwargs):
"""
Authorization step forwarded to storage plugin
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.authorize_step_get(store_name, step, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str step: Step of the authorization procedure (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'step']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method authorize_step_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `authorize_step_get`")
# verify the required parameter 'step' is set
if ('step' not in params) or (params['step'] is None):
raise ValueError("Missing the required parameter `step` when calling `authorize_step_get`")
resource_path = '/file-storage/{store_name}/authorize/{step}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'step' in params:
path_params['step'] = params['step']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
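# Illustrative sketch (hypothetical store and step values): the two calling
# conventions the generated methods support. The synchronous form (default)
# blocks until the HTTP call returns; passing callback= runs the request on a
# worker thread and invokes the callback with the deserialized response, per
# the docstring above, which says the method then returns the request thread.
#
#     api = BasicApi()
#     api.authorize_step_get("my-store", "begin")            # synchronous
#     t = api.authorize_step_get("my-store", "begin",
#                                callback=lambda resp: print(resp))
#     t.join()                                               # asynchronous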
def authorize_step_post(self, store_name, step, **kwargs):
"""
Authorization step forwarded to storage plugin
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.authorize_step_post(store_name, step, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str step: Step of the authorization procedure (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'step']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method authorize_step_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `authorize_step_post`")
# verify the required parameter 'step' is set
if ('step' not in params) or (params['step'] is None):
raise ValueError("Missing the required parameter `step` when calling `authorize_step_post`")
resource_path = '/file-storage/{store_name}/authorize/{step}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'step' in params:
path_params['step'] = params['step']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def bag_store_authorize_step_get(self, store_name, step, **kwargs):
"""
Authorization step forwarded to storage plugin
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.bag_store_authorize_step_get(store_name, step, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str step: Step of the authorization procedure (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'step']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method bag_store_authorize_step_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `bag_store_authorize_step_get`")
# verify the required parameter 'step' is set
if ('step' not in params) or (params['step'] is None):
raise ValueError("Missing the required parameter `step` when calling `bag_store_authorize_step_get`")
resource_path = '/stores/{store_name}/authorize/{step}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'step' in params:
path_params['step'] = params['step']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def bag_store_authorize_step_post(self, store_name, step, **kwargs):
"""
Authorization step forwarded to storage plugin
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.bag_store_authorize_step_post(store_name, step, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str step: Step of the authorization procedure (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'step']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method bag_store_authorize_step_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `bag_store_authorize_step_post`")
# verify the required parameter 'step' is set
if ('step' not in params) or (params['step'] is None):
raise ValueError("Missing the required parameter `step` when calling `bag_store_authorize_step_post`")
resource_path = '/stores/{store_name}/authorize/{step}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'step' in params:
path_params['step'] = params['step']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_bag_comment(self, store_name, bag_name, comment_id, **kwargs):
"""
Delete a comment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_bag_comment(store_name, bag_name, comment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:param int comment_id: Comment identifier (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'bag_name', 'comment_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_bag_comment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `delete_bag_comment`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `delete_bag_comment`")
# verify the required parameter 'comment_id' is set
if ('comment_id' not in params) or (params['comment_id'] is None):
raise ValueError("Missing the required parameter `comment_id` when calling `delete_bag_comment`")
resource_path = '/stores/{store_name}/bags/{bag_name}/comments/{comment_id}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
if 'comment_id' in params:
path_params['comment_id'] = params['comment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_extraction_configuration(self, config_name, **kwargs):
"""
Delete extraction configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_extraction_configuration(config_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str config_name: Name of the configuration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['config_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_extraction_configuration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config_name' is set
if ('config_name' not in params) or (params['config_name'] is None):
raise ValueError("Missing the required parameter `config_name` when calling `delete_extraction_configuration`")
resource_path = '/extraction/configs/{config_name}'.replace('{format}', 'json')
path_params = {}
if 'config_name' in params:
path_params['config_name'] = params['config_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_file_store(self, store_name, **kwargs):
"""
Delete file store
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_store(store_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file_store" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `delete_file_store`")
resource_path = '/file-storage/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_session(self, session_id, **kwargs):
"""
Delete a session or sessions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_session(session_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str session_id: Session id, or the literal "all" or "current" (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['session_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_session" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'session_id' is set
if ('session_id' not in params) or (params['session_id'] is None):
raise ValueError("Missing the required parameter `session_id` when calling `delete_session`")
resource_path = '/sessions/{session_id}'.replace('{format}', 'json')
path_params = {}
if 'session_id' in params:
path_params['session_id'] = params['session_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_simulation(self, sim_identifier, **kwargs):
"""
Delete simulation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_simulation(sim_identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int sim_identifier: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sim_identifier']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_simulation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sim_identifier' is set
if ('sim_identifier' not in params) or (params['sim_identifier'] is None):
raise ValueError("Missing the required parameter `sim_identifier` when calling `delete_simulation`")
resource_path = '/simulations/{sim_identifier}'.replace('{format}', 'json')
path_params = {}
if 'sim_identifier' in params:
path_params['sim_identifier'] = params['sim_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_simulation_environment(self, env_name, **kwargs):
"""
Delete simulation environment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.delete_simulation_environment(env_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str env_name: Name of the simulation environment (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['env_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_simulation_environment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'env_name' is set
if ('env_name' not in params) or (params['env_name'] is None):
raise ValueError("Missing the required parameter `env_name` when calling `delete_simulation_environment`")
resource_path = '/simulation-environments/{env_name}'.replace('{format}', 'json')
path_params = {}
if 'env_name' in params:
path_params['env_name'] = params['env_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_store(self, store_name, **kwargs):
"""
Delete store
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.delete_store(store_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_store" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `delete_store`")
resource_path = '/stores/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_user_account(self, alias, **kwargs):
"""
Delete user account
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.delete_user_account(alias, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str alias: Alias of the user (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['alias']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_user_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'alias' is set
if ('alias' not in params) or (params['alias'] is None):
raise ValueError("Missing the required parameter `alias` when calling `delete_user_account`")
resource_path = '/users/account/{alias}'.replace('{format}', 'json')
path_params = {}
if 'alias' in params:
path_params['alias'] = params['alias']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def dequeue_task(self, worker_name, tasks, labels, **kwargs):
"""
Take a task from the queue
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.dequeue_task(worker_name, tasks, labels, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str worker_name: Name of the worker trying to acquire a task (required)
:param str tasks: Tasks that the worker can do ('any' or a list of tasks) (required)
:param str labels: Labels the worker is willing to take on (required)
:return: TaskDetailed
If the method is called asynchronously,
returns the request thread.
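Example (sketch; the worker name and label are illustrative
placeholders, and 'any' follows the `tasks` parameter description):
>>> task = api.dequeue_task('worker-1', 'any', 'nightly')
>>> pprint(task)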
"""
all_params = ['worker_name', 'tasks', 'labels']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method dequeue_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worker_name' is set
if ('worker_name' not in params) or (params['worker_name'] is None):
raise ValueError("Missing the required parameter `worker_name` when calling `dequeue_task`")
# verify the required parameter 'tasks' is set
if ('tasks' not in params) or (params['tasks'] is None):
raise ValueError("Missing the required parameter `tasks` when calling `dequeue_task`")
# verify the required parameter 'labels' is set
if ('labels' not in params) or (params['labels'] is None):
raise ValueError("Missing the required parameter `labels` when calling `dequeue_task`")
resource_path = '/queue/dequeue'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'worker_name' in params:
query_params['worker_name'] = params['worker_name']
if 'tasks' in params:
query_params['tasks'] = params['tasks']
if 'labels' in params:
query_params['labels'] = params['labels']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def do_task_action(self, task_identifier, action, **kwargs):
"""
Perform an action on the task
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.do_task_action(task_identifier, action, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str task_identifier: (required)
:param str action: Action to perform (cancel/prio_up) (required)
:param TaskDetailed task: The task, required depending on the action
:return: TaskDetailed
If the method is called asynchronously,
returns the request thread.
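Example (sketch; the task identifier is an illustrative placeholder
and 'cancel' is one of the documented actions):
>>> task = api.do_task_action('42', 'cancel')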
"""
all_params = ['task_identifier', 'action', 'task']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method do_task_action" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'task_identifier' is set
if ('task_identifier' not in params) or (params['task_identifier'] is None):
raise ValueError("Missing the required parameter `task_identifier` when calling `do_task_action`")
# verify the required parameter 'action' is set
if ('action' not in params) or (params['action'] is None):
raise ValueError("Missing the required parameter `action` when calling `do_task_action`")
resource_path = '/queue/{task_identifier}'.replace('{format}', 'json')
path_params = {}
if 'task_identifier' in params:
path_params['task_identifier'] = params['task_identifier']
query_params = {}
if 'action' in params:
query_params['action'] = params['action']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'task' in params:
body_params = params['task']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_bag_comments(self, store_name, bag_name, **kwargs):
"""
List comments from bag
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_bag_comments(store_name, bag_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:return: list[Comment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'bag_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bag_comments" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_bag_comments`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `get_bag_comments`")
resource_path = '/stores/{store_name}/bags/{bag_name}/comments'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Comment]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_bag_file(self, store_name, bag_name, **kwargs):
"""
Get rosbag
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_bag_file(store_name, bag_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:return: str
If the method is called asynchronously,
returns the request thread.
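Example (sketch; the store and bag names are illustrative
placeholders):
>>> bag_data = api.get_bag_file('main-store', 'recording.bag')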
"""
all_params = ['store_name', 'bag_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bag_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_bag_file`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `get_bag_file`")
resource_path = '/stores/{store_name}/bags/{bag_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_bag_meta(self, store_name, bag_name, **kwargs):
"""
Get bag metadata
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_bag_meta(store_name, bag_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:return: BagDetailed
If the method is called asynchronously,
returns the request thread.
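Example (sketch; the store and bag names are illustrative
placeholders):
>>> meta = api.get_bag_meta('main-store', 'recording.bag')
>>> pprint(meta)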
"""
all_params = ['store_name', 'bag_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bag_meta" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_bag_meta`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `get_bag_meta`")
resource_path = '/stores/{store_name}/bags/{bag_name}/meta'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_bag_tags(self, store_name, bag_name, **kwargs):
"""
List tags from bag
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_bag_tags(store_name, bag_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:return: list[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'bag_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bag_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_bag_tags`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `get_bag_tags`")
resource_path = '/stores/{store_name}/bags/{bag_name}/tags'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Tag]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_configuration_key(self, config_key, **kwargs):
"""
Get configuration key
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_configuration_key(config_key, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str config_key: Configuration keys to read, * is all (required)
:return: object
If the method is called asynchronously,
returns the request thread.
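Example (sketch; '*' reads every key, per the parameter description):
>>> config = api.get_configuration_key('*')
>>> pprint(config)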
"""
all_params = ['config_key']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config_key' is set
if ('config_key' not in params) or (params['config_key'] is None):
raise ValueError("Missing the required parameter `config_key` when calling `get_configuration_key`")
resource_path = '/configuration/{config_key}'.replace('{format}', 'json')
path_params = {}
if 'config_key' in params:
path_params['config_key'] = params['config_key']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_cron_endpoint(self, **kwargs):
"""
Endpoint that should be triggered periodically to run cron jobs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_cron_endpoint(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str jobs: Specific cron jobs to trigger
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['jobs']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cron_endpoint" % key
)
params[key] = val
del params['kwargs']
resource_path = '/cron'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'jobs' in params:
query_params['jobs'] = params['jobs']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_current_user(self, **kwargs):
"""
Get current user information
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_current_user(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: User
If the method is called asynchronously,
returns the request thread.
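Example (sketch; assumes basicAuth credentials are already configured
on the underlying client):
>>> me = api.get_current_user()
>>> pprint(me)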
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_current_user" % key
)
params[key] = val
del params['kwargs']
resource_path = '/users/me'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_extraction_config(self, config_name, **kwargs):
"""
Get configuration details
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_extraction_config(config_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str config_name: Name of the configuration (required)
:return: BagExtractionConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['config_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_extraction_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config_name' is set
if ('config_name' not in params) or (params['config_name'] is None):
raise ValueError("Missing the required parameter `config_name` when calling `get_extraction_config`")
resource_path = '/extraction/configs/{config_name}'.replace('{format}', 'json')
path_params = {}
if 'config_name' in params:
path_params['config_name'] = params['config_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagExtractionConfiguration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_file(self, store_name, uid, file_name, **kwargs):
"""
Get file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_file(store_name, uid, file_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param int uid: Unique identifier of the file (required)
:param str file_name: Name of the file (required)
:param bool no_redirect:
:return: str
If the method is called asynchronously,
returns the request thread.
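Example (sketch; the store name, uid and file name are illustrative
placeholders):
>>> contents = api.get_file('attachments', 7, 'photo.jpg')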
"""
all_params = ['store_name', 'uid', 'file_name', 'no_redirect']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_file`")
# verify the required parameter 'uid' is set
if ('uid' not in params) or (params['uid'] is None):
raise ValueError("Missing the required parameter `uid` when calling `get_file`")
# verify the required parameter 'file_name' is set
if ('file_name' not in params) or (params['file_name'] is None):
raise ValueError("Missing the required parameter `file_name` when calling `get_file`")
resource_path = '/file-storage/{store_name}/{uid}/{file_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'uid' in params:
path_params['uid'] = params['uid']
if 'file_name' in params:
path_params['file_name'] = params['file_name']
query_params = {}
if 'no_redirect' in params:
query_params['no_redirect'] = params['no_redirect']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_file_meta(self, store_name, uid, file_name, **kwargs):
"""
Get file metadata
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_file_meta(store_name, uid, file_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param int uid: Unique identifier of the file (required)
:param str file_name: Name of the file (required)
:return: FileDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'uid', 'file_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file_meta" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_file_meta`")
# verify the required parameter 'uid' is set
if ('uid' not in params) or (params['uid'] is None):
raise ValueError("Missing the required parameter `uid` when calling `get_file_meta`")
# verify the required parameter 'file_name' is set
if ('file_name' not in params) or (params['file_name'] is None):
raise ValueError("Missing the required parameter `file_name` when calling `get_file_meta`")
resource_path = '/file-storage/{store_name}/{uid}/{file_name}/meta'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'uid' in params:
path_params['uid'] = params['uid']
if 'file_name' in params:
path_params['file_name'] = params['file_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_file_store(self, store_name, **kwargs):
"""
Get file store details
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_file_store(store_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:return: FileStore
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file_store" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_file_store`")
resource_path = '/file-storage/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileStore',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_simulation(self, sim_identifier, **kwargs):
"""
Get simulation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_simulation(sim_identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int sim_identifier: (required)
:param bool expand:
:return: SimulationDetailed
If the method is called asynchronously,
returns the request thread.
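Example (sketch; the identifier is an illustrative placeholder):
>>> sim = api.get_simulation(12, expand=True)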
"""
all_params = ['sim_identifier', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_simulation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sim_identifier' is set
if ('sim_identifier' not in params) or (params['sim_identifier'] is None):
raise ValueError("Missing the required parameter `sim_identifier` when calling `get_simulation`")
resource_path = '/simulations/{sim_identifier}'.replace('{format}', 'json')
path_params = {}
if 'sim_identifier' in params:
path_params['sim_identifier'] = params['sim_identifier']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_simulation_environment(self, env_name, **kwargs):
"""
Get simulation environment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_simulation_environment(env_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str env_name: Name of the simulation environment (required)
:return: SimulationEnvironmentDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['env_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_simulation_environment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'env_name' is set
if ('env_name' not in params) or (params['env_name'] is None):
raise ValueError("Missing the required parameter `env_name` when calling `get_simulation_environment`")
resource_path = '/simulation-environments/{env_name}'.replace('{format}', 'json')
path_params = {}
if 'env_name' in params:
path_params['env_name'] = params['env_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationEnvironmentDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_simulation_run(self, sim_identifier, run_identifier, **kwargs):
"""
Get simulation run
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_simulation_run(sim_identifier, run_identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int sim_identifier: (required)
:param int run_identifier: (required)
:param bool expand:
:return: SimulationRunDetailed
If the method is called asynchronously,
returns the request thread.
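Example (sketch; both identifiers are illustrative placeholders):
>>> run = api.get_simulation_run(12, 3)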
"""
all_params = ['sim_identifier', 'run_identifier', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_simulation_run" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sim_identifier' is set
if ('sim_identifier' not in params) or (params['sim_identifier'] is None):
raise ValueError("Missing the required parameter `sim_identifier` when calling `get_simulation_run`")
# verify the required parameter 'run_identifier' is set
if ('run_identifier' not in params) or (params['run_identifier'] is None):
raise ValueError("Missing the required parameter `run_identifier` when calling `get_simulation_run`")
resource_path = '/simulations/{sim_identifier}/runs/{run_identifier}'.replace('{format}', 'json')
path_params = {}
if 'sim_identifier' in params:
path_params['sim_identifier'] = params['sim_identifier']
if 'run_identifier' in params:
path_params['run_identifier'] = params['run_identifier']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationRunDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_store(self, store_name, **kwargs):
"""
Get store details
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_store(store_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:return: BagStoreDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_store" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_store`")
resource_path = '/stores/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagStoreDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_store_extraction_configs(self, store_name, **kwargs):
"""
Get list of auto extraction configs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_store_extraction_configs(store_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:return: list[BagExtractionConfiguration]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_store_extraction_configs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `get_store_extraction_configs`")
resource_path = '/stores/{store_name}/auto-extraction-configs'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BagExtractionConfiguration]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_tag(self, tag, **kwargs):
"""
Get tag info
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_tag(tag, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str tag: Name of the tag (required)
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tag']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tag" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'tag' is set
if ('tag' not in params) or (params['tag'] is None):
raise ValueError("Missing the required parameter `tag` when calling `get_tag`")
resource_path = '/tags/{tag}'.replace('{format}', 'json')
path_params = {}
if 'tag' in params:
path_params['tag'] = params['tag']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tag',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_task(self, task_identifier, **kwargs):
"""
Get task details
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_task(task_identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str task_identifier: (required)
:return: TaskDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['task_identifier']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'task_identifier' is set
if ('task_identifier' not in params) or (params['task_identifier'] is None):
raise ValueError("Missing the required parameter `task_identifier` when calling `get_task`")
resource_path = '/queue/{task_identifier}'.replace('{format}', 'json')
path_params = {}
if 'task_identifier' in params:
path_params['task_identifier'] = params['task_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_user_account(self, alias, **kwargs):
"""
Get user information
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.get_user_account(alias, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str alias: Alias of the user (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['alias']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'alias' is set
if ('alias' not in params) or (params['alias'] is None):
raise ValueError("Missing the required parameter `alias` when calling `get_user_account`")
resource_path = '/users/account/{alias}'.replace('{format}', 'json')
path_params = {}
if 'alias' in params:
path_params['alias'] = params['alias']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_bags(self, store_name, **kwargs):
"""
List bags in store
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.list_bags(store_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param int limit:
:param int offset:
:param str ordering:
:param datetime discovered_gte:
:param datetime discovered_lte:
:param datetime start_time_gte:
:param datetime start_time_lte:
:param datetime end_time_gte:
:param datetime end_time_lte:
:param float duration_gte:
:param float duration_lte:
:param bool meta_available:
:param bool is_extracted:
:param str name:
:param str tags:
:param bool in_trash:
:return: list[BagSummary]
If the method is called asynchronously,
returns the request thread.
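Example (sketch; the store name and filter values are illustrative):
>>> bags = api.list_bags('main-store', limit=10, is_extracted=True)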
"""
all_params = ['store_name', 'limit', 'offset', 'ordering', 'discovered_gte', 'discovered_lte', 'start_time_gte', 'start_time_lte', 'end_time_gte', 'end_time_lte', 'duration_gte', 'duration_lte', 'meta_available', 'is_extracted', 'name', 'tags', 'in_trash']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_bags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `list_bags`")
resource_path = '/stores/{store_name}/bags'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
if 'limit' in params:
query_params['limit'] = params['limit']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'ordering' in params:
query_params['ordering'] = params['ordering']
if 'discovered_gte' in params:
query_params['discovered_gte'] = params['discovered_gte']
if 'discovered_lte' in params:
query_params['discovered_lte'] = params['discovered_lte']
if 'start_time_gte' in params:
query_params['start_time_gte'] = params['start_time_gte']
if 'start_time_lte' in params:
query_params['start_time_lte'] = params['start_time_lte']
if 'end_time_gte' in params:
query_params['end_time_gte'] = params['end_time_gte']
if 'end_time_lte' in params:
query_params['end_time_lte'] = params['end_time_lte']
if 'duration_gte' in params:
query_params['duration_gte'] = params['duration_gte']
if 'duration_lte' in params:
query_params['duration_lte'] = params['duration_lte']
if 'meta_available' in params:
query_params['meta_available'] = params['meta_available']
if 'is_extracted' in params:
query_params['is_extracted'] = params['is_extracted']
if 'name' in params:
query_params['name'] = params['name']
if 'tags' in params:
query_params['tags'] = params['tags']
if 'in_trash' in params:
query_params['in_trash'] = params['in_trash']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BagSummary]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_extraction_configurations(self, **kwargs):
"""
List available configurations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
...     pprint(response)
...
>>> thread = api.list_extraction_configurations(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[BagExtractionConfiguration]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_extraction_configurations" % key
)
params[key] = val
del params['kwargs']
resource_path = '/extraction/configs'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BagExtractionConfiguration]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_file_stores(self, **kwargs):
"""
List available file stores
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_stores(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[FileStore]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_file_stores" % key
)
params[key] = val
del params['kwargs']
resource_path = '/file-storage'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[FileStore]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_queue(self, **kwargs):
"""
List task queue
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_queue(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int limit:
:param int offset:
:param str ordering:
:param str running: Include running tasks, default is true
:param str finished: Include finished tasks, default is true
:param str queued: Include queued tasks, default is true
:return: list[TaskSummary]
If the method is called asynchronously,
returns the request thread.
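A synchronous usage sketch; the filter values below are illustrative
and a reachable server with valid credentials is assumed:
>>> tasks = api.list_queue(limit=10, running='true',
>>> queued='true', finished='false')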
"""
all_params = ['limit', 'offset', 'ordering', 'running', 'finished', 'queued']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_queue" % key
)
params[key] = val
del params['kwargs']
resource_path = '/queue'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'limit' in params:
query_params['limit'] = params['limit']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'ordering' in params:
query_params['ordering'] = params['ordering']
if 'running' in params:
query_params['running'] = params['running']
if 'finished' in params:
query_params['finished'] = params['finished']
if 'queued' in params:
query_params['queued'] = params['queued']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[TaskSummary]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_sessions(self, **kwargs):
"""
List current sessions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_sessions(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Session]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_sessions" % key
)
params[key] = val
del params['kwargs']
resource_path = '/sessions'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Session]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_simulation_environments(self, **kwargs):
"""
List available simulation environments
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_simulation_environments(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[SimulationEnvironmentSummary]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_simulation_environments" % key
)
params[key] = val
del params['kwargs']
resource_path = '/simulation-environments'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SimulationEnvironmentSummary]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_simulation_runs(self, sim_identifier, **kwargs):
"""
List simulation runs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_simulation_runs(sim_identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int sim_identifier: (required)
:return: list[SimulationRunSummary]
If the method is called asynchronously,
returns the request thread.
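A synchronous usage sketch; the identifier 42 is illustrative:
>>> runs = api.list_simulation_runs(42)
>>> pprint(runs)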
"""
all_params = ['sim_identifier']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_simulation_runs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sim_identifier' is set
if ('sim_identifier' not in params) or (params['sim_identifier'] is None):
raise ValueError("Missing the required parameter `sim_identifier` when calling `list_simulation_runs`")
resource_path = '/simulations/{sim_identifier}/runs'.replace('{format}', 'json')
path_params = {}
if 'sim_identifier' in params:
path_params['sim_identifier'] = params['sim_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SimulationRunSummary]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_simulations(self, **kwargs):
"""
List simulations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_simulations(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int limit:
:param int offset:
:param str ordering:
:return: list[SimulationSummary]
If the method is called asynchronously,
returns the request thread.
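A synchronous pagination sketch; the page size and the ordering
field name are assumptions:
>>> page = api.list_simulations(limit=20, offset=0, ordering='name')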
"""
all_params = ['limit', 'offset', 'ordering']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_simulations" % key
)
params[key] = val
del params['kwargs']
resource_path = '/simulations'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'limit' in params:
query_params['limit'] = params['limit']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'ordering' in params:
query_params['ordering'] = params['ordering']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SimulationSummary]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_stores(self, **kwargs):
"""
List available stores
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_stores(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[BagStoreDetailed]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_stores" % key
)
params[key] = val
del params['kwargs']
resource_path = '/stores'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[BagStoreDetailed]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_tags(self, **kwargs):
"""
List all tags
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_tags(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_tags" % key
)
params[key] = val
del params['kwargs']
resource_path = '/tags'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Tag]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def list_user_accounts(self, **kwargs):
"""
List user accounts
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_user_accounts(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[User]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_user_accounts" % key
)
params[key] = val
del params['kwargs']
resource_path = '/users/account'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[User]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def new_bag_comment(self, store_name, bag_name, comment, **kwargs):
"""
New bag comment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.new_bag_comment(store_name, bag_name, comment, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:param Comment comment: Comment (required)
:return: Comment
If the method is called asynchronously,
returns the request thread.
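A synchronous usage sketch; the store/bag names and the `comment`
attribute of the Comment model are assumptions:
>>> body = Comment()
>>> body.comment = 'Calibration drift after t=120s'
>>> created = api.new_bag_comment('main', 'run_001.bag', body)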
"""
all_params = ['store_name', 'bag_name', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_bag_comment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `new_bag_comment`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `new_bag_comment`")
# verify the required parameter 'comment' is set
if ('comment' not in params) or (params['comment'] is None):
raise ValueError("Missing the required parameter `comment` when calling `new_bag_comment`")
resource_path = '/stores/{store_name}/bags/{bag_name}/comments'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'comment' in params:
body_params = params['comment']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Comment',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def new_file(self, store_name, file, **kwargs):
"""
Register new file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.new_file(store_name, file, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param FileDetailed file: The file metadata (required)
:return: FileDetailed
If the method is called asynchronously,
returns the request thread.
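A synchronous usage sketch; the store name and the `name` attribute
of the FileDetailed model are assumptions:
>>> meta = FileDetailed()
>>> meta.name = 'camera_front.mp4'
>>> registered = api.new_file('media', meta)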
"""
all_params = ['store_name', 'file']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `new_file`")
# verify the required parameter 'file' is set
if ('file' not in params) or (params['file'] is None):
raise ValueError("Missing the required parameter `file` when calling `new_file`")
resource_path = '/file-storage/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'file' in params:
body_params = params['file']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def new_session(self, **kwargs):
"""
Create a new session
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.new_session(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int valid_for: Validity in seconds
:return: Session
If the method is called asynchronously,
returns the request thread.
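A synchronous usage sketch requesting a session valid for one hour:
>>> session = api.new_session(valid_for=3600)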
"""
all_params = ['valid_for']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_session" % key
)
params[key] = val
del params['kwargs']
resource_path = '/sessions'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'valid_for' in params:
query_params['valid_for'] = params['valid_for']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Session',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def new_simulation(self, simulation, **kwargs):
"""
New simulation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.new_simulation(simulation, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param SimulationDetailed simulation: Simulation (required)
:param str trigger: Hooks to trigger
:return: SimulationDetailed
If the method is called asynchronously,
returns the request thread.
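A synchronous usage sketch; the `description` attribute of the
SimulationDetailed model and the trigger name are assumptions:
>>> sim = SimulationDetailed()
>>> sim.description = 'lane-keeping regression'
>>> created = api.new_simulation(sim, trigger='on-create')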
"""
all_params = ['simulation', 'trigger']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_simulation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'simulation' is set
if ('simulation' not in params) or (params['simulation'] is None):
raise ValueError("Missing the required parameter `simulation` when calling `new_simulation`")
resource_path = '/simulations'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'trigger' in params:
query_params['trigger'] = params['trigger']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'simulation' in params:
body_params = params['simulation']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def new_simulation_run(self, sim_identifier, simulation_run, **kwargs):
"""
New simulation run
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.new_simulation_run(sim_identifier, simulation_run, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int sim_identifier: (required)
:param SimulationRunDetailed simulation_run: Simulation run (required)
:return: SimulationRunDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sim_identifier', 'simulation_run']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_simulation_run" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sim_identifier' is set
if ('sim_identifier' not in params) or (params['sim_identifier'] is None):
raise ValueError("Missing the required parameter `sim_identifier` when calling `new_simulation_run`")
# verify the required parameter 'simulation_run' is set
if ('simulation_run' not in params) or (params['simulation_run'] is None):
raise ValueError("Missing the required parameter `simulation_run` when calling `new_simulation_run`")
resource_path = '/simulations/{sim_identifier}/runs'.replace('{format}', 'json')
path_params = {}
if 'sim_identifier' in params:
path_params['sim_identifier'] = params['sim_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'simulation_run' in params:
body_params = params['simulation_run']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationRunDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def new_task(self, task, **kwargs):
"""
Create a new task
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.new_task(task, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TaskDetailed task: The task (required)
:return: TaskDetailed
If the method is called asynchronously,
returns the request thread.
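A synchronous usage sketch; the `description` attribute of the
TaskDetailed model is an assumption:
>>> task = TaskDetailed()
>>> task.description = 'nightly extraction'
>>> queued = api.new_task(task)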
"""
all_params = ['task']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'task' is set
if ('task' not in params) or (params['task'] is None):
raise ValueError("Missing the required parameter `task` when calling `new_task`")
resource_path = '/queue'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'task' in params:
body_params = params['task']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_bag_meta(self, store_name, bag_name, bag, **kwargs):
"""
Partial update of bag information (this only supports a few fields)
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_bag_meta(store_name, bag_name, bag, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:param object bag: Fields to update (required)
:param str trigger: Hooks to trigger
:return: BagDetailed
If the method is called asynchronously,
returns the request thread.
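A synchronous usage sketch sending the partial update as a plain
dict; the `description` field is an assumption about the supported
fields:
>>> bag = api.patch_bag_meta('main', 'run_001.bag',
>>> {'description': 'night-time test drive'})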
"""
all_params = ['store_name', 'bag_name', 'bag', 'trigger']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_bag_meta" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `patch_bag_meta`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `patch_bag_meta`")
# verify the required parameter 'bag' is set
if ('bag' not in params) or (params['bag'] is None):
raise ValueError("Missing the required parameter `bag` when calling `patch_bag_meta`")
resource_path = '/stores/{store_name}/bags/{bag_name}/meta'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
if 'trigger' in params:
query_params['trigger'] = params['trigger']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'bag' in params:
body_params = params['bag']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_task(self, task_identifier, task, **kwargs):
"""
Partial update of task (this only supports a few fields)
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_task(task_identifier, task, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str task_identifier: (required)
:param object task: Fields to update (required)
:return: TaskDetailed
If the method is called asynchronously,
returns the request thread.
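A synchronous usage sketch; the identifier and the `state` field are
assumptions:
>>> task = api.patch_task('7', {'state': 'cancelled'})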
"""
all_params = ['task_identifier', 'task']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'task_identifier' is set
if ('task_identifier' not in params) or (params['task_identifier'] is None):
raise ValueError("Missing the required parameter `task_identifier` when calling `patch_task`")
# verify the required parameter 'task' is set
if ('task' not in params) or (params['task'] is None):
raise ValueError("Missing the required parameter `task` when calling `patch_task`")
resource_path = '/queue/{task_identifier}'.replace('{format}', 'json')
path_params = {}
if 'task_identifier' in params:
path_params['task_identifier'] = params['task_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'task' in params:
body_params = params['task']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_bag_meta(self, store_name, bag_name, bag, **kwargs):
"""
Create/update bag information
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_bag_meta(store_name, bag_name, bag, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:param BagDetailed bag: Bag to register (required)
:param str trigger: Hooks to trigger
:return: BagDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'bag_name', 'bag', 'trigger']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_bag_meta" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `put_bag_meta`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `put_bag_meta`")
# verify the required parameter 'bag' is set
if ('bag' not in params) or (params['bag'] is None):
raise ValueError("Missing the required parameter `bag` when calling `put_bag_meta`")
resource_path = '/stores/{store_name}/bags/{bag_name}/meta'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
if 'trigger' in params:
query_params['trigger'] = params['trigger']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'bag' in params:
body_params = params['bag']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_bag_tags(self, store_name, bag_name, tags, **kwargs):
"""
Change bag tags
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_bag_tags(store_name, bag_name, tags, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param str bag_name: Name of the bag (required)
:param list[str] tags: List of tags (required)
:param bool auto_create: Create non-existing tags
:return: list[Tag]
If the method is called asynchronously,
returns the request thread.
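A synchronous usage sketch; store, bag, and tag names are
illustrative:
>>> tags = api.put_bag_tags('main', 'run_001.bag',
>>> ['sunny', 'highway'], auto_create=True)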
"""
all_params = ['store_name', 'bag_name', 'tags', 'auto_create']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_bag_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `put_bag_tags`")
# verify the required parameter 'bag_name' is set
if ('bag_name' not in params) or (params['bag_name'] is None):
raise ValueError("Missing the required parameter `bag_name` when calling `put_bag_tags`")
# verify the required parameter 'tags' is set
if ('tags' not in params) or (params['tags'] is None):
raise ValueError("Missing the required parameter `tags` when calling `put_bag_tags`")
resource_path = '/stores/{store_name}/bags/{bag_name}/tags'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
if 'bag_name' in params:
path_params['bag_name'] = params['bag_name']
query_params = {}
if 'auto_create' in params:
query_params['auto_create'] = params['auto_create']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'tags' in params:
body_params = params['tags']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Tag]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_configuration_key(self, config_key, config_value, **kwargs):
"""
Write configuration key
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_configuration_key(config_key, config_value, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str config_key: Configuration key to write (required)
:param object config_value: Configuration value (required)
:return: None
If the method is called asynchronously,
returns the request thread.
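A synchronous usage sketch; the key and value are illustrative, and
any JSON-serializable value is assumed to be accepted:
>>> api.put_configuration_key('retention_days', 30)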
"""
all_params = ['config_key', 'config_value']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_configuration_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config_key' is set
if ('config_key' not in params) or (params['config_key'] is None):
raise ValueError("Missing the required parameter `config_key` when calling `put_configuration_key`")
# verify the required parameter 'config_value' is set
if ('config_value' not in params) or (params['config_value'] is None):
raise ValueError("Missing the required parameter `config_value` when calling `put_configuration_key`")
resource_path = '/configuration/{config_key}'.replace('{format}', 'json')
path_params = {}
if 'config_key' in params:
path_params['config_key'] = params['config_key']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'config_value' in params:
body_params = params['config_value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_current_user(self, user, **kwargs):
"""
Change current user information
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_current_user(user, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param User user: The user information (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_current_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user' is set
if ('user' not in params) or (params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `put_current_user`")
resource_path = '/users/me'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user' in params:
body_params = params['user']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_extraction_configuration(self, config_name, configuration_obj, **kwargs):
"""
Create/update extraction configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_extraction_configuration(config_name, configuration_obj, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str config_name: Name of the configuration (required)
:param BagExtractionConfiguration configuration_obj: Configuration information (required)
:param bool block_on_existing: Only create a new one; block the request if one already exists
:return: BagExtractionConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['config_name', 'configuration_obj', 'block_on_existing']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_extraction_configuration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config_name' is set
if ('config_name' not in params) or (params['config_name'] is None):
raise ValueError("Missing the required parameter `config_name` when calling `put_extraction_configuration`")
# verify the required parameter 'configuration_obj' is set
if ('configuration_obj' not in params) or (params['configuration_obj'] is None):
raise ValueError("Missing the required parameter `configuration_obj` when calling `put_extraction_configuration`")
resource_path = '/extraction/configs/{config_name}'.replace('{format}', 'json')
path_params = {}
if 'config_name' in params:
path_params['config_name'] = params['config_name']
query_params = {}
if 'block_on_existing' in params:
query_params['block_on_existing'] = params['block_on_existing']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'configuration_obj' in params:
body_params = params['configuration_obj']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagExtractionConfiguration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_file_store(self, store_name, store, **kwargs):
"""
Create/update file store
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_file_store(store_name, store, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param FileStore store: Store information (required)
:param bool block_on_existing: Only create a new one; block the request if one already exists
:return: FileStore
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'store', 'block_on_existing']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_file_store" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `put_file_store`")
# verify the required parameter 'store' is set
if ('store' not in params) or (params['store'] is None):
raise ValueError("Missing the required parameter `store` when calling `put_file_store`")
resource_path = '/file-storage/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
if 'block_on_existing' in params:
query_params['block_on_existing'] = params['block_on_existing']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'store' in params:
body_params = params['store']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileStore',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_simulation(self, sim_identifier, simulation, **kwargs):
"""
Update a simulation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_simulation(sim_identifier, simulation, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int sim_identifier: (required)
:param SimulationDetailed simulation: Simulation (required)
:return: SimulationDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sim_identifier', 'simulation']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_simulation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sim_identifier' is set
if ('sim_identifier' not in params) or (params['sim_identifier'] is None):
raise ValueError("Missing the required parameter `sim_identifier` when calling `put_simulation`")
# verify the required parameter 'simulation' is set
if ('simulation' not in params) or (params['simulation'] is None):
raise ValueError("Missing the required parameter `simulation` when calling `put_simulation`")
resource_path = '/simulations/{sim_identifier}'.replace('{format}', 'json')
path_params = {}
if 'sim_identifier' in params:
path_params['sim_identifier'] = params['sim_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'simulation' in params:
body_params = params['simulation']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_simulation_environment(self, env_name, environment, **kwargs):
"""
Create/update a simulation environment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_simulation_environment(env_name, environment, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str env_name: Name of the simulation environment (required)
:param SimulationEnvironmentDetailed environment: Simulation environment (required)
:param bool block_on_existing: Only create a new one; block the request if one already exists
:return: SimulationEnvironmentDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['env_name', 'environment', 'block_on_existing']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_simulation_environment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'env_name' is set
if ('env_name' not in params) or (params['env_name'] is None):
raise ValueError("Missing the required parameter `env_name` when calling `put_simulation_environment`")
# verify the required parameter 'environment' is set
if ('environment' not in params) or (params['environment'] is None):
raise ValueError("Missing the required parameter `environment` when calling `put_simulation_environment`")
resource_path = '/simulation-environments/{env_name}'.replace('{format}', 'json')
path_params = {}
if 'env_name' in params:
path_params['env_name'] = params['env_name']
query_params = {}
if 'block_on_existing' in params:
query_params['block_on_existing'] = params['block_on_existing']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'environment' in params:
body_params = params['environment']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SimulationEnvironmentDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_store(self, store_name, store, **kwargs):
"""
Create/update store
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_store(store_name, store, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param BagStoreDetailed store: Store information (required)
:param bool block_on_existing: Only create a new one; block the request if one already exists
:return: BagStoreDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'store', 'block_on_existing']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_store" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `put_store`")
# verify the required parameter 'store' is set
if ('store' not in params) or (params['store'] is None):
raise ValueError("Missing the required parameter `store` when calling `put_store`")
resource_path = '/stores/{store_name}'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
if 'block_on_existing' in params:
query_params['block_on_existing'] = params['block_on_existing']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'store' in params:
body_params = params['store']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BagStoreDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_store_extraction_configs(self, store_name, config_list, **kwargs):
"""
Create/update the store's automatic extraction configuration list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_store_extraction_configs(store_name, config_list, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str store_name: Name of the store (required)
:param list[str] config_list: List of config names (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['store_name', 'config_list']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_store_extraction_configs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'store_name' is set
if ('store_name' not in params) or (params['store_name'] is None):
raise ValueError("Missing the required parameter `store_name` when calling `put_store_extraction_configs`")
# verify the required parameter 'config_list' is set
if ('config_list' not in params) or (params['config_list'] is None):
raise ValueError("Missing the required parameter `config_list` when calling `put_store_extraction_configs`")
resource_path = '/stores/{store_name}/auto-extraction-configs'.replace('{format}', 'json')
path_params = {}
if 'store_name' in params:
path_params['store_name'] = params['store_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'config_list' in params:
body_params = params['config_list']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_tag(self, tag, tag_obj, **kwargs):
"""
Create/update tag
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_tag(tag, tag_obj, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str tag: Name of the tag (required)
:param Tag tag_obj: Tag information (required)
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tag', 'tag_obj']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_tag" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'tag' is set
if ('tag' not in params) or (params['tag'] is None):
raise ValueError("Missing the required parameter `tag` when calling `put_tag`")
# verify the required parameter 'tag_obj' is set
if ('tag_obj' not in params) or (params['tag_obj'] is None):
raise ValueError("Missing the required parameter `tag_obj` when calling `put_tag`")
resource_path = '/tags/{tag}'.replace('{format}', 'json')
path_params = {}
if 'tag' in params:
path_params['tag'] = params['tag']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'tag_obj' in params:
body_params = params['tag_obj']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tag',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_task(self, task_identifier, task, **kwargs):
"""
Update a task
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_task(task_identifier, task, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str task_identifier: (required)
:param TaskDetailed task: The task (required)
:return: TaskDetailed
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['task_identifier', 'task']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'task_identifier' is set
if ('task_identifier' not in params) or (params['task_identifier'] is None):
raise ValueError("Missing the required parameter `task_identifier` when calling `put_task`")
# verify the required parameter 'task' is set
if ('task' not in params) or (params['task'] is None):
raise ValueError("Missing the required parameter `task` when calling `put_task`")
resource_path = '/queue/{task_identifier}'.replace('{format}', 'json')
path_params = {}
if 'task_identifier' in params:
path_params['task_identifier'] = params['task_identifier']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'task' in params:
body_params = params['task']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskDetailed',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_user_account(self, alias, user, **kwargs):
"""
Change user information
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_user_account(alias, user, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str alias: Alias of the user (required)
:param User user: The user information (required)
:param bool block_on_existing: Only create a new user account; block the request if one already exists (optional)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['alias', 'user', 'block_on_existing']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_user_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'alias' is set
if ('alias' not in params) or (params['alias'] is None):
raise ValueError("Missing the required parameter `alias` when calling `put_user_account`")
# verify the required parameter 'user' is set
if ('user' not in params) or (params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `put_user_account`")
resource_path = '/users/account/{alias}'.replace('{format}', 'json')
path_params = {}
if 'alias' in params:
path_params['alias'] = params['alias']
query_params = {}
if 'block_on_existing' in params:
query_params['block_on_existing'] = params['block_on_existing']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user' in params:
body_params = params['user']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
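# Usage sketch for the asynchronous calling convention shared by the methods
# above (hypothetical; assumes `api` is an instance of this generated client
# class and that User comes from the same generated package; with a callback,
# the request runs in a worker thread and the method returns that thread):
#
#     def on_done(response):
#         print('updated user:', response.alias)
#
#     thread = api.put_user_account('jdoe', User(alias='jdoe'),
#                                   callback=on_done)
#     thread.join()  # block until the response has been delivered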
| 38.696663
| 264
| 0.534979
| 20,106
| 206,408
| 5.289466
| 0.015916
| 0.044006
| 0.024203
| 0.023225
| 0.938166
| 0.924457
| 0.914029
| 0.91071
| 0.904626
| 0.894565
| 0
| 0.000086
| 0.380804
| 206,408
| 5,333
| 265
| 38.703919
| 0.832028
| 0.249985
| 0
| 0.844142
| 0
| 0
| 0.173276
| 0.019445
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023013
| false
| 0
| 0.002092
| 0
| 0.048117
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f11f5d4ac50af4da29a854dd0029b2fa8dfcf49e
| 2,957
|
py
|
Python
|
utils/loss.py
|
Andong-Li-speech/TaylorBeamformer
|
fc3d0e79a19a0feb2f6faf37c538219d7ea78433
|
[
"MIT"
] | 4
|
2022-03-29T08:50:37.000Z
|
2022-03-30T12:00:22.000Z
|
utils/loss.py
|
Andong-Li-speech/TaylorBeamformer
|
fc3d0e79a19a0feb2f6faf37c538219d7ea78433
|
[
"MIT"
] | null | null | null |
utils/loss.py
|
Andong-Li-speech/TaylorBeamformer
|
fc3d0e79a19a0feb2f6faf37c538219d7ea78433
|
[
"MIT"
] | null | null | null |
import torch
class SpatialFilterLoss(object):
def __init__(self, alpha, l_type):
self.alpha = alpha
self.l_type = l_type
def __call__(self, resi, frame_list):
"""
resi: (B,T,F,2), frame_list: list
"""
b_size, seq_len, freq_num, _ = resi.shape
mask_for_loss = []
with torch.no_grad():
for i in range(b_size):
tmp_mask = torch.ones((frame_list[i], freq_num, 2), dtype=resi.dtype)
mask_for_loss.append(tmp_mask)
mask_for_loss = torch.nn.utils.rnn.pad_sequence(mask_for_loss, batch_first=True).to(resi.device)
mag_mask_for_loss = mask_for_loss[...,0]
resi_mag = torch.norm(resi, dim=-1)
if self.l_type == "L1" or self.l_type == "l1":
loss_com = (torch.abs(resi) * mask_for_loss).sum() / mask_for_loss.sum()
loss_mag = (torch.abs(resi_mag) * mag_mask_for_loss).sum() / mag_mask_for_loss.sum()
elif self.l_type == "L2" or self.l_type == "l2":
loss_com = (torch.square(resi) * mask_for_loss).sum() / mask_for_loss.sum()
loss_mag = (torch.square(resi_mag) * mag_mask_for_loss).sum() / mag_mask_for_loss.sum()
else:
raise RuntimeError("only L1 and L2 are supported")
return self.alpha * loss_com + (1 - self.alpha) * loss_mag
class ComMagEuclideanLoss(object):
def __init__(self, alpha, l_type):
self.alpha = alpha
self.l_type = l_type
def __call__(self, est, label, frame_list):
"""
est: (B,T,F,2)
label: (B,T,F,2)
frame_list: list of valid frame counts per utterance
alpha: scalar
l_type: str, L1 or L2
"""
b_size, seq_len, freq_num, _ = est.shape
mask_for_loss = []
with torch.no_grad():
for i in range(b_size):
tmp_mask = torch.ones((frame_list[i], freq_num, 2), dtype=est.dtype)
mask_for_loss.append(tmp_mask)
mask_for_loss = torch.nn.utils.rnn.pad_sequence(mask_for_loss, batch_first=True).to(est.device)
mag_mask_for_loss = mask_for_loss[...,0]
est_mag, label_mag = torch.norm(est, dim=-1), torch.norm(label, dim=-1)
if self.l_type == "L1" or self.l_type == "l1":
loss_com = (torch.abs(est - label) * mask_for_loss).sum() / mask_for_loss.sum()
loss_mag = (torch.abs(est_mag - label_mag) * mag_mask_for_loss).sum() / mag_mask_for_loss.sum()
elif self.l_type == "L2" or self.l_type == "l2":
loss_com = (torch.square(est - label) * mask_for_loss).sum() / mask_for_loss.sum()
loss_mag = (torch.square(est_mag - label_mag) * mag_mask_for_loss).sum() / mag_mask_for_loss.sum()
else:
raise RuntimeError("only L1 and L2 are supported!")
return self.alpha * loss_com + (1 - self.alpha) * loss_mag
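# Minimal smoke test (hypothetical shapes: a batch of 4 utterances, up to 100
# frames, 161 frequency bins, real/imaginary parts stacked in the last dim;
# frame_list gives the number of valid frames per utterance before padding):
if __name__ == "__main__":
    est = torch.randn(4, 100, 161, 2)
    label = torch.randn(4, 100, 161, 2)
    frame_list = [100, 90, 80, 70]
    criterion = ComMagEuclideanLoss(alpha=0.5, l_type="L2")
    print(criterion(est, label, frame_list))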
| 45.492308
| 111
| 0.578627
| 431
| 2,957
| 3.63109
| 0.174014
| 0.12524
| 0.196805
| 0.143131
| 0.863898
| 0.863898
| 0.840895
| 0.819169
| 0.819169
| 0.778275
| 0
| 0.012422
| 0.292188
| 2,957
| 64
| 112
| 46.203125
| 0.735308
| 0.039905
| 0
| 0.510638
| 0
| 0
| 0.02732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.021277
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
74c466143fdd7defd80f9b3697bd714877616635
| 191,288
|
py
|
Python
|
pyboto3/apigateway.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/apigateway.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/apigateway.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo and you would normally invoke the
operation as client.create_foo(**kwargs), then, provided the
create_foo operation can be paginated, you can use
client.get_paginator('create_foo').
"""
pass
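# Usage sketch (assumes boto3 is installed and credentials are configured;
# get_rest_apis is one of this service's paginated operations):
#
#     import boto3
#     client = boto3.client('apigateway')
#     if client.can_paginate('get_rest_apis'):
#         paginator = client.get_paginator('get_rest_apis')
#         for page in paginator.paginate():
#             for api in page['items']:
#                 print(api['id'], api['name'])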
def create_api_key(name=None, description=None, enabled=None, generateDistinctId=None, value=None, stageKeys=None, customerId=None):
"""
Create an ApiKey resource.
See also: AWS API Documentation
:example: response = client.create_api_key(
name='string',
description='string',
enabled=True|False,
generateDistinctId=True|False,
value='string',
stageKeys=[
{
'restApiId': 'string',
'stageName': 'string'
},
],
customerId='string'
)
:type name: string
:param name: The name of the ApiKey .
:type description: string
:param description: The description of the ApiKey .
:type enabled: boolean
:param enabled: Specifies whether the ApiKey can be used by callers.
:type generateDistinctId: boolean
:param generateDistinctId: Specifies whether (true) or not (false) the key identifier is distinct from the created API key value.
:type value: string
:param value: Specifies a value of the API key.
:type stageKeys: list
:param stageKeys: DEPRECATED FOR USAGE PLANS - Specifies stages associated with the API key.
(dict) --A reference to a unique stage identified in the format {restApiId}/{stage} .
restApiId (string) --The identifier of the RestApi that the stage key is associated with.
stageName (string) --The stage name in the RestApi that the stage key references.
:type customerId: string
:param customerId: An AWS Marketplace customer identifier , when integrating with the AWS SaaS Marketplace.
:rtype: dict
:return: {
'id': 'string',
'value': 'string',
'name': 'string',
'customerId': 'string',
'description': 'string',
'enabled': True|False,
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1),
'stageKeys': [
'string',
]
}
:returns:
(string) --
"""
pass
def create_authorizer(restApiId=None, name=None, type=None, providerARNs=None, authType=None, authorizerUri=None, authorizerCredentials=None, identitySource=None, identityValidationExpression=None, authorizerResultTtlInSeconds=None):
"""
Adds a new Authorizer resource to an existing RestApi resource.
See also: AWS API Documentation
:example: response = client.create_authorizer(
restApiId='string',
name='string',
type='TOKEN'|'COGNITO_USER_POOLS',
providerARNs=[
'string',
],
authType='string',
authorizerUri='string',
authorizerCredentials='string',
identitySource='string',
identityValidationExpression='string',
authorizerResultTtlInSeconds=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier under which the Authorizer will be created.
:type name: string
:param name: [REQUIRED]
[Required] The name of the authorizer.
:type type: string
:param type: [REQUIRED]
[Required] The type of the authorizer.
:type providerARNs: list
:param providerARNs: A list of the Cognito Your User Pool authorizer's provider ARNs.
(string) --
:type authType: string
:param authType: Optional customer-defined field, used in Swagger imports/exports. Has no functional impact.
:type authorizerUri: string
:param authorizerUri: [Required] Specifies the authorizer's Uniform Resource Identifier (URI).
:type authorizerCredentials: string
:param authorizerCredentials: Specifies the credentials required for the authorizer, if any.
:type identitySource: string
:param identitySource: [REQUIRED]
[Required] The source of the identity in an incoming request.
:type identityValidationExpression: string
:param identityValidationExpression: A validation expression for the incoming identity.
:type authorizerResultTtlInSeconds: integer
:param authorizerResultTtlInSeconds: The TTL of cached authorizer results.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'type': 'TOKEN'|'COGNITO_USER_POOLS',
'providerARNs': [
'string',
],
'authType': 'string',
'authorizerUri': 'string',
'authorizerCredentials': 'string',
'identitySource': 'string',
'identityValidationExpression': 'string',
'authorizerResultTtlInSeconds': 123
}
:returns:
(string) --
"""
pass
def create_base_path_mapping(domainName=None, basePath=None, restApiId=None, stage=None):
"""
Creates a new BasePathMapping resource.
See also: AWS API Documentation
:example: response = client.create_base_path_mapping(
domainName='string',
basePath='string',
restApiId='string',
stage='string'
)
:type domainName: string
:param domainName: [REQUIRED]
The domain name of the BasePathMapping resource to create.
:type basePath: string
:param basePath: The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify a base path name after the domain name.
:type restApiId: string
:param restApiId: [REQUIRED]
The name of the API that you want to apply this mapping to.
:type stage: string
:param stage: The name of the API's stage that you want to use for this mapping. Leave this blank if you do not want callers to explicitly specify the stage name after any base path name.
:rtype: dict
:return: {
'basePath': 'string',
'restApiId': 'string',
'stage': 'string'
}
"""
pass
def create_deployment(restApiId=None, stageName=None, stageDescription=None, description=None, cacheClusterEnabled=None, cacheClusterSize=None, variables=None):
"""
Creates a Deployment resource, which makes a specified RestApi callable over the internet.
See also: AWS API Documentation
:example: response = client.create_deployment(
restApiId='string',
stageName='string',
stageDescription='string',
description='string',
cacheClusterEnabled=True|False,
cacheClusterSize='0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',
variables={
'string': 'string'
}
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi resource identifier for the Deployment resource to create.
:type stageName: string
:param stageName: The name of the Stage resource for the Deployment resource to create.
:type stageDescription: string
:param stageDescription: The description of the Stage resource for the Deployment resource to create.
:type description: string
:param description: The description for the Deployment resource to create.
:type cacheClusterEnabled: boolean
:param cacheClusterEnabled: Enables a cache cluster for the Stage resource specified in the input.
:type cacheClusterSize: string
:param cacheClusterSize: Specifies the cache cluster size for the Stage resource specified in the input, if a cache cluster is enabled.
:type variables: dict
:param variables: A map that defines the stage variables for the Stage resource that is associated with the new deployment. Variable names can have alphanumeric and underscore characters, and the values must match [A-Za-z0-9-._~:/?#=,]+ .
(string) --
(string) --
:rtype: dict
:return: {
'id': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'apiSummary': {
'string': {
'string': {
'authorizationType': 'string',
'apiKeyRequired': True|False
}
}
}
}
"""
pass
def create_documentation_part(restApiId=None, location=None, properties=None):
"""
See also: AWS API Documentation
:example: response = client.create_documentation_part(
restApiId='string',
location={
'type': 'API'|'AUTHORIZER'|'MODEL'|'RESOURCE'|'METHOD'|'PATH_PARAMETER'|'QUERY_PARAMETER'|'REQUEST_HEADER'|'REQUEST_BODY'|'RESPONSE'|'RESPONSE_HEADER'|'RESPONSE_BODY',
'path': 'string',
'method': 'string',
'statusCode': 'string',
'name': 'string'
},
properties='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of an API of the to-be-created documentation part.
:type location: dict
:param location: [REQUIRED]
[Required] The location of the targeted API entity of the to-be-created documentation part.
type (string) -- [REQUIRED]The type of API entity to which the documentation content applies. It is a valid and required field for API entity types of API , AUTHORIZER , MODEL , RESOURCE , METHOD , PATH_PARAMETER , QUERY_PARAMETER , REQUEST_HEADER , REQUEST_BODY , RESPONSE , RESPONSE_HEADER , and RESPONSE_BODY . Content inheritance does not apply to any entity of the API , AUTHORIZER , METHOD , MODEL , REQUEST_BODY , or RESOURCE type.
path (string) --The URL path of the target. It is a valid field for the API entity types of RESOURCE , METHOD , PATH_PARAMETER , QUERY_PARAMETER , REQUEST_HEADER , REQUEST_BODY , RESPONSE , RESPONSE_HEADER , and RESPONSE_BODY . The default value is / for the root resource. When an applicable child entity inherits the content of another entity of the same type with more general specifications of the other location attributes, the child entity's path attribute must match that of the parent entity as a prefix.
method (string) --The HTTP verb of a method. It is a valid field for the API entity types of METHOD , PATH_PARAMETER , QUERY_PARAMETER , REQUEST_HEADER , REQUEST_BODY , RESPONSE , RESPONSE_HEADER , and RESPONSE_BODY . The default value is * for any method. When an applicable child entity inherits the content of an entity of the same type with more general specifications of the other location attributes, the child entity's method attribute must match that of the parent entity exactly.
statusCode (string) --The HTTP status code of a response. It is a valid field for the API entity types of RESPONSE , RESPONSE_HEADER , and RESPONSE_BODY . The default value is * for any status code. When an applicable child entity inherits the content of an entity of the same type with more general specifications of the other location attributes, the child entity's statusCode attribute must match that of the parent entity exactly.
name (string) --The name of the targeted API entity. It is a valid and required field for the API entity types of AUTHORIZER , MODEL , PATH_PARAMETER , QUERY_PARAMETER , REQUEST_HEADER , REQUEST_BODY and RESPONSE_HEADER . It is an invalid field for any other entity type.
:type properties: string
:param properties: [REQUIRED]
[Required] The new documentation content map of the targeted API entity. Enclosed key-value pairs are API-specific, but only Swagger-compliant key-value pairs can be exported and, hence, published.
:rtype: dict
:return: {
'id': 'string',
'location': {
'type': 'API'|'AUTHORIZER'|'MODEL'|'RESOURCE'|'METHOD'|'PATH_PARAMETER'|'QUERY_PARAMETER'|'REQUEST_HEADER'|'REQUEST_BODY'|'RESPONSE'|'RESPONSE_HEADER'|'RESPONSE_BODY',
'path': 'string',
'method': 'string',
'statusCode': 'string',
'name': 'string'
},
'properties': 'string'
}
"""
pass
def create_documentation_version(restApiId=None, documentationVersion=None, stageName=None, description=None):
"""
See also: AWS API Documentation
:example: response = client.create_documentation_version(
restApiId='string',
documentationVersion='string',
stageName='string',
description='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] Specifies the API identifier of the to-be-created documentation version.
:type documentationVersion: string
:param documentationVersion: [REQUIRED]
[Required] The version identifier of the new snapshot.
:type stageName: string
:param stageName: The stage name to be associated with the new documentation snapshot.
:type description: string
:param description: A description about the new documentation snapshot.
:rtype: dict
:return: {
'version': 'string',
'createdDate': datetime(2015, 1, 1),
'description': 'string'
}
"""
pass
def create_domain_name(domainName=None, certificateName=None, certificateBody=None, certificatePrivateKey=None, certificateChain=None, certificateArn=None):
"""
Creates a new domain name.
See also: AWS API Documentation
:example: response = client.create_domain_name(
domainName='string',
certificateName='string',
certificateBody='string',
certificatePrivateKey='string',
certificateChain='string',
certificateArn='string'
)
:type domainName: string
:param domainName: [REQUIRED]
(Required) The name of the DomainName resource.
:type certificateName: string
:param certificateName: The user-friendly name of the certificate.
:type certificateBody: string
:param certificateBody: [Deprecated] The body of the server certificate provided by your certificate authority.
:type certificatePrivateKey: string
:param certificatePrivateKey: [Deprecated] Your certificate's private key.
:type certificateChain: string
:param certificateChain: [Deprecated] The intermediate certificates and optionally the root certificate, one after the other without any blank lines. If you include the root certificate, your certificate chain must start with intermediate certificates and end with the root certificate. Use the intermediate certificates that were provided by your certificate authority. Do not include any intermediaries that are not in the chain of trust path.
:type certificateArn: string
:param certificateArn: The reference to an AWS-managed certificate. AWS Certificate Manager is the only supported source.
:rtype: dict
:return: {
'domainName': 'string',
'certificateName': 'string',
'certificateArn': 'string',
'certificateUploadDate': datetime(2015, 1, 1),
'distributionDomainName': 'string'
}
"""
pass
def create_model(restApiId=None, name=None, description=None, schema=None, contentType=None):
"""
Adds a new Model resource to an existing RestApi resource.
See also: AWS API Documentation
:example: response = client.create_model(
restApiId='string',
name='string',
description='string',
schema='string',
contentType='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier under which the Model will be created.
:type name: string
:param name: [REQUIRED]
The name of the model.
:type description: string
:param description: The description of the model.
:type schema: string
:param schema: The schema for the model. For application/json models, this should be a JSON-schema draft v4 model.
:type contentType: string
:param contentType: [REQUIRED]
The content-type for the model.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'schema': 'string',
'contentType': 'string'
}
"""
pass
def create_request_validator(restApiId=None, name=None, validateRequestBody=None, validateRequestParameters=None):
"""
Creates a RequestValidator of a given RestApi.
See also: AWS API Documentation
:example: response = client.create_request_validator(
restApiId='string',
name='string',
validateRequestBody=True|False,
validateRequestParameters=True|False
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of the RestApi for which the RequestValidator is created.
:type name: string
:param name: The name of the to-be-created RequestValidator .
:type validateRequestBody: boolean
:param validateRequestBody: A Boolean flag to indicate whether to validate the request body according to the configured model schema for the method (true) or not (false).
:type validateRequestParameters: boolean
:param validateRequestParameters: A Boolean flag to indicate whether to validate request parameters (true) or not (false).
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'validateRequestBody': True|False,
'validateRequestParameters': True|False
}
"""
pass
def create_resource(restApiId=None, parentId=None, pathPart=None):
"""
Creates a Resource resource.
See also: AWS API Documentation
:example: response = client.create_resource(
restApiId='string',
parentId='string',
pathPart='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi for the resource.
:type parentId: string
:param parentId: [REQUIRED]
The parent resource's identifier.
:type pathPart: string
:param pathPart: [REQUIRED]
The last path segment for this resource.
:rtype: dict
:return: {
'id': 'string',
'parentId': 'string',
'pathPart': 'string',
'path': 'string',
'resourceMethods': {
'string': {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
}
}
:returns:
(string) --
(boolean) --
"""
pass
def create_rest_api(name=None, description=None, version=None, cloneFrom=None, binaryMediaTypes=None):
"""
Creates a new RestApi resource.
See also: AWS API Documentation
:example: response = client.create_rest_api(
name='string',
description='string',
version='string',
cloneFrom='string',
binaryMediaTypes=[
'string',
]
)
:type name: string
:param name: [REQUIRED]
The name of the RestApi .
:type description: string
:param description: The description of the RestApi .
:type version: string
:param version: A version identifier for the API.
:type cloneFrom: string
:param cloneFrom: The ID of the RestApi that you want to clone from.
:type binaryMediaTypes: list
:param binaryMediaTypes: The list of binary media types supported by the RestApi . By default, the RestApi supports only UTF-8-encoded text payloads.
(string) --
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'version': 'string',
'warnings': [
'string',
],
'binaryMediaTypes': [
'string',
]
}
:returns:
(string) --
"""
pass
def create_stage(restApiId=None, stageName=None, deploymentId=None, description=None, cacheClusterEnabled=None, cacheClusterSize=None, variables=None, documentationVersion=None):
"""
Creates a new Stage resource that references a pre-existing Deployment for the API.
See also: AWS API Documentation
:example: response = client.create_stage(
restApiId='string',
stageName='string',
deploymentId='string',
description='string',
cacheClusterEnabled=True|False,
cacheClusterSize='0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',
variables={
'string': 'string'
},
documentationVersion='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Stage resource to create.
:type stageName: string
:param stageName: [REQUIRED]
The name for the Stage resource.
:type deploymentId: string
:param deploymentId: [REQUIRED]
The identifier of the Deployment resource for the Stage resource.
:type description: string
:param description: The description of the Stage resource.
:type cacheClusterEnabled: boolean
:param cacheClusterEnabled: Whether cache clustering is enabled for the stage.
:type cacheClusterSize: string
:param cacheClusterSize: The stage's cache cluster size.
:type variables: dict
:param variables: A map that defines the stage variables for the new Stage resource. Variable names can have alphanumeric and underscore characters, and the values must match [A-Za-z0-9-._~:/?#=,]+ .
(string) --
(string) --
:type documentationVersion: string
:param documentationVersion: The version of the associated API documentation.
:rtype: dict
:return: {
'deploymentId': 'string',
'clientCertificateId': 'string',
'stageName': 'string',
'description': 'string',
'cacheClusterEnabled': True|False,
'cacheClusterSize': '0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',
'cacheClusterStatus': 'CREATE_IN_PROGRESS'|'AVAILABLE'|'DELETE_IN_PROGRESS'|'NOT_AVAILABLE'|'FLUSH_IN_PROGRESS',
'methodSettings': {
'string': {
'metricsEnabled': True|False,
'loggingLevel': 'string',
'dataTraceEnabled': True|False,
'throttlingBurstLimit': 123,
'throttlingRateLimit': 123.0,
'cachingEnabled': True|False,
'cacheTtlInSeconds': 123,
'cacheDataEncrypted': True|False,
'requireAuthorizationForCacheControl': True|False,
'unauthorizedCacheControlHeaderStrategy': 'FAIL_WITH_403'|'SUCCEED_WITH_RESPONSE_HEADER'|'SUCCEED_WITHOUT_RESPONSE_HEADER'
}
},
'variables': {
'string': 'string'
},
'documentationVersion': 'string',
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1)
}
:returns:
(string) --
(string) --
"""
pass
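# Workflow sketch (hypothetical names; shows how the create_* calls above
# typically chain together: an API is created, deployed, and then given a
# stage that points at the deployment):
#
#     api = client.create_rest_api(name='demo-api')
#     deployment = client.create_deployment(restApiId=api['id'])
#     client.create_stage(restApiId=api['id'], stageName='dev',
#                         deploymentId=deployment['id'])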
def create_usage_plan(name=None, description=None, apiStages=None, throttle=None, quota=None):
"""
Creates a usage plan with the throttle and quota limits, as well as the associated API stages, specified in the payload.
See also: AWS API Documentation
:example: response = client.create_usage_plan(
name='string',
description='string',
apiStages=[
{
'apiId': 'string',
'stage': 'string'
},
],
throttle={
'burstLimit': 123,
'rateLimit': 123.0
},
quota={
'limit': 123,
'offset': 123,
'period': 'DAY'|'WEEK'|'MONTH'
}
)
:type name: string
:param name: [REQUIRED]
The name of the usage plan.
:type description: string
:param description: The description of the usage plan.
:type apiStages: list
:param apiStages: The associated API stages of the usage plan.
(dict) --API stage name of the associated API stage in a usage plan.
apiId (string) --API Id of the associated API stage in a usage plan.
stage (string) --API stage name of the associated API stage in a usage plan.
:type throttle: dict
:param throttle: The throttling limits of the usage plan.
burstLimit (integer) --The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
rateLimit (float) --The API request steady-state rate limit.
:type quota: dict
:param quota: The quota of the usage plan.
limit (integer) --The maximum number of requests that can be made in a given time period.
offset (integer) --The number of requests subtracted from the given limit in the initial time period.
period (string) --The time period in which the limit applies. Valid values are 'DAY', 'WEEK' or 'MONTH'.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'apiStages': [
{
'apiId': 'string',
'stage': 'string'
},
],
'throttle': {
'burstLimit': 123,
'rateLimit': 123.0
},
'quota': {
'limit': 123,
'offset': 123,
'period': 'DAY'|'WEEK'|'MONTH'
},
'productCode': 'string'
}
"""
pass
def create_usage_plan_key(usagePlanId=None, keyId=None, keyType=None):
"""
Creates a usage plan key for adding an existing API key to a usage plan.
See also: AWS API Documentation
:example: response = client.create_usage_plan_key(
usagePlanId='string',
keyId='string',
keyType='string'
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the UsagePlan resource representing the usage plan containing the to-be-created UsagePlanKey resource representing a plan customer.
:type keyId: string
:param keyId: [REQUIRED]
The identifier of a UsagePlanKey resource for a plan customer.
:type keyType: string
:param keyType: [REQUIRED]
The type of a UsagePlanKey resource for a plan customer.
:rtype: dict
:return: {
'id': 'string',
'type': 'string',
'value': 'string',
'name': 'string'
}
"""
pass
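# Workflow sketch (hypothetical names; shows how an API key is attached to a
# usage plan via the calls above; API keys use keyType='API_KEY'):
#
#     key = client.create_api_key(name='partner-key', enabled=True)
#     plan = client.create_usage_plan(name='basic',
#                                     throttle={'burstLimit': 10,
#                                               'rateLimit': 5.0})
#     client.create_usage_plan_key(usagePlanId=plan['id'],
#                                  keyId=key['id'],
#                                  keyType='API_KEY')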
def delete_api_key(apiKey=None):
"""
Deletes the ApiKey resource.
See also: AWS API Documentation
:example: response = client.delete_api_key(
apiKey='string'
)
:type apiKey: string
:param apiKey: [REQUIRED]
The identifier of the ApiKey resource to be deleted.
"""
pass
def delete_authorizer(restApiId=None, authorizerId=None):
"""
Deletes an existing Authorizer resource.
See also: AWS API Documentation
:example: response = client.delete_authorizer(
restApiId='string',
authorizerId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Authorizer resource.
:type authorizerId: string
:param authorizerId: [REQUIRED]
The identifier of the Authorizer resource.
"""
pass
def delete_base_path_mapping(domainName=None, basePath=None):
"""
Deletes the BasePathMapping resource.
See also: AWS API Documentation
:example: response = client.delete_base_path_mapping(
domainName='string',
basePath='string'
)
:type domainName: string
:param domainName: [REQUIRED]
The domain name of the BasePathMapping resource to delete.
:type basePath: string
:param basePath: [REQUIRED]
The base path name of the BasePathMapping resource to delete.
"""
pass
def delete_client_certificate(clientCertificateId=None):
"""
Deletes the ClientCertificate resource.
See also: AWS API Documentation
:example: response = client.delete_client_certificate(
clientCertificateId='string'
)
:type clientCertificateId: string
:param clientCertificateId: [REQUIRED]
The identifier of the ClientCertificate resource to be deleted.
"""
pass
def delete_deployment(restApiId=None, deploymentId=None):
"""
Deletes a Deployment resource. Deleting a deployment will only succeed if there are no Stage resources associated with it.
See also: AWS API Documentation
:example: response = client.delete_deployment(
restApiId='string',
deploymentId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Deployment resource to delete.
:type deploymentId: string
:param deploymentId: [REQUIRED]
The identifier of the Deployment resource to delete.
"""
pass
def delete_documentation_part(restApiId=None, documentationPartId=None):
"""
See also: AWS API Documentation
:example: response = client.delete_documentation_part(
restApiId='string',
documentationPartId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] Specifies the identifier of an API of the to-be-deleted documentation part.
:type documentationPartId: string
:param documentationPartId: [REQUIRED]
[Required] The identifier of the to-be-deleted documentation part.
"""
pass
def delete_documentation_version(restApiId=None, documentationVersion=None):
"""
See also: AWS API Documentation
:example: response = client.delete_documentation_version(
restApiId='string',
documentationVersion='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of an API of a to-be-deleted documentation snapshot.
:type documentationVersion: string
:param documentationVersion: [REQUIRED]
[Required] The version identifier of a to-be-deleted documentation snapshot.
"""
pass
def delete_domain_name(domainName=None):
"""
Deletes the DomainName resource.
See also: AWS API Documentation
:example: response = client.delete_domain_name(
domainName='string'
)
:type domainName: string
:param domainName: [REQUIRED]
The name of the DomainName resource to be deleted.
"""
pass
def delete_integration(restApiId=None, resourceId=None, httpMethod=None):
"""
Deletes an integration.
See also: AWS API Documentation
:example: response = client.delete_integration(
restApiId='string',
resourceId='string',
httpMethod='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a delete integration request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a delete integration request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a delete integration request's HTTP method.
"""
pass
def delete_integration_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None):
"""
Deletes an integration response.
See also: AWS API Documentation
:example: response = client.delete_integration_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a delete integration response request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a delete integration response request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a delete integration response request's HTTP method.
:type statusCode: string
:param statusCode: [REQUIRED]
Specifies a delete integration response request's status code.
"""
pass
def delete_method(restApiId=None, resourceId=None, httpMethod=None):
"""
Deletes an existing Method resource.
See also: AWS API Documentation
:example: response = client.delete_method(
restApiId='string',
resourceId='string',
httpMethod='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Method resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the Method resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
The HTTP verb of the Method resource.
"""
pass
def delete_method_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None):
"""
Deletes an existing MethodResponse resource.
See also: AWS API Documentation
:example: response = client.delete_method_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the MethodResponse resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the MethodResponse resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
The HTTP verb of the Method resource.
:type statusCode: string
:param statusCode: [REQUIRED]
The status code identifier for the MethodResponse resource.
"""
pass
def delete_model(restApiId=None, modelName=None):
"""
Deletes a model.
See also: AWS API Documentation
:example: response = client.delete_model(
restApiId='string',
modelName='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi under which the model will be deleted.
:type modelName: string
:param modelName: [REQUIRED]
The name of the model to delete.
"""
pass
def delete_request_validator(restApiId=None, requestValidatorId=None):
"""
Deletes a RequestValidator of a given RestApi .
See also: AWS API Documentation
:example: response = client.delete_request_validator(
restApiId='string',
requestValidatorId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of the RestApi from which the given RequestValidator is deleted.
:type requestValidatorId: string
:param requestValidatorId: [REQUIRED]
[Required] The identifier of the RequestValidator to be deleted.
"""
pass
def delete_resource(restApiId=None, resourceId=None):
"""
Deletes a Resource resource.
See also: AWS API Documentation
:example: response = client.delete_resource(
restApiId='string',
resourceId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Resource resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The identifier of the Resource resource.
"""
pass
def delete_rest_api(restApiId=None):
"""
Deletes the specified API.
See also: AWS API Documentation
:example: response = client.delete_rest_api(
restApiId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The ID of the RestApi you want to delete.
"""
pass
def delete_stage(restApiId=None, stageName=None):
"""
Deletes a Stage resource.
See also: AWS API Documentation
:example: response = client.delete_stage(
restApiId='string',
stageName='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Stage resource to delete.
:type stageName: string
:param stageName: [REQUIRED]
The name of the Stage resource to delete.
"""
pass
def delete_usage_plan(usagePlanId=None):
"""
Deletes a usage plan of a given plan Id.
See also: AWS API Documentation
:example: response = client.delete_usage_plan(
usagePlanId='string'
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the to-be-deleted usage plan.
"""
pass
def delete_usage_plan_key(usagePlanId=None, keyId=None):
"""
Deletes a usage plan key and removes the underlying API key from the associated usage plan.
See also: AWS API Documentation
:example: response = client.delete_usage_plan_key(
usagePlanId='string',
keyId='string'
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the UsagePlan resource representing the usage plan containing the to-be-deleted UsagePlanKey resource representing a plan customer.
:type keyId: string
:param keyId: [REQUIRED]
The Id of the UsagePlanKey resource to be deleted.
"""
pass
def flush_stage_authorizers_cache(restApiId=None, stageName=None):
"""
Flushes all authorizer cache entries on a stage.
See also: AWS API Documentation
:example: response = client.flush_stage_authorizers_cache(
restApiId='string',
stageName='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The API identifier of the stage to flush.
:type stageName: string
:param stageName: [REQUIRED]
The name of the stage to flush.
"""
pass
def flush_stage_cache(restApiId=None, stageName=None):
"""
Flushes a stage's cache.
See also: AWS API Documentation
:example: response = client.flush_stage_cache(
restApiId='string',
stageName='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the API whose stage cache is to be flushed.
:type stageName: string
:param stageName: [REQUIRED]
The name of the stage whose cache is to be flushed.
"""
pass
def generate_client_certificate(description=None):
"""
Generates a ClientCertificate resource.
See also: AWS API Documentation
:example: response = client.generate_client_certificate(
description='string'
)
:type description: string
:param description: The description of the ClientCertificate .
:rtype: dict
:return: {
'clientCertificateId': 'string',
'description': 'string',
'pemEncodedCertificate': 'string',
'createdDate': datetime(2015, 1, 1),
'expirationDate': datetime(2015, 1, 1)
}
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned URL given a client, its method, and arguments.
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned URL is valid
for. By default it expires in one hour (3600 seconds).
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
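# Usage sketch (hypothetical parameters; generate_presigned_url is generic to
# all boto3 clients, including this one):
#
#     url = client.generate_presigned_url(
#         ClientMethod='get_rest_apis',
#         Params={'limit': 25},
#         ExpiresIn=300,
#     )
#     print(url)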
def get_account():
"""
Gets information about the current Account resource.
See also: AWS API Documentation
:example: response = client.get_account()
:rtype: dict
:return: {
'cloudwatchRoleArn': 'string',
'throttleSettings': {
'burstLimit': 123,
'rateLimit': 123.0
},
'features': [
'string',
],
'apiKeyVersion': 'string'
}
:returns:
(string) --
"""
pass
def get_api_key(apiKey=None, includeValue=None):
"""
Gets information about the current ApiKey resource.
See also: AWS API Documentation
:example: response = client.get_api_key(
apiKey='string',
includeValue=True|False
)
:type apiKey: string
:param apiKey: [REQUIRED]
The identifier of the ApiKey resource.
:type includeValue: boolean
:param includeValue: A boolean flag to specify whether (true) or not (false) the result contains the key value.
:rtype: dict
:return: {
'id': 'string',
'value': 'string',
'name': 'string',
'customerId': 'string',
'description': 'string',
'enabled': True|False,
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1),
'stageKeys': [
'string',
]
}
:returns:
(string) --
"""
pass
def get_api_keys(position=None, limit=None, nameQuery=None, customerId=None, includeValues=None):
"""
Gets information about the current ApiKeys resource.
See also: AWS API Documentation
:example: response = client.get_api_keys(
position='string',
limit=123,
nameQuery='string',
customerId='string',
includeValues=True|False
)
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of ApiKeys to get information about.
:type nameQuery: string
:param nameQuery: The name of queried API keys.
:type customerId: string
:param customerId: The identifier of a customer in AWS Marketplace or an external system, such as a developer portal.
:type includeValues: boolean
:param includeValues: A boolean flag to specify whether (true) or not (false) the result contains key values.
:rtype: dict
:return: {
'warnings': [
'string',
],
'position': 'string',
'items': [
{
'id': 'string',
'value': 'string',
'name': 'string',
'customerId': 'string',
'description': 'string',
'enabled': True|False,
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1),
'stageKeys': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass
def get_authorizer(restApiId=None, authorizerId=None):
"""
Describe an existing Authorizer resource.
See also: AWS API Documentation
:example: response = client.get_authorizer(
restApiId='string',
authorizerId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Authorizer resource.
:type authorizerId: string
:param authorizerId: [REQUIRED]
The identifier of the Authorizer resource.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'type': 'TOKEN'|'COGNITO_USER_POOLS',
'providerARNs': [
'string',
],
'authType': 'string',
'authorizerUri': 'string',
'authorizerCredentials': 'string',
'identitySource': 'string',
'identityValidationExpression': 'string',
'authorizerResultTtlInSeconds': 123
}
:returns:
(string) --
"""
pass
def get_authorizers(restApiId=None, position=None, limit=None):
"""
Describe an existing Authorizers resource.
See also: AWS API Documentation
:example: response = client.get_authorizers(
restApiId='string',
position='string',
limit=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Authorizers resource.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'name': 'string',
'type': 'TOKEN'|'COGNITO_USER_POOLS',
'providerARNs': [
'string',
],
'authType': 'string',
'authorizerUri': 'string',
'authorizerCredentials': 'string',
'identitySource': 'string',
'identityValidationExpression': 'string',
'authorizerResultTtlInSeconds': 123
},
]
}
:returns:
(string) --
"""
pass
def get_base_path_mapping(domainName=None, basePath=None):
"""
Describe a BasePathMapping resource.
See also: AWS API Documentation
:example: response = client.get_base_path_mapping(
domainName='string',
basePath='string'
)
:type domainName: string
:param domainName: [REQUIRED]
The domain name of the BasePathMapping resource to be described.
:type basePath: string
:param basePath: [REQUIRED]
The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Leave this blank if you do not want callers to specify any base path name after the domain name.
:rtype: dict
:return: {
'basePath': 'string',
'restApiId': 'string',
'stage': 'string'
}
"""
pass
def get_base_path_mappings(domainName=None, position=None, limit=None):
"""
Represents a collection of BasePathMapping resources.
See also: AWS API Documentation
:example: response = client.get_base_path_mappings(
domainName='string',
position='string',
limit=123
)
:type domainName: string
:param domainName: [REQUIRED]
The domain name of a BasePathMapping resource.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. It defaults to 25 and can be between 1 and 500.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'basePath': 'string',
'restApiId': 'string',
'stage': 'string'
},
]
}
"""
pass
def get_client_certificate(clientCertificateId=None):
"""
Gets information about the current ClientCertificate resource.
See also: AWS API Documentation
:example: response = client.get_client_certificate(
clientCertificateId='string'
)
:type clientCertificateId: string
:param clientCertificateId: [REQUIRED]
The identifier of the ClientCertificate resource to be described.
:rtype: dict
:return: {
'clientCertificateId': 'string',
'description': 'string',
'pemEncodedCertificate': 'string',
'createdDate': datetime(2015, 1, 1),
'expirationDate': datetime(2015, 1, 1)
}
"""
pass
def get_client_certificates(position=None, limit=None):
"""
Gets a collection of ClientCertificate resources.
See also: AWS API Documentation
:example: response = client.get_client_certificates(
position='string',
limit=123
)
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. It defaults to 25 and can be between 1 and 500.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'clientCertificateId': 'string',
'description': 'string',
'pemEncodedCertificate': 'string',
'createdDate': datetime(2015, 1, 1),
'expirationDate': datetime(2015, 1, 1)
},
]
}
"""
pass
def get_deployment(restApiId=None, deploymentId=None, embed=None):
"""
Gets information about a Deployment resource.
See also: AWS API Documentation
:example: response = client.get_deployment(
restApiId='string',
deploymentId='string',
embed=[
'string',
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Deployment resource to get information about.
:type deploymentId: string
:param deploymentId: [REQUIRED]
The identifier of the Deployment resource to get information about.
:type embed: list
:param embed: A query parameter to retrieve the specified embedded resources of the returned Deployment resource in the response. In a REST API call, this embed parameter value is a list of comma-separated strings, as in GET /restapis/{restapi_id}/deployments/{deployment_id}?embed=var1,var2 . The SDK and other platform-dependent libraries might use a different format for the list. Currently, this request supports only retrieval of the embedded API summary this way. Hence, the parameter value must be a single-valued list containing only the 'apisummary' string. For example, GET /restapis/{restapi_id}/deployments/{deployment_id}?embed=apisummary .
(string) --
:rtype: dict
:return: {
'id': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'apiSummary': {
'string': {
'string': {
'authorizationType': 'string',
'apiKeyRequired': True|False
}
}
}
}
"""
pass
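# Hedged example: retrieving a deployment together with its embedded API
# summary via embed=['apisummary'], per the parameter note above. The
# identifiers are hypothetical; assumes configured boto3 credentials.
def _example_get_deployment_summary():
    import boto3
    client = boto3.client('apigateway')
    resp = client.get_deployment(restApiId='abc123',
                                 deploymentId='dep456',
                                 embed=['apisummary'])
    # apiSummary maps resource path -> HTTP method -> method snapshot.
    for path, methods in resp.get('apiSummary', {}).items():
        for verb, snapshot in methods.items():
            print(path, verb, snapshot['authorizationType'])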
def get_deployments(restApiId=None, position=None, limit=None):
"""
Gets information about a Deployments collection.
See also: AWS API Documentation
:example: response = client.get_deployments(
restApiId='string',
position='string',
limit=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the collection of Deployment resources to get information about.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. The default value is 25; it can be between 1 and 500.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'apiSummary': {
'string': {
'string': {
'authorizationType': 'string',
'apiKeyRequired': True|False
}
}
}
},
]
}
"""
pass
def get_documentation_part(restApiId=None, documentationPartId=None):
"""
See also: AWS API Documentation
:example: response = client.get_documentation_part(
restApiId='string',
documentationPartId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the API to which the to-be-retrieved documentation part belongs.
:type documentationPartId: string
:param documentationPartId: [REQUIRED]
The identifier of the to-be-retrieved documentation part.
:rtype: dict
:return: {
'id': 'string',
'location': {
'type': 'API'|'AUTHORIZER'|'MODEL'|'RESOURCE'|'METHOD'|'PATH_PARAMETER'|'QUERY_PARAMETER'|'REQUEST_HEADER'|'REQUEST_BODY'|'RESPONSE'|'RESPONSE_HEADER'|'RESPONSE_BODY',
'path': 'string',
'method': 'string',
'statusCode': 'string',
'name': 'string'
},
'properties': 'string'
}
"""
pass
def get_documentation_parts(restApiId=None, type=None, nameQuery=None, path=None, position=None, limit=None):
"""
See also: AWS API Documentation
:example: response = client.get_documentation_parts(
restApiId='string',
type='API'|'AUTHORIZER'|'MODEL'|'RESOURCE'|'METHOD'|'PATH_PARAMETER'|'QUERY_PARAMETER'|'REQUEST_HEADER'|'REQUEST_BODY'|'RESPONSE'|'RESPONSE_HEADER'|'RESPONSE_BODY',
nameQuery='string',
path='string',
position='string',
limit=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the API to which the to-be-retrieved documentation parts belong.
:type type: string
:param type: The type of API entities of the to-be-retrieved documentation parts.
:type nameQuery: string
:param nameQuery: The name of API entities of the to-be-retrieved documentation parts.
:type path: string
:param path: The path of API entities of the to-be-retrieved documentation parts.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'location': {
'type': 'API'|'AUTHORIZER'|'MODEL'|'RESOURCE'|'METHOD'|'PATH_PARAMETER'|'QUERY_PARAMETER'|'REQUEST_HEADER'|'REQUEST_BODY'|'RESPONSE'|'RESPONSE_HEADER'|'RESPONSE_BODY',
'path': 'string',
'method': 'string',
'statusCode': 'string',
'name': 'string'
},
'properties': 'string'
},
]
}
"""
pass
def get_documentation_version(restApiId=None, documentationVersion=None):
"""
See also: AWS API Documentation
:example: response = client.get_documentation_version(
restApiId='string',
documentationVersion='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the API to which the to-be-retrieved documentation snapshot belongs.
:type documentationVersion: string
:param documentationVersion: [REQUIRED]
The version identifier of the to-be-retrieved documentation snapshot.
:rtype: dict
:return: {
'version': 'string',
'createdDate': datetime(2015, 1, 1),
'description': 'string'
}
"""
pass
def get_documentation_versions(restApiId=None, position=None, limit=None):
"""
See also: AWS API Documentation
:example: response = client.get_documentation_versions(
restApiId='string',
position='string',
limit=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the API to which the to-be-retrieved documentation versions belong.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'version': 'string',
'createdDate': datetime(2015, 1, 1),
'description': 'string'
},
]
}
"""
pass
def get_domain_name(domainName=None):
"""
Represents a domain name that is contained in a simpler, more intuitive URL that can be called.
See also: AWS API Documentation
:example: response = client.get_domain_name(
domainName='string'
)
:type domainName: string
:param domainName: [REQUIRED]
The name of the DomainName resource.
:rtype: dict
:return: {
'domainName': 'string',
'certificateName': 'string',
'certificateArn': 'string',
'certificateUploadDate': datetime(2015, 1, 1),
'distributionDomainName': 'string'
}
"""
pass
def get_domain_names(position=None, limit=None):
"""
Represents a collection of DomainName resources.
See also: AWS API Documentation
:example: response = client.get_domain_names(
position='string',
limit=123
)
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. The default value is 25; it can be between 1 and 500.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'domainName': 'string',
'certificateName': 'string',
'certificateArn': 'string',
'certificateUploadDate': datetime(2015, 1, 1),
'distributionDomainName': 'string'
},
]
}
"""
pass
def get_export(restApiId=None, stageName=None, exportType=None, parameters=None, accepts=None):
"""
Exports a deployed version of a RestApi in a specified format.
See also: AWS API Documentation
:example: response = client.get_export(
restApiId='string',
stageName='string',
exportType='string',
parameters={
'string': 'string'
},
accepts='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi to be exported.
:type stageName: string
:param stageName: [REQUIRED]
The name of the Stage that will be exported.
:type exportType: string
:param exportType: [REQUIRED]
The type of export. Currently only 'swagger' is supported.
:type parameters: dict
:param parameters: A key-value map of query string parameters that specify properties of the export, depending on the requested exportType. For exportType swagger, any combination of the following parameters is supported: integrations will export the API with x-amazon-apigateway-integration extensions; authorizers will export the API with x-amazon-apigateway-authorizer extensions; postman will export the API with Postman extensions, allowing for import into the Postman tool.
(string) --
(string) --
:type accepts: string
:param accepts: The content-type of the export, for example application/json . Currently application/json and application/yaml are supported for exportType of swagger . This should be specified in the Accept header for direct API requests.
:rtype: dict
:return: {
'contentType': 'string',
'contentDisposition': 'string',
'body': StreamingBody()
}
"""
pass
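# Hedged example: exporting a deployed stage as a Swagger document and saving
# the StreamingBody to disk. The identifiers and output filename are
# hypothetical; assumes configured boto3 credentials.
def _example_export_swagger():
    import boto3
    client = boto3.client('apigateway')
    resp = client.get_export(restApiId='abc123',
                             stageName='prod',
                             exportType='swagger',
                             accepts='application/json')
    # 'body' is a botocore StreamingBody; it can be read only once.
    with open('swagger.json', 'wb') as f:
        f.write(resp['body'].read())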
def get_integration(restApiId=None, resourceId=None, httpMethod=None):
"""
Represents a get integration.
See also: AWS API Documentation
:example: response = client.get_integration(
restApiId='string',
resourceId='string',
httpMethod='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a get integration request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a get integration request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a get integration request's HTTP method.
:rtype: dict
:return: {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
:returns:
(string) --
(string) --
"""
pass
def get_integration_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None):
"""
Represents a get integration response.
See also: AWS API Documentation
:example: response = client.get_integration_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a get integration response request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a get integration response request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a get integration response request's HTTP method.
:type statusCode: string
:param statusCode: [REQUIRED]
Specifies a get integration response request's status code.
:rtype: dict
:return: {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
:returns:
(string) --
(string) --
"""
pass
def get_method(restApiId=None, resourceId=None, httpMethod=None):
"""
Describe an existing Method resource.
See also: AWS API Documentation
:example: response = client.get_method(
restApiId='string',
resourceId='string',
httpMethod='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Method resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the Method resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies the method request's HTTP method type.
:rtype: dict
:return: {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
:returns:
(string) --
(boolean) --
"""
pass
def get_method_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None):
"""
Describes a MethodResponse resource.
See also: AWS API Documentation
:example: response = client.get_method_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the MethodResponse resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the MethodResponse resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
The HTTP verb of the Method resource.
:type statusCode: string
:param statusCode: [REQUIRED]
The status code for the MethodResponse resource.
:rtype: dict
:return: {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
:returns:
(string) --
(boolean) --
"""
pass
def get_model(restApiId=None, modelName=None, flatten=None):
"""
Describes an existing model defined for a RestApi resource.
See also: AWS API Documentation
:example: response = client.get_model(
restApiId='string',
modelName='string',
flatten=True|False
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier under which the Model exists.
:type modelName: string
:param modelName: [REQUIRED]
The name of the model as an identifier.
:type flatten: boolean
:param flatten: A query parameter of a Boolean value that, when true, resolves all external model references and returns a flattened model schema; when false, it does not. The default is false.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'schema': 'string',
'contentType': 'string'
}
"""
pass
def get_model_template(restApiId=None, modelName=None):
"""
Generates a sample mapping template that can be used to transform a payload into the structure of a model.
See also: AWS API Documentation
:example: response = client.get_model_template(
restApiId='string',
modelName='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The ID of the RestApi under which the model exists.
:type modelName: string
:param modelName: [REQUIRED]
The name of the model for which to generate a template.
:rtype: dict
:return: {
'value': 'string'
}
"""
pass
def get_models(restApiId=None, position=None, limit=None):
"""
Describes existing Models defined for a RestApi resource.
See also: AWS API Documentation
:example: response = client.get_models(
restApiId='string',
position='string',
limit=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. The default value is 25; it can be between 1 and 500.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'name': 'string',
'description': 'string',
'schema': 'string',
'contentType': 'string'
},
]
}
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo and you would normally invoke the
operation as client.create_foo(**kwargs), then, provided the
create_foo operation can be paginated, you can use
client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
"""
pass
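# Hedged example: using a paginator instead of manual 'position' handling,
# assuming get_rest_apis is one of the operations that can be paginated.
# Requires configured boto3 credentials.
def _example_paginator():
    import boto3
    client = boto3.client('apigateway')
    paginator = client.get_paginator('get_rest_apis')
    # paginate() yields response pages until the result set is exhausted.
    for page in paginator.paginate():
        for api in page.get('items', []):
            print(api['id'], api['name'])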
def get_request_validator(restApiId=None, requestValidatorId=None):
"""
Gets a RequestValidator of a given RestApi .
See also: AWS API Documentation
:example: response = client.get_request_validator(
restApiId='string',
requestValidatorId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi to which the specified RequestValidator belongs.
:type requestValidatorId: string
:param requestValidatorId: [REQUIRED]
The identifier of the RequestValidator to be retrieved.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'validateRequestBody': True|False,
'validateRequestParameters': True|False
}
"""
pass
def get_request_validators(restApiId=None, position=None, limit=None):
"""
Gets the RequestValidators collection of a given RestApi .
See also: AWS API Documentation
:example: response = client.get_request_validators(
restApiId='string',
position='string',
limit=123
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi to which the RequestValidators collection belongs.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'name': 'string',
'validateRequestBody': True|False,
'validateRequestParameters': True|False
},
]
}
"""
pass
def get_resource(restApiId=None, resourceId=None, embed=None):
"""
Lists information about a resource.
See also: AWS API Documentation
:example: response = client.get_resource(
restApiId='string',
resourceId='string',
embed=[
'string',
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The identifier for the Resource resource.
:type embed: list
:param embed: A query parameter to retrieve the specified resources embedded in the returned Resource representation in the response. This embed parameter value is a list of comma-separated strings. Currently, the request supports only retrieval of the embedded Method resources this way. The query parameter value must be a single-valued list and contain the 'methods' string. For example, GET /restapis/{restapi_id}/resources/{resource_id}?embed=methods .
(string) --
:rtype: dict
:return: {
'id': 'string',
'parentId': 'string',
'pathPart': 'string',
'path': 'string',
'resourceMethods': {
'string': {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
}
}
:returns:
(string) --
(boolean) --
"""
pass
def get_resources(restApiId=None, position=None, limit=None, embed=None):
"""
Lists information about a collection of Resource resources.
See also: AWS API Documentation
:example: response = client.get_resources(
restApiId='string',
position='string',
limit=123,
embed=[
'string',
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Resource.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. The default value is 25; it can be between 1 and 500.
:type embed: list
:param embed: A query parameter used to retrieve the specified resources embedded in the returned Resources resource in the response. This embed parameter value is a list of comma-separated strings. Currently, the request supports only retrieval of the embedded Method resources this way. The query parameter value must be a single-valued list and contain the 'methods' string. For example, GET /restapis/{restapi_id}/resources?embed=methods .
(string) --
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'parentId': 'string',
'pathPart': 'string',
'path': 'string',
'resourceMethods': {
'string': {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
}
},
]
}
:returns:
(string) --
(boolean) --
"""
pass
def get_rest_api(restApiId=None):
"""
Lists the RestApi resource in the collection.
See also: AWS API Documentation
:example: response = client.get_rest_api(
restApiId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'version': 'string',
'warnings': [
'string',
],
'binaryMediaTypes': [
'string',
]
}
:returns:
(string) --
"""
pass
def get_rest_apis(position=None, limit=None):
"""
Lists the RestApis resources for your collection.
See also: AWS API Documentation
:example: response = client.get_rest_apis(
position='string',
limit=123
)
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page. The default value is 25; it can be between 1 and 500.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'name': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'version': 'string',
'warnings': [
'string',
],
'binaryMediaTypes': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass
def get_sdk(restApiId=None, stageName=None, sdkType=None, parameters=None):
"""
Generates a client SDK for a RestApi and Stage .
See also: AWS API Documentation
:example: response = client.get_sdk(
restApiId='string',
stageName='string',
sdkType='string',
parameters={
'string': 'string'
}
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi that the SDK will use.
:type stageName: string
:param stageName: [REQUIRED]
The name of the Stage that the SDK will use.
:type sdkType: string
:param sdkType: [REQUIRED]
The language for the generated SDK. Currently javascript , android , and objectivec (for iOS) are supported.
:type parameters: dict
:param parameters: A key-value map of query string parameters that specify properties of the SDK, depending on the requested sdkType . For sdkType of objectivec , a parameter named classPrefix is required. For sdkType of android , parameters named groupId , artifactId , artifactVersion , and invokerPackage are required.
(string) --
(string) --
:rtype: dict
:return: {
'contentType': 'string',
'contentDisposition': 'string',
'body': StreamingBody()
}
"""
pass
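# Hedged example: generating a JavaScript SDK for a stage and writing the
# returned archive to disk. The identifiers and filename are hypothetical;
# assumes configured boto3 credentials.
def _example_generate_sdk():
    import boto3
    client = boto3.client('apigateway')
    resp = client.get_sdk(restApiId='abc123',
                          stageName='prod',
                          sdkType='javascript')
    # As with get_export, 'body' is a StreamingBody to be read once.
    with open('sdk.zip', 'wb') as f:
        f.write(resp['body'].read())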
def get_sdk_type(id=None):
"""
See also: AWS API Documentation
:example: response = client.get_sdk_type(
id='string'
)
:type id: string
:param id: [REQUIRED]
The identifier of the queried SdkType instance.
:rtype: dict
:return: {
'id': 'string',
'friendlyName': 'string',
'description': 'string',
'configurationProperties': [
{
'name': 'string',
'friendlyName': 'string',
'description': 'string',
'required': True|False,
'defaultValue': 'string'
},
]
}
"""
pass
def get_sdk_types(position=None, limit=None):
"""
See also: AWS API Documentation
:example: response = client.get_sdk_types(
position='string',
limit=123
)
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'friendlyName': 'string',
'description': 'string',
'configurationProperties': [
{
'name': 'string',
'friendlyName': 'string',
'description': 'string',
'required': True|False,
'defaultValue': 'string'
},
]
},
]
}
"""
pass
def get_stage(restApiId=None, stageName=None):
"""
Gets information about a Stage resource.
See also: AWS API Documentation
:example: response = client.get_stage(
restApiId='string',
stageName='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Stage resource to get information about.
:type stageName: string
:param stageName: [REQUIRED]
The name of the Stage resource to get information about.
:rtype: dict
:return: {
'deploymentId': 'string',
'clientCertificateId': 'string',
'stageName': 'string',
'description': 'string',
'cacheClusterEnabled': True|False,
'cacheClusterSize': '0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',
'cacheClusterStatus': 'CREATE_IN_PROGRESS'|'AVAILABLE'|'DELETE_IN_PROGRESS'|'NOT_AVAILABLE'|'FLUSH_IN_PROGRESS',
'methodSettings': {
'string': {
'metricsEnabled': True|False,
'loggingLevel': 'string',
'dataTraceEnabled': True|False,
'throttlingBurstLimit': 123,
'throttlingRateLimit': 123.0,
'cachingEnabled': True|False,
'cacheTtlInSeconds': 123,
'cacheDataEncrypted': True|False,
'requireAuthorizationForCacheControl': True|False,
'unauthorizedCacheControlHeaderStrategy': 'FAIL_WITH_403'|'SUCCEED_WITH_RESPONSE_HEADER'|'SUCCEED_WITHOUT_RESPONSE_HEADER'
}
},
'variables': {
'string': 'string'
},
'documentationVersion': 'string',
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1)
}
:returns:
(string) --
(string) --
"""
pass
def get_stages(restApiId=None, deploymentId=None):
"""
Gets information about one or more Stage resources.
See also: AWS API Documentation
:example: response = client.get_stages(
restApiId='string',
deploymentId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi whose stages are to be retrieved.
:type deploymentId: string
:param deploymentId: The deployment identifier used to filter the returned stages.
:rtype: dict
:return: {
'item': [
{
'deploymentId': 'string',
'clientCertificateId': 'string',
'stageName': 'string',
'description': 'string',
'cacheClusterEnabled': True|False,
'cacheClusterSize': '0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',
'cacheClusterStatus': 'CREATE_IN_PROGRESS'|'AVAILABLE'|'DELETE_IN_PROGRESS'|'NOT_AVAILABLE'|'FLUSH_IN_PROGRESS',
'methodSettings': {
'string': {
'metricsEnabled': True|False,
'loggingLevel': 'string',
'dataTraceEnabled': True|False,
'throttlingBurstLimit': 123,
'throttlingRateLimit': 123.0,
'cachingEnabled': True|False,
'cacheTtlInSeconds': 123,
'cacheDataEncrypted': True|False,
'requireAuthorizationForCacheControl': True|False,
'unauthorizedCacheControlHeaderStrategy': 'FAIL_WITH_403'|'SUCCEED_WITH_RESPONSE_HEADER'|'SUCCEED_WITHOUT_RESPONSE_HEADER'
}
},
'variables': {
'string': 'string'
},
'documentationVersion': 'string',
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1)
},
]
}
:returns:
(string) --
(string) --
"""
pass
def get_usage(usagePlanId=None, keyId=None, startDate=None, endDate=None, position=None, limit=None):
"""
Gets the usage data of a usage plan in a specified time interval.
See also: AWS API Documentation
:example: response = client.get_usage(
usagePlanId='string',
keyId='string',
startDate='string',
endDate='string',
position='string',
limit=123
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the usage plan associated with the usage data.
:type keyId: string
:param keyId: The Id of the API key associated with the resultant usage data.
:type startDate: string
:param startDate: [REQUIRED]
The starting date (e.g., 2016-01-01) of the usage data.
:type endDate: string
:param endDate: [REQUIRED]
The ending date (e.g., 2016-12-31) of the usage data.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'usagePlanId': 'string',
'startDate': 'string',
'endDate': 'string',
'position': 'string',
'items': {
'string': [
[
123,
],
]
}
}
:returns:
(string) --
(list) --
(list) --
(integer) --
"""
pass
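# Hedged example: reading a usage plan's data. The sketch assumes, based on
# the list-of-integer-lists shape above, that 'items' maps an API key id to
# per-day [used, remaining] pairs. The plan id and dates are hypothetical.
def _example_get_usage():
    import boto3
    client = boto3.client('apigateway')
    resp = client.get_usage(usagePlanId='plan123',
                            startDate='2016-01-01',
                            endDate='2016-01-07')
    for key_id, daily in resp.get('items', {}).items():
        for used, remaining in daily:
            print(key_id, used, remaining)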
def get_usage_plan(usagePlanId=None):
"""
Gets a usage plan of a given plan identifier.
See also: AWS API Documentation
:example: response = client.get_usage_plan(
usagePlanId='string'
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The identifier of the UsagePlan resource to be retrieved.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'apiStages': [
{
'apiId': 'string',
'stage': 'string'
},
],
'throttle': {
'burstLimit': 123,
'rateLimit': 123.0
},
'quota': {
'limit': 123,
'offset': 123,
'period': 'DAY'|'WEEK'|'MONTH'
},
'productCode': 'string'
}
"""
pass
def get_usage_plan_key(usagePlanId=None, keyId=None):
"""
Gets a usage plan key of a given key identifier.
See also: AWS API Documentation
:example: response = client.get_usage_plan_key(
usagePlanId='string',
keyId='string'
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the UsagePlan resource representing the usage plan containing the to-be-retrieved UsagePlanKey resource representing a plan customer.
:type keyId: string
:param keyId: [REQUIRED]
The key Id of the to-be-retrieved UsagePlanKey resource representing a plan customer.
:rtype: dict
:return: {
'id': 'string',
'type': 'string',
'value': 'string',
'name': 'string'
}
"""
pass
def get_usage_plan_keys(usagePlanId=None, position=None, limit=None, nameQuery=None):
"""
Gets all the usage plan keys representing the API keys added to a specified usage plan.
See also: AWS API Documentation
:example: response = client.get_usage_plan_keys(
usagePlanId='string',
position='string',
limit=123,
nameQuery='string'
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the UsagePlan resource representing the usage plan containing the to-be-retrieved UsagePlanKey resource representing a plan customer.
:type position: string
:param position: The current pagination position in the paged result set.
:type limit: integer
:param limit: The maximum number of returned results per page.
:type nameQuery: string
:param nameQuery: A query parameter specifying the name of the to-be-returned usage plan keys.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'type': 'string',
'value': 'string',
'name': 'string'
},
]
}
"""
pass
def get_usage_plans(position=None, keyId=None, limit=None):
"""
Gets all the usage plans of the caller's account.
See also: AWS API Documentation
:example: response = client.get_usage_plans(
position='string',
keyId='string',
limit=123
)
:type position: string
:param position: The current pagination position in the paged result set.
:type keyId: string
:param keyId: The identifier of the API key associated with the usage plans.
:type limit: integer
:param limit: The maximum number of returned results per page.
:rtype: dict
:return: {
'position': 'string',
'items': [
{
'id': 'string',
'name': 'string',
'description': 'string',
'apiStages': [
{
'apiId': 'string',
'stage': 'string'
},
],
'throttle': {
'burstLimit': 123,
'rateLimit': 123.0
},
'quota': {
'limit': 123,
'offset': 123,
'period': 'DAY'|'WEEK'|'MONTH'
},
'productCode': 'string'
},
]
}
"""
pass
def get_waiter():
"""
"""
pass
def import_api_keys(body=None, format=None, failOnWarnings=None):
"""
Import API keys from an external source, such as a CSV-formatted file.
See also: AWS API Documentation
:example: response = client.import_api_keys(
body=b'bytes'|file,
format='csv',
failOnWarnings=True|False
)
:type body: bytes or seekable file-like object
:param body: [REQUIRED]
The payload of the POST request to import API keys. For the payload format, see API Key File Format .
:type format: string
:param format: [REQUIRED]
A query parameter to specify the input format of the imported API keys. Currently, only the csv format is supported.
:type failOnWarnings: boolean
:param failOnWarnings: A query parameter to indicate whether to roll back ApiKey importation (true) or not (false) when an error is encountered.
:rtype: dict
:return: {
'ids': [
'string',
],
'warnings': [
'string',
]
}
:returns:
(string) --
"""
pass
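# Hedged example: importing API keys from a CSV file, rolling back on
# warnings. 'keys.csv' is a hypothetical file in the documented API Key File
# Format; assumes configured boto3 credentials.
def _example_import_api_keys():
    import boto3
    client = boto3.client('apigateway')
    with open('keys.csv', 'rb') as f:
        resp = client.import_api_keys(body=f, format='csv',
                                      failOnWarnings=True)
    print('imported ids:', resp['ids'])
    print('warnings:', resp['warnings'])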
def import_documentation_parts(restApiId=None, mode=None, failOnWarnings=None, body=None):
"""
See also: AWS API Documentation
:example: response = client.import_documentation_parts(
restApiId='string',
mode='merge'|'overwrite',
failOnWarnings=True|False,
body=b'bytes'|file
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the API to which the to-be-imported documentation parts belong.
:type mode: string
:param mode: A query parameter to indicate whether to overwrite (OVERWRITE ) any existing DocumentationParts definition or to merge (MERGE ) the new definition into the existing one. The default value is MERGE .
:type failOnWarnings: boolean
:param failOnWarnings: A query parameter to specify whether to roll back the documentation importation (true) or not (false) when a warning is encountered. The default value is false.
:type body: bytes or seekable file-like object
:param body: [REQUIRED]
Raw byte array representing the to-be-imported documentation parts. To import from a Swagger file, this is a JSON object.
:rtype: dict
:return: {
'ids': [
'string',
],
'warnings': [
'string',
]
}
:returns:
(string) --
"""
pass
def import_rest_api(failOnWarnings=None, parameters=None, body=None):
"""
A feature of the Amazon API Gateway control service for creating a new API from an external API definition file.
See also: AWS API Documentation
:example: response = client.import_rest_api(
failOnWarnings=True|False,
parameters={
'string': 'string'
},
body=b'bytes'|file
)
:type failOnWarnings: boolean
:param failOnWarnings: A query parameter to indicate whether to roll back the API creation (true) or not (false) when a warning is encountered. The default value is false.
:type parameters: dict
:param parameters: Custom header parameters as part of the request.
(string) --
(string) --
:type body: bytes or seekable file-like object
:param body: [REQUIRED]
The POST request body containing external API definitions. Currently, only Swagger definition JSON files are supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'version': 'string',
'warnings': [
'string',
],
'binaryMediaTypes': [
'string',
]
}
:returns:
(string) --
"""
pass
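# Hedged example: creating a new API from a Swagger definition file.
# 'swagger.json' is a hypothetical local file; assumes configured boto3
# credentials.
def _example_import_rest_api():
    import boto3
    client = boto3.client('apigateway')
    with open('swagger.json', 'rb') as f:
        resp = client.import_rest_api(failOnWarnings=True, body=f)
    print('created API', resp['id'])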
def put_integration(restApiId=None, resourceId=None, httpMethod=None, type=None, integrationHttpMethod=None, uri=None, credentials=None, requestParameters=None, requestTemplates=None, passthroughBehavior=None, cacheNamespace=None, cacheKeyParameters=None, contentHandling=None):
"""
Represents a put integration.
See also: AWS API Documentation
:example: response = client.put_integration(
restApiId='string',
resourceId='string',
httpMethod='string',
type='HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
integrationHttpMethod='string',
uri='string',
credentials='string',
requestParameters={
'string': 'string'
},
requestTemplates={
'string': 'string'
},
passthroughBehavior='string',
cacheNamespace='string',
cacheKeyParameters=[
'string',
],
contentHandling='CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a put integration request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a put integration request's resource ID.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a put integration request's HTTP method.
:type type: string
:param type: [REQUIRED]
Specifies a put integration input's type.
:type integrationHttpMethod: string
:param integrationHttpMethod: Specifies a put integration HTTP method. When the integration type is HTTP or AWS, this field is required.
:type uri: string
:param uri: Specifies a put integration input's Uniform Resource Identifier (URI). When the integration type is HTTP or AWS, this field is required. For integration with Lambda as an AWS service proxy, this value is of the 'arn:aws:apigateway:region:lambda:path/2015-03-31/functions/functionArn/invocations' format.
:type credentials: string
:param credentials: Specifies whether credentials are required for a put integration.
:type requestParameters: dict
:param requestParameters: A key-value map specifying request parameters that are passed from the method request to the back end. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the back end. The method request parameter value must match the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name must be a valid and unique method request parameter name.
(string) --
(string) --
:type requestTemplates: dict
:param requestTemplates: Represents a map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. The content type value is the key in this map, and the template (as a String) is the value.
(string) --
(string) --
:type passthroughBehavior: string
:param passthroughBehavior: Specifies the pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the requestTemplates property on the Integration resource. There are three valid values: WHEN_NO_MATCH , WHEN_NO_TEMPLATES , and NEVER .
WHEN_NO_MATCH passes the request body for unmapped content types through to the integration back end without transformation.
NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media Type' response.
WHEN_NO_TEMPLATES allows pass-through when the integration has no content types mapped to templates. However, if there is at least one content type defined, unmapped content types will be rejected with the same 415 response.
:type cacheNamespace: string
:param cacheNamespace: Specifies a put integration input's cache namespace.
:type cacheKeyParameters: list
:param cacheKeyParameters: Specifies a put integration input's cache key parameters.
(string) --
:type contentHandling: string
:param contentHandling: Specifies how to handle request payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT , with the following behaviors:
CONVERT_TO_BINARY : Converts a request payload from a Base64-encoded string to the corresponding binary blob.
CONVERT_TO_TEXT : Converts a request payload from a binary blob to a Base64-encoded string.
If this property is not defined, the request payload will be passed through from the method request to the integration request without modification, provided that passthroughBehavior is configured to support payload pass-through.
:rtype: dict
:return: {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
:returns:
(string) --
(string) --
"""
pass
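# Hedged example: wiring a method to a Lambda function as an AWS_PROXY
# integration, using the uri format noted in the parameter docs above. The
# account id, region, function name, and API identifiers are all
# hypothetical.
def _example_put_lambda_integration():
    import boto3
    client = boto3.client('apigateway')
    lambda_arn = 'arn:aws:lambda:us-east-1:123456789012:function:my-func'
    uri = ('arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/'
           + lambda_arn + '/invocations')
    client.put_integration(restApiId='abc123',
                           resourceId='res456',
                           httpMethod='GET',
                           type='AWS_PROXY',
                           integrationHttpMethod='POST',  # Lambda is invoked via POST
                           uri=uri)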
def put_integration_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None, selectionPattern=None, responseParameters=None, responseTemplates=None, contentHandling=None):
"""
Represents a put integration response.
See also: AWS API Documentation
:example: response = client.put_integration_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string',
selectionPattern='string',
responseParameters={
'string': 'string'
},
responseTemplates={
'string': 'string'
},
contentHandling='CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a put integration response request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a put integration response request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a put integration response request's HTTP method.
:type statusCode: string
:param statusCode: [REQUIRED]
Specifies the status code that is used to map the integration response to an existing MethodResponse .
:type selectionPattern: string
:param selectionPattern: Specifies the selection pattern of a put integration response.
:type responseParameters: dict
:param responseParameters: A key-value map specifying response parameters that are passed to the method response from the back end. The key is a method response header parameter name and the mapped value is an integration response header value, a static value enclosed within a pair of single quotes, or a JSON expression from the integration response body. The mapping key must match the pattern of method.response.header.{name} , where name is a valid and unique header name. The mapped non-static value must match the pattern of integration.response.header.{name} or integration.response.body.{JSON-expression} , where name must be a valid and unique response header name and JSON-expression a valid JSON expression without the $ prefix.
(string) --
(string) --
:type responseTemplates: dict
:param responseTemplates: Specifies a put integration response's templates.
(string) --
(string) --
:type contentHandling: string
:param contentHandling: Specifies how to handle response payload content type conversions. Supported values are CONVERT_TO_BINARY and CONVERT_TO_TEXT , with the following behaviors:
CONVERT_TO_BINARY : Converts a response payload from a Base64-encoded string to the corresponding binary blob.
CONVERT_TO_TEXT : Converts a response payload from a binary blob to a Base64-encoded string.
If this property is not defined, the response payload will be passed through from the integration response to the method response without modification.
:rtype: dict
:return: {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
:returns:
(string) --
(string) --
"""
pass
def put_method(restApiId=None, resourceId=None, httpMethod=None, authorizationType=None, authorizerId=None, apiKeyRequired=None, operationName=None, requestParameters=None, requestModels=None, requestValidatorId=None):
"""
Add a method to an existing Resource resource.
See also: AWS API Documentation
:example: response = client.put_method(
restApiId='string',
resourceId='string',
httpMethod='string',
authorizationType='string',
authorizerId='string',
apiKeyRequired=True|False,
operationName='string',
requestParameters={
'string': True|False
},
requestModels={
'string': 'string'
},
requestValidatorId='string'
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the new Method resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the new Method resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies the method request's HTTP method type.
:type authorizationType: string
:param authorizationType: [REQUIRED]
The method's authorization type. Valid values are NONE for open access, AWS_IAM for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS for using a Cognito user pool.
:type authorizerId: string
:param authorizerId: Specifies the identifier of an Authorizer to use on this Method, if the type is CUSTOM.
:type apiKeyRequired: boolean
:param apiKeyRequired: Specifies whether the method requires a valid ApiKey.
:type operationName: string
:param operationName: A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example.
:type requestParameters: dict
:param requestParameters: A key-value map defining required or optional method request parameters that can be accepted by Amazon API Gateway. A key defines a method request parameter name matching the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name is a valid and unique parameter name. The value associated with the key is a Boolean flag indicating whether the parameter is required (true ) or optional (false ). The method request parameter names defined here are available in Integration to be mapped to integration request parameters or body-mapping templates.
(string) --
(boolean) --
:type requestModels: dict
:param requestModels: Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value.
(string) --
(string) --
:type requestValidatorId: string
:param requestValidatorId: The identifier of a RequestValidator for validating the method request.
:rtype: dict
:return: {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
:returns:
(string) --
(boolean) --
"""
pass
def put_method_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None, responseParameters=None, responseModels=None):
"""
Adds a MethodResponse to an existing Method resource.
See also: AWS API Documentation
:example: response = client.put_method_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string',
responseParameters={
'string': True|False
},
responseModels={
'string': 'string'
}
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Method resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the Method resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
The HTTP verb of the Method resource.
:type statusCode: string
:param statusCode: [REQUIRED]
The method response's status code.
:type responseParameters: dict
:param responseParameters: A key-value map specifying required or optional response parameters that Amazon API Gateway can send back to the caller. A key defines a method response header name and the associated value is a Boolean flag indicating whether the method response parameter is required or not. The method response header names must match the pattern of method.response.header.{name}, where name is a valid and unique header name. The response parameter names defined here are available in the integration response to be mapped from an integration response header expressed in integration.response.header.{name}, a static value enclosed within a pair of single quotes (e.g., 'application/json'), or a JSON expression from the back-end response payload in the form of integration.response.body.{JSON-expression}, where JSON-expression is a valid JSON expression without the $ prefix.
(string) --
(boolean) --
:type responseModels: dict
:param responseModels: Specifies the Model resources used for the response's content type. Response models are represented as a key/value map, with a content type as the key and a Model name as the value.
(string) --
(string) --
:rtype: dict
:return: {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
:returns:
(string) --
(boolean) --
"""
pass
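# Hedged example: the usual four-call sequence for defining a method end to
# end, shown with a MOCK integration so the sketch is self-contained. The API
# and resource identifiers are hypothetical; assumes configured boto3
# credentials.
def _example_wire_mock_method():
    import boto3
    client = boto3.client('apigateway')
    api, res = 'abc123', 'res456'
    client.put_method(restApiId=api, resourceId=res, httpMethod='GET',
                      authorizationType='NONE')
    # The MOCK request template selects the integration response status code.
    client.put_integration(restApiId=api, resourceId=res, httpMethod='GET',
                           type='MOCK',
                           requestTemplates={'application/json': '{"statusCode": 200}'})
    client.put_method_response(restApiId=api, resourceId=res, httpMethod='GET',
                               statusCode='200')
    client.put_integration_response(restApiId=api, resourceId=res,
                                    httpMethod='GET', statusCode='200',
                                    responseTemplates={'application/json': ''})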
def put_rest_api(restApiId=None, mode=None, failOnWarnings=None, parameters=None, body=None):
"""
A feature of the Amazon API Gateway control service for updating an existing API with an input of external API definitions. The update can take the form of merging the supplied definition into the existing API or overwriting the existing API.
See also: AWS API Documentation
:example: response = client.put_rest_api(
restApiId='string',
mode='merge'|'overwrite',
failOnWarnings=True|False,
parameters={
'string': 'string'
},
body=b'bytes'|file
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi to be updated.
:type mode: string
:param mode: The mode query parameter to specify the update mode. Valid values are 'merge' and 'overwrite'. By default, the update mode is 'merge'.
:type failOnWarnings: boolean
:param failOnWarnings: A query parameter to indicate whether to roll back the API update (true) or not (false) when a warning is encountered. The default value is false.
:type parameters: dict
:param parameters: Custom headers supplied as part of the request.
(string) --
(string) --
:type body: bytes or seekable file-like object
:param body: [REQUIRED]
The PUT request body containing external API definitions. Currently, only Swagger definition JSON files are supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'version': 'string',
'warnings': [
'string',
],
'binaryMediaTypes': [
'string',
]
}
:returns:
(string) --
"""
pass
def test_invoke_authorizer(restApiId=None, authorizerId=None, headers=None, pathWithQueryString=None, body=None, stageVariables=None, additionalContext=None):
"""
Simulate the execution of an Authorizer in your RestApi with headers, parameters, and an incoming request body.
See also: AWS API Documentation
:example: response = client.test_invoke_authorizer(
restApiId='string',
authorizerId='string',
headers={
'string': 'string'
},
pathWithQueryString='string',
body='string',
stageVariables={
'string': 'string'
},
additionalContext={
'string': 'string'
}
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a test invoke authorizer request's RestApi identifier.
:type authorizerId: string
:param authorizerId: [REQUIRED]
Specifies a test invoke authorizer request's Authorizer ID.
:type headers: dict
:param headers: [Required] A key-value map of headers to simulate an incoming invocation request. This is where the incoming authorization token, or identity source, should be specified.
(string) --
(string) --
:type pathWithQueryString: string
:param pathWithQueryString: [Optional] The URI path, including query string, of the simulated invocation request. Use this to specify path parameters and query string parameters.
:type body: string
:param body: [Optional] The simulated request body of an incoming invocation request.
:type stageVariables: dict
:param stageVariables: A key-value map of stage variables to simulate an invocation on a deployed Stage .
(string) --
(string) --
:type additionalContext: dict
:param additionalContext: [Optional] A key-value map of additional context variables.
(string) --
(string) --
:rtype: dict
:return: {
'clientStatus': 123,
'log': 'string',
'latency': 123,
'principalId': 'string',
'policy': 'string',
'authorization': {
'string': [
'string',
]
},
'claims': {
'string': 'string'
}
}
:returns:
(string) --
(list) --
(string) --
"""
pass
def test_invoke_method(restApiId=None, resourceId=None, httpMethod=None, pathWithQueryString=None, body=None, headers=None, clientCertificateId=None, stageVariables=None):
"""
Simulate the execution of a Method in your RestApi with headers, parameters, and an incoming request body.
See also: AWS API Documentation
:example: response = client.test_invoke_method(
restApiId='string',
resourceId='string',
httpMethod='string',
pathWithQueryString='string',
body='string',
headers={
'string': 'string'
},
clientCertificateId='string',
stageVariables={
'string': 'string'
}
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies a test invoke method request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies a test invoke method request's resource ID.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies a test invoke method request's HTTP method.
:type pathWithQueryString: string
:param pathWithQueryString: The URI path, including query string, of the simulated invocation request. Use this to specify path parameters and query string parameters.
:type body: string
:param body: The simulated request body of an incoming invocation request.
:type headers: dict
:param headers: A key-value map of headers to simulate an incoming invocation request.
(string) --
(string) --
:type clientCertificateId: string
:param clientCertificateId: A ClientCertificate identifier to use in the test invocation. API Gateway will use the certificate when making the HTTPS request to the defined back-end endpoint.
:type stageVariables: dict
:param stageVariables: A key-value map of stage variables to simulate an invocation on a deployed Stage .
(string) --
(string) --
:rtype: dict
:return: {
'status': 123,
'body': 'string',
'headers': {
'string': 'string'
},
'log': 'string',
'latency': 123
}
:returns:
(string) --
(string) --
"""
pass
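# Hedged example: simulating a GET call against a resource and inspecting the
# simulated status, body, and execution log. The identifiers and path are
# hypothetical; assumes configured boto3 credentials.
def _example_test_invoke():
    import boto3
    client = boto3.client('apigateway')
    resp = client.test_invoke_method(restApiId='abc123',
                                     resourceId='res456',
                                     httpMethod='GET',
                                     pathWithQueryString='/pets?type=dog')
    print(resp['status'])
    print(resp['body'])
    print(resp['log'])  # the simulated execution trace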
def update_account(patchOperations=None):
"""
Changes information about the current Account resource.
See also: AWS API Documentation
:example: response = client.update_account(
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. Valid values are 'add', 'remove', and 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'cloudwatchRoleArn': 'string',
'throttleSettings': {
'burstLimit': 123,
'rateLimit': 123.0
},
'features': [
'string',
],
'apiKeyVersion': 'string'
}
:returns:
(string) --
"""
pass
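# Hedged example: a JSON-Patch style update that points API Gateway at a
# CloudWatch logging role. The '/cloudwatchRoleArn' path matches the return
# shape documented above; the role ARN is hypothetical.
def _example_update_account():
    import boto3
    client = boto3.client('apigateway')
    resp = client.update_account(patchOperations=[{
        'op': 'replace',
        'path': '/cloudwatchRoleArn',
        'value': 'arn:aws:iam::123456789012:role/apigateway-cloudwatch',
    }])
    print(resp['cloudwatchRoleArn'])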
def update_api_key(apiKey=None, patchOperations=None):
"""
Changes information about an ApiKey resource.
See also: AWS API Documentation
:example: response = client.update_api_key(
apiKey='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type apiKey: string
:param apiKey: [REQUIRED]
The identifier of the ApiKey resource to be updated.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. Valid values are 'add', 'remove', and 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'value': 'string',
'name': 'string',
'customerId': 'string',
'description': 'string',
'enabled': True|False,
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1),
'stageKeys': [
'string',
]
}
:returns:
(string) --
"""
pass
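# Usage sketch: disabling a key without deleting it is a 'replace' on the
# '/enabled' path; note the boolean travels as the string 'false', since every
# patch 'value' in this API is a string. The key identifier is a placeholder.
def _example_disable_api_key(client):
    return client.update_api_key(
        apiKey='abc123keyid',  # hypothetical ApiKey identifier
        patchOperations=[{'op': 'replace', 'path': '/enabled', 'value': 'false'}]
    )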
def update_authorizer(restApiId=None, authorizerId=None, patchOperations=None):
"""
Updates an existing Authorizer resource.
See also: AWS API Documentation
:example: response = client.update_authorizer(
restApiId='string',
authorizerId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Authorizer resource.
:type authorizerId: string
:param authorizerId: [REQUIRED]
The identifier of the Authorizer resource.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'type': 'TOKEN'|'COGNITO_USER_POOLS',
'providerARNs': [
'string',
],
'authType': 'string',
'authorizerUri': 'string',
'authorizerCredentials': 'string',
'identitySource': 'string',
'identityValidationExpression': 'string',
'authorizerResultTtlInSeconds': 123
}
:returns:
(string) --
"""
pass
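# Usage sketch: the authorizer result cache TTL is an updateable property, so
# tuning it is a 'replace' on '/authorizerResultTtlInSeconds', with the numeric
# value passed as a string. Identifiers below are placeholders.
def _example_tune_authorizer_ttl(client):
    return client.update_authorizer(
        restApiId='a1b2c3',     # hypothetical RestApi id
        authorizerId='auth01',  # hypothetical Authorizer id
        patchOperations=[{
            'op': 'replace',
            'path': '/authorizerResultTtlInSeconds',
            'value': '300',  # cache policy results for five minutes
        }]
    )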
def update_base_path_mapping(domainName=None, basePath=None, patchOperations=None):
"""
Changes information about the BasePathMapping resource.
See also: AWS API Documentation
:example: response = client.update_base_path_mapping(
domainName='string',
basePath='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type domainName: string
:param domainName: [REQUIRED]
The domain name of the BasePathMapping resource to change.
:type basePath: string
:param basePath: [REQUIRED]
The base path of the BasePathMapping resource to change.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'basePath': 'string',
'restApiId': 'string',
'stage': 'string'
}
"""
pass
def update_client_certificate(clientCertificateId=None, patchOperations=None):
"""
Changes information about a ClientCertificate resource.
See also: AWS API Documentation
:example: response = client.update_client_certificate(
clientCertificateId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type clientCertificateId: string
:param clientCertificateId: [REQUIRED]
The identifier of the ClientCertificate resource to be updated.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'clientCertificateId': 'string',
'description': 'string',
'pemEncodedCertificate': 'string',
'createdDate': datetime(2015, 1, 1),
'expirationDate': datetime(2015, 1, 1)
}
"""
pass
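# Usage sketch: the description is the client certificate's only writable
# property, so an update is a single 'replace' patch. The certificate id and
# description text are placeholders.
def _example_relabel_client_certificate(client):
    return client.update_client_certificate(
        clientCertificateId='cert01',  # hypothetical ClientCertificate id
        patchOperations=[{'op': 'replace', 'path': '/description',
                          'value': 'rotated certificate'}]
    )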
def update_deployment(restApiId=None, deploymentId=None, patchOperations=None):
"""
Changes information about a Deployment resource.
See also: AWS API Documentation
:example: response = client.update_deployment(
restApiId='string',
deploymentId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Deployment resource to change information about.
:type deploymentId: string
:param deploymentId: [REQUIRED]
The identifier of the Deployment resource to change information about.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'apiSummary': {
'string': {
'string': {
'authorizationType': 'string',
'apiKeyRequired': True|False
}
}
}
}
"""
pass
def update_documentation_part(restApiId=None, documentationPartId=None, patchOperations=None):
"""
See also: AWS API Documentation
:example: response = client.update_documentation_part(
restApiId='string',
documentationPartId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of an API of the to-be-updated documentation part.
:type documentationPartId: string
:param documentationPartId: [REQUIRED]
[Required] The identifier of the to-be-updated documentation part.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'location': {
'type': 'API'|'AUTHORIZER'|'MODEL'|'RESOURCE'|'METHOD'|'PATH_PARAMETER'|'QUERY_PARAMETER'|'REQUEST_HEADER'|'REQUEST_BODY'|'RESPONSE'|'RESPONSE_HEADER'|'RESPONSE_BODY',
'path': 'string',
'method': 'string',
'statusCode': 'string',
'name': 'string'
},
'properties': 'string'
}
"""
pass
def update_documentation_version(restApiId=None, documentationVersion=None, patchOperations=None):
"""
See also: AWS API Documentation
:example: response = client.update_documentation_version(
restApiId='string',
documentationVersion='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of an API of the to-be-updated documentation version.
:type documentationVersion: string
:param documentationVersion: [REQUIRED]
[Required] The version identifier of the to-be-updated documentation version.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'version': 'string',
'createdDate': datetime(2015, 1, 1),
'description': 'string'
}
"""
pass
def update_domain_name(domainName=None, patchOperations=None):
"""
Changes information about the DomainName resource.
See also: AWS API Documentation
:example: response = client.update_domain_name(
domainName='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type domainName: string
:param domainName: [REQUIRED]
The name of the DomainName resource to be changed.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'domainName': 'string',
'certificateName': 'string',
'certificateArn': 'string',
'certificateUploadDate': datetime(2015, 1, 1),
'distributionDomainName': 'string'
}
"""
pass
def update_integration(restApiId=None, resourceId=None, httpMethod=None, patchOperations=None):
"""
Updates an existing Integration resource.
See also: AWS API Documentation
:example: response = client.update_integration(
restApiId='string',
resourceId='string',
httpMethod='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
Represents an update integration request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Represents an update integration request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Represents an update integration request's HTTP method.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
:returns:
(string) --
(string) --
"""
pass
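# Usage sketch of the '~1' escaping rule described in the docstring above: to
# patch the request template keyed by the 'application/json' content type, the
# slash in the key is escaped, giving '/requestTemplates/application~1json'.
# Ids are placeholders; the mapping template is illustrative VTL.
def _example_patch_request_template(client):
    return client.update_integration(
        restApiId='a1b2c3',  # hypothetical RestApi id
        resourceId='res01',  # hypothetical Resource id
        httpMethod='POST',
        patchOperations=[{
            'op': 'replace',
            'path': '/requestTemplates/application~1json',
            'value': '{"body": $input.json(\'$\')}',
        }]
    )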
def update_integration_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None, patchOperations=None):
"""
Updates an existing IntegrationResponse resource.
See also: AWS API Documentation
:example: response = client.update_integration_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
Specifies an update integration response request's API identifier.
:type resourceId: string
:param resourceId: [REQUIRED]
Specifies an update integration response request's resource identifier.
:type httpMethod: string
:param httpMethod: [REQUIRED]
Specifies an update integration response request's HTTP method.
:type statusCode: string
:param statusCode: [REQUIRED]
Specifies an update integration response request's status code.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
:returns:
(string) --
(string) --
"""
pass
def update_method(restApiId=None, resourceId=None, httpMethod=None, patchOperations=None):
"""
Updates an existing Method resource.
See also: AWS API Documentation
:example: response = client.update_method(
restApiId='string',
resourceId='string',
httpMethod='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Method resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the Method resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
The HTTP verb of the Method resource.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
:returns:
(string) --
(boolean) --
"""
pass
def update_method_response(restApiId=None, resourceId=None, httpMethod=None, statusCode=None, patchOperations=None):
"""
Updates an existing MethodResponse resource.
See also: AWS API Documentation
:example: response = client.update_method_response(
restApiId='string',
resourceId='string',
httpMethod='string',
statusCode='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the MethodResponse resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The Resource identifier for the MethodResponse resource.
:type httpMethod: string
:param httpMethod: [REQUIRED]
The HTTP verb of the Method resource.
:type statusCode: string
:param statusCode: [REQUIRED]
The status code for the MethodResponse resource.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
:returns:
(string) --
(boolean) --
"""
pass
def update_model(restApiId=None, modelName=None, patchOperations=None):
"""
Changes information about a model.
See also: AWS API Documentation
:example: response = client.update_model(
restApiId='string',
modelName='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier under which the model exists.
:type modelName: string
:param modelName: [REQUIRED]
The name of the model to update.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'schema': 'string',
'contentType': 'string'
}
"""
pass
def update_request_validator(restApiId=None, requestValidatorId=None, patchOperations=None):
"""
Updates a RequestValidator of a given RestApi .
See also: AWS API Documentation
:example: response = client.update_request_validator(
restApiId='string',
requestValidatorId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
[Required] The identifier of the RestApi for which the given RequestValidator is updated.
:type requestValidatorId: string
:param requestValidatorId: [REQUIRED]
[Required] The identifier of the RequestValidator to be updated.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'validateRequestBody': True|False,
'validateRequestParameters': True|False
}
"""
pass
def update_resource(restApiId=None, resourceId=None, patchOperations=None):
"""
Changes information about a Resource resource.
See also: AWS API Documentation
:example: response = client.update_resource(
restApiId='string',
resourceId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The RestApi identifier for the Resource resource.
:type resourceId: string
:param resourceId: [REQUIRED]
The identifier of the Resource resource.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'parentId': 'string',
'pathPart': 'string',
'path': 'string',
'resourceMethods': {
'string': {
'httpMethod': 'string',
'authorizationType': 'string',
'authorizerId': 'string',
'apiKeyRequired': True|False,
'requestValidatorId': 'string',
'operationName': 'string',
'requestParameters': {
'string': True|False
},
'requestModels': {
'string': 'string'
},
'methodResponses': {
'string': {
'statusCode': 'string',
'responseParameters': {
'string': True|False
},
'responseModels': {
'string': 'string'
}
}
},
'methodIntegration': {
'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY',
'httpMethod': 'string',
'uri': 'string',
'credentials': 'string',
'requestParameters': {
'string': 'string'
},
'requestTemplates': {
'string': 'string'
},
'passthroughBehavior': 'string',
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT',
'cacheNamespace': 'string',
'cacheKeyParameters': [
'string',
],
'integrationResponses': {
'string': {
'statusCode': 'string',
'selectionPattern': 'string',
'responseParameters': {
'string': 'string'
},
'responseTemplates': {
'string': 'string'
},
'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT'
}
}
}
}
}
}
:returns:
(string) --
(boolean) --
"""
pass
def update_rest_api(restApiId=None, patchOperations=None):
"""
Changes information about the specified API.
See also: AWS API Documentation
:example: response = client.update_rest_api(
restApiId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The ID of the RestApi you want to update.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'createdDate': datetime(2015, 1, 1),
'version': 'string',
'warnings': [
'string',
],
'binaryMediaTypes': [
'string',
]
}
:returns:
(string) --
"""
pass
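# Usage sketch: binaryMediaTypes is a list-valued property, so registering a new
# media type is an 'add' whose path embeds the type with its slash escaped as
# '~1' ('image/png' becomes '/binaryMediaTypes/image~1png'); no 'value' is
# needed because the member is named in the path. The API id is a placeholder.
def _example_add_binary_media_type(client):
    return client.update_rest_api(
        restApiId='a1b2c3',  # hypothetical RestApi id
        patchOperations=[{'op': 'add', 'path': '/binaryMediaTypes/image~1png'}]
    )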
def update_stage(restApiId=None, stageName=None, patchOperations=None):
"""
Changes information about a Stage resource.
See also: AWS API Documentation
:example: response = client.update_stage(
restApiId='string',
stageName='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type restApiId: string
:param restApiId: [REQUIRED]
The identifier of the RestApi resource for the Stage resource to change information about.
:type stageName: string
:param stageName: [REQUIRED]
The name of the Stage resource to change information about.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'deploymentId': 'string',
'clientCertificateId': 'string',
'stageName': 'string',
'description': 'string',
'cacheClusterEnabled': True|False,
'cacheClusterSize': '0.5'|'1.6'|'6.1'|'13.5'|'28.4'|'58.2'|'118'|'237',
'cacheClusterStatus': 'CREATE_IN_PROGRESS'|'AVAILABLE'|'DELETE_IN_PROGRESS'|'NOT_AVAILABLE'|'FLUSH_IN_PROGRESS',
'methodSettings': {
'string': {
'metricsEnabled': True|False,
'loggingLevel': 'string',
'dataTraceEnabled': True|False,
'throttlingBurstLimit': 123,
'throttlingRateLimit': 123.0,
'cachingEnabled': True|False,
'cacheTtlInSeconds': 123,
'cacheDataEncrypted': True|False,
'requireAuthorizationForCacheControl': True|False,
'unauthorizedCacheControlHeaderStrategy': 'FAIL_WITH_403'|'SUCCEED_WITH_RESPONSE_HEADER'|'SUCCEED_WITHOUT_RESPONSE_HEADER'
}
},
'variables': {
'string': 'string'
},
'documentationVersion': 'string',
'createdDate': datetime(2015, 1, 1),
'lastUpdatedDate': datetime(2015, 1, 1)
}
:returns:
(string) --
(string) --
"""
pass
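# Usage sketch: the methodSettings map shown above is keyed by
# '{resource-path}/{http-method}', and '*/*' targets every method, so turning on
# CloudWatch metrics stage-wide is a 'replace' on '/*/*/metrics/enabled'. The
# API id and stage name are placeholders.
def _example_enable_stage_metrics(client):
    return client.update_stage(
        restApiId='a1b2c3',  # hypothetical RestApi id
        stageName='prod',    # hypothetical Stage name
        patchOperations=[{'op': 'replace', 'path': '/*/*/metrics/enabled',
                          'value': 'true'}]
    )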
def update_usage(usagePlanId=None, keyId=None, patchOperations=None):
"""
Grants a temporary extension to the remaining quota of a usage plan associated with a specified API key.
See also: AWS API Documentation
:example: response = client.update_usage(
usagePlanId='string',
keyId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the usage plan associated with the usage data.
:type keyId: string
:param keyId: [REQUIRED]
The identifier of the API key associated with the usage plan in which a temporary extension is granted to the remaining quota.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'usagePlanId': 'string',
'startDate': 'string',
'endDate': 'string',
'position': 'string',
'items': {
'string': [
[
123,
],
]
}
}
:returns:
(string) --
(list) --
(list) --
(integer) --
"""
pass
def update_usage_plan(usagePlanId=None, patchOperations=None):
"""
Updates a usage plan of a given plan Id.
See also: AWS API Documentation
:example: response = client.update_usage_plan(
usagePlanId='string',
patchOperations=[
{
'op': 'add'|'remove'|'replace'|'move'|'copy'|'test',
'path': 'string',
'value': 'string',
'from': 'string'
},
]
)
:type usagePlanId: string
:param usagePlanId: [REQUIRED]
The Id of the to-be-updated usage plan.
:type patchOperations: list
:param patchOperations: A list of update operations to be applied to the specified resource and in the order specified in this list.
(dict) -- A single patch operation to apply to the specified resource. Please refer to http://tools.ietf.org/html/rfc6902#section-4 for an explanation of how each operation is used.
op (string) --An update operation to be performed with this PATCH request. The valid value can be 'add', 'remove', or 'replace'. Not all valid operations are supported for a given resource. Support of the operations depends on specific operational contexts. Attempts to apply an unsupported operation on a resource will return an error message.
path (string) --The op operation's target, as identified by a JSON Pointer value that references a location within the targeted resource. For example, if the target resource has an updateable property of {'name':'value'} , the path for this property is /name . If the name property value is a JSON object (e.g., {'name': {'child/name': 'child-value'}} ), the path for the child/name property will be /name/child~1name . Any slash ('/') character appearing in path names must be escaped with '~1', as shown in the example above. Each op operation can have only one path associated with it.
value (string) --The new target value of the update operation. When using AWS CLI to update a property of a JSON value, enclose the JSON object with a pair of single quotes in a Linux shell, e.g., '{'a': ...}'. In a Windows shell, see Using JSON for Parameters .
from (string) --Not supported.
:rtype: dict
:return: {
'id': 'string',
'name': 'string',
'description': 'string',
'apiStages': [
{
'apiId': 'string',
'stage': 'string'
},
],
'throttle': {
'burstLimit': 123,
'rateLimit': 123.0
},
'quota': {
'limit': 123,
'offset': 123,
'period': 'DAY'|'WEEK'|'MONTH'
},
'productCode': 'string'
}
"""
pass
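# Usage sketch: the throttle and quota blocks in the returned UsagePlan map
# directly onto patch paths such as '/throttle/rateLimit' and '/quota/limit',
# with numeric values passed as strings. The plan id is a placeholder.
def _example_raise_plan_rate_limit(client):
    return client.update_usage_plan(
        usagePlanId='plan01',  # hypothetical UsagePlan id
        patchOperations=[{'op': 'replace', 'path': '/throttle/rateLimit',
                          'value': '500.0'}]
    )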
\xfb\x1cD\xce~\x08\xaa\xaedi\\\x1b\x1a?\xac\xb6C}\xfa\xd5w\xcf\x06\x9eO\xbe\x85q\xe6\xc3\xc8\xeb>Gy\xdd\xf0\xf358\xd9\xef\xba,\xaa}\xb6V\x97\x95|\xac\x9e\'\xf8\xa1\xb3eP\xab\xbe0O\xe5\xd53\xb5\xef\x1f\xdf\xcd\xeb\xef\xb2|,\xae\xb7&d\x82yF\xbe\xc5\x82N\xa0P\xdd\xb59\xa5\x95\x8fUj\xb0oT\xc7^\xeaV_\xd7\x88\x1a6T(\xb58\x19\xea\x18?\x851\xe7L\x04UW\xfd\x03\xe5J\x149\xda\xdf\xd4a"\x9f|\x8bj]\xd6X\xeb\xb3\xb5|>\xc8\xcf\xd7\xe0d\xbf\xcb+\x96\xfb\x9c\x0bkuY;\x07\x08\xe31\x1d\x1e\x18\xd3\xe1]]J-N\xaajYq^\x7f\x97\xe5c\x91\xef5!\x13\xd6\xfa\x13\xc6y\x8e\xbaD\xd0\t\x14\xaay\xb5\xb3$IG^\xec\xd6\x0bO\xb6*\x16w\x94\xaa\x9b\xa5\xf6\xd7\xfbC\x1d\xe3\xa70\xe6\x9c\x89\xa0\xeaZ\xb1\xb1\\\xc3\x03c:\xb2\xab\xdbD>\xf9\x16\xd5\xba\xac\xb1\xd6gk\xf9|\x90\x9f\xaf\xc1\xc9~\x97W,\xf79\x17\xd6\xea\xb2v\x0e\x10\xc6c*I\xcdM\x9d\xba\xf3\xb3\x95Jo\xacP\xdb\xe1\xfc\xdd\x81h\xf9X\xe4{M\xc8\x84\xb5\xfe\x84u\x9e\xa3\x8c[\x85\'\xe0\xc7\xed|\xf1\x19\x8e\xee\xb8\xbfLU\xf5\xc5\xba}\xed\\\x8d\x0e\xbbz\xe3\x95\x1e\xc9\rwL6r\xeds\x109\xfb!\x88\xba\xe6/I\xea\xde\xaf-\xd4\xe1\x9d]:\xb4\xb3+\xf0|\xfc\x10\xc6\x99\x0f#\xaf\xfb\x1c\xd5u\xc3\xcf\xd7\xe0T\xbf\xeb\xb2\xa8\xf6\xd9Z]V\xf2\xb1z\x9e\xe0\x97\x9e\xf6!-\xfbL\x99\xaa\x97\x97h\xef\xb3\xe756\x9a\x9f\x84\xad\x1e\x8bL\xd6\x84L0\xcf\xc876\xae\x13\xf0\xe3\xe4\xf2\xdc\xa9\x01\xb5\x9f\xe8WM\xbaD\x155\xb3t\xeb=7\xea\x96\xbbKu\xec\xe5\x0b\x1a\xee\x1f\x0bmL6r\xeds\x109\xfb!\x88\xba\xd6<\xbe@\xd5\r%\xda\xb5\xadM]mC\x81\xe7\xe3\x870\xce|\x18y\xdd\xe7\xa8\xae\x1b~\xbe\x06\xa7\xfa]\x97E\xb5\xcf\xd6\xea\xb2\x92\x8f\xd5\xf3\x04?\x15\xcd\x8e\xe9\xd6{n\xd4\xd97\xfa\x0b\xe6\xdc\xef\xb2L\xd6\x84L0\xcf\xc8\xb7\xf8\x87\x16\xb3q\xfdC~\x9d\\\x9e\x7fs@\xbf\xf9\xc1yu\x9c\x1eTU}\xb1\xe6\xd5&%W:\xb5\xf7b\xa8c2\xe5E\x9f\xfd\xce\xd9/~\xd6\x15\x8b;\xda\xf4L\x8d\xfa\xbaF\xb5s[[\xe0\xf9\xf8)\x8c3\x1fF^\xf69\x8a\xeb\x86\x9f\xaf\xc1L~\x97\x14\xcd>K\xf6\xea\xb2\x94\x8f\xc5\xf3\x04?u\xb5\r\xea\xae\xcd)%o\x88\xeb\xe0O\xa6\x7f\xd5\xf1z\xac\x1d\x8bL\xd7\x84L0\xcf\xc87\xde\x9c) 
w>Z\xa9G\x9e\xae\xd1\xfc%I\x1d\xdb\xd3\xad\xe3{.H\x1a\x7fW\xb70\xc7\xf8)\x8c9g\xc2\xef\xba\x96\xde]\xaa\x92\x8a\x19:\xb0\xa3c\xc2\xdb_\xe8s\xb8\xea\xb2\xc6Z\x9f\xad\xe5#\xf9\xfb\x1a\xbc\xde\xef\xf2\x8a\xc5>{\xc1Z]\xd6\xce\x01\xc2xL/\xbbxnX\'_\xbd\xa8\xc5\xabKU\xba\xa0(/\xbf\xc3\xe2\xb1\xf0kM\xc8\x84\xb5\xfe\x84y\x9e\xa3\x8a7g\n\x82#\xad\xfc\\\xa5\x16~t\xb6Vl\xaa\xb8\xf2\xe5\xd1aW\xcdM\x9d\xe1\x8d\xf1S\x18s\xceD\x00u\xad\xd8X!\xd7\x95\x9a\'\xfa\xdc6\xfa\x1c\xae\xba\xac\xb1\xd6gk\xf9\\\xe2\xe7kp\xca\xdf\xe5\x15\xa3}\xce\x99\xb5\xba\xac\x9d\x03\x84\xf1\x98\xfe\x81\xe6\xa6\x0e-\xfdd\xa9\x1a\x1e.\xd7/\xbe\xd5\xee\xed\x93\x1b=\x16\xbe\xac\t\x99\xb0\xd6\x9f\x08\xccs\x14\xc5?\xc4\xdf\xb8^\xc3\x8f\xdb\xf9\x0e\xec\xe8TO\xfb\x90\x123c*\xaf\x9e\xa9\x83\xcfw\xaa\xe9\xab-:s\xa4/\xd41\xd9\xc8\xb5\xcfA\xe4\xec\x07?\xeb*.Kh\xfd\xf6j\xfd\xfe\xb5^\xed}\xf6\\\xe0\xf9\xf8)\x8c3\x1fF^\xf79j\xeb\x86\x9f\xaf\xc1L~\xd7eQ\xeb\xf3e\xd6\xea\xb2\x92\x8f\xd5\xf3\x04\xbfu\xbe=\xa8O|~\x9e*o\x9e\xa5_\x7f\xff\xbc\xe7\xcfo\xedXd\xb3&d\x82yF\xde5\xdc\xb7\xc8\xad[1\xcf\xd5\xf8\r\x02<$w\xdd\x96*\xb7\xb15\xed.[W\x96\xf7\xdfU\xbbj\x8e\xdb\xd8\x9avSu\xc9H\xc5\xf8\xd9g?s\xf6\xf3\xe1G]\xab7\xa7\xdc\xc6\xd6\xb4\xdb\xb0\xa1\xdcD>Q\xed\xf3\xe5\x87\x9fk\x8b\xb5\x87W}\x8e\xda\xba\xe1\xe7k0\x9b\xdf\x15\xb5>[\xad\xcbZ>\xd6\xce\x13\x82x<\xf8\xd4"\xb7\xb15\xed\xd6\xa4K\xf2\xf6;\xac\x1c\x8bl\xd6\x84L\x1e\xcc3\x8f|?\xb8U8`#\x83c\xea>3\xa8\xb1\x117R1~\xca%\xe7\xb9\x0b\x8b\xf4\xf5}wL\xf83}]#\xdaV\x7f(\xa3\x98|\xf0\xe3X\xa4\xea\x92j\xd9\xdf\xab\xa3\xbb\xaf\xff\xb9m\xf49\x7f3\xefU\x7f\xe8s\xf8\xf2\xf1\xeb5\x98\xed\xef\xf2\x8a\x95>{\xcdZ]\xd6\xce\x01\xc2xL/{\xedG\x1d\xba\xe9\xf6b-\xfaX\xb1Z\x9a{\xf3\xf2;\xac\x1c\x8b \xd6\x84LX\xe9O61\xf0\x87\xb3\xfc\xdeEno\xd7\x80N\xee\xf7\xfe\x96\x88\xb0Z\xb7\xa5J\xab7\xa7\xf4\xdc\x97O\xeb\xf0\xae\xfc\xbd\xb3\\\xa1\xb3\xd0\xe7\xd9s\x13\xba\xff/n\x9a\xf0{C}cz\xe1\xc9\xd6\x8cb05\xfa<n\xb2\x99\xf7\xaa?\x85\xd0g\x0b\xebF!\x88j\x9f\xad\xd5e-\x1f 
\x17\xcc3\xf2\x8d+\xae\x138{\xa2_\xc7_\xbe\xa0\x9e\xb3\xd3\xff,+\\\x9f\x85>\xbf\xdf=\xa2\xa6?m\xc99\x06S\xa3\xcf\xe3&\x9by\xaf\xfaS\x08}\xb6\xb0n\x14\x82\xa8\xf6\xd9Z]\xd6\xf2\x01r\xc1<#\xdf\x9c\xe5\xf7.r\xdf\xeb\x1a\xd0)\xae\xb8\x02\x00\x00\x00\x00\x0c\xe2s\\\x01\x00\x00\x00\x00\xa6\xb1q\x05\x00\x00\x00\x00\x98\xc6\xc6\x15\x00\x00\x00\x00`ZL\x92\x9c\xa0\xb3\x08\x91/\xfe\xb0N\xdbO7\xe8\x86\xca\x19A\xa7\x12\x98\xdaUs\xd4\xd8\x9aV\xaa.\x99S\xcc\xba-UjlMk\xd9\xba2\x13\xf9\xe4\xca\xeb\xd9\x88j\x9f\xa3\x1a\xe3\xb5%kJ\xd5\xd8\x9a\xd6}O,\x9cV>Q\x8da\x9e3\x9b\xc3\xa9\xe6\'\x13\xd6\xfa\xec\x15kuY\xcb\xc7k\xb9\xcea\x18Y;\x16~\xe6\xc3<#\xdf\xb8\xe2\x9a\xa5\xfdM\x9d\x8a%\x1c-__\x1et*\x81q.MM,\x9e[L\xd4\xf2\xf1z6\xac\xd4\xe5u>Q\x8d\xf1Zz\xe3\xf8\x1c\x1d\xd8\xd19\xad|\xa2\x1a\xe3\x15kuy\xdd\x9f\xa9\xe6\xc7O\xd6\x8e\xbbW\xac\xd5e-\x9f\xcb\xac\xcc\xa1\x9f\xac\x1d\x0bk\xf9d\xc2j\xce\x858\xcf\xd6\xc4\x17\xd4\x96n\x1d\xea\x1fQ\xe7;}A\xe7\x12\n\x9d-\x83Z\xf5\x85y*\xaf\x9e\xa9}\xff\xf8n\xd0\xe9\xf8*Q\xe4\xe8\xc1\xa7\xaa\xb5\xf6\xcfnR|\x86\xa3\xe5\x0f\x95\xab\xacj\xa6\xde\xda\xf7\x9eF\x87\xdd\x8cc.[\xb2\xa6T\xd5\xcbKttw\xb7\xce\x9d\xec\x0f<\x9f\\y5\x1bQ\xedsTc\xf2!Y\x1a\xd7\x86\xc6\x0f\xab\xedP\x9f~\xf5\xdd\xb3\x1c\x8b\x0f`\x9e\xaf?\x87\x93\xcdO6\xac\xf4\xd9k\xd6\xea\xb2\x96\x8f\x97\xbc\x98C+\xfe\xf8O\xe6\xab\xa4,\xa1w\xdf\x1a\x984\xc6\xda\xb1\x08"\x1f\xe6\x19\xf9\xc6\x15\xd7,\r\x0f\x8c\xe9\xf0\xae.\xa5\x16\'U\xb5\xac8\xe8t|u\xd7\xe6\x94V>V\xa9\xc1\xbeQ\x1d{\xa9[}]#j\xd8P\xa1\xd4\xe2dV1Q\xcd\xc7\xab\xd9\xb0V\x97W\xf9D5&\x1f\xea\x1f(W\xa2\xc8\xd1\xfe\xa6\x8e\xab\xben\xad\xf60\xf6\xd9Z]\xf9\xe8\xcfd\xf3\xe3\'k\xc7\xdd+\xd6\xea\xb2\x96\xcf\x07Y\x98C\xaf\xccI\xcd\xd0\x7f\xfc^\xad\xfe\xc3\xb7oVIyb\xc2\x18k\xc7\xc2Z>a\xcf9J\xf3\x1cf\x13\xbf\xfa0\xa5\xe6\xa6N\xdd\xf9\xd9J\xa57V\xa8\xedp\xe1\\\xa9\x9eW;K\x92t\xe4\xc5n\xbd\xf0d\xabbqG\xa9\xbaYj\x7f\xbd?\xab\x98\xa8\xe6#y3\x1b\xd6\xea\xf2*\x9f\xa8\xc6\xe4\xc3\x8a\x8d\xe5\x1a\x1e\x18\xd3\x91]\xddW}\xddZ\xeda\xec\xb3\xb5\xba\xf2\xd1\x9f\xc9\xe6\xc7O\xd6\x8e\xbbW\xac\xd5e-\x9f\x0f\xb20\x87^\xf9\xf9\xdf\xb5k\xd6\rq\xad\xd8X\xa1\xc5w\xcd\xd1?om\xd3\xbf\xfe\xe4\xea\xdbE\xad\x1d\x0bk\xf9\x84=\xe7(\xcds\x98q\xab\xf04\xf4\xb4\x0fi\xd9g\xcaT\xbd\xbcD{\x9f=\xaf\xb1\xd1\xfc\xdd\x9a`I|\x86\xa3;\xee/SU}\xb1n_;W\xa3\xc3\xae\xdex\xa5Gr\xb3\x8b\xb9,\xd7[J\xbc\xce\xc7\x0b^\xccFT\xfb\x1c\xd5\x18\xaf\xcd_\x92\xd4\xbd_[\xa8\xc3;\xbbthg\xd7U\xdf\xb3V{\x10}f\x9e\xa7\xee\xcfT\xf3\x93\r+}\xf6\x9a\xb5\xba\xac\xe5\xe3\x15\xaf\xe6\xd0\x8a\xe1\x811\xfdn\xcf\x05\xbd\xb5\xef=\xd5\xae\x9a\xa3;?[\xa9\xaa\xfab\x1d}\xe9\xc2\x95\x7f\xe7\xad\x1d\x8b 
\xf2a\x9e\x91ol\\\xa7\xa9hvL\xb7\xdes\xa3\xce\xbe\xd1?\xad\x17g\x18\x9d;5\xa0\xf6\x13\xfd\xaaI\x97\xa8\xa2f\x96n\xbd\xe7F\xddrw\xa9\x8e\xbd|A\xc3\xfdc\x19\xc7\\\x96\xeb\x02\xe7u>^\xc9u6\xa2\xda\xe7\xa8\xc6xm\xcd\xe3\x0bT\xddP\xa2]\xdb\xda\xd4\xd56\xc4\xb1`\x9e\xb3\xea\xcfT\xf3\x93\r+}\xf6\x9a\xb5\xba\xac\xe5\xe3\x15\xaf\xe6\xd0\x9a\xee3C\xfa\xdd\xcf.\xe8c\x0f\x96\xebC\xb7\xcd\xd6\xaf\xbf\x7f^C\xef\xdb\xfc7%\x88|\x98g\xe4[\xfcC\x8bK\xb7\x0e\r\x8c\xaa\xf3\xdf\xd8\xb8f\xa3\xabmPwmN)yC\\\x07\x7fR8\xff\xf5\xe5\xfc\x9b\x03\xfa\xcd\x0f\xce\xab\xe3\xf4\xa0\xaa\xea\x8b5\xaf6)\xb9\xd2\xa9\xbd\x17\xb3\x8a\x91r_\xe0\xbc\xce\xc7+^\xccFT\xfb\x1c\xd5\x18\xaf\xc4\xe2\x8e6=S\xa3\xbe\xaeQ\xed\xdc\xd66a\x8c\xb5\xda\xfd\xee3\xf3<yL&\xf3\x93)K}\xf6\x92\xb5\xba\xac\xe5\xe3\x05/\xe7\xd0\x14G\xba\xf3\xd1J}\xfe\x1fj5\xbb4\xa1\xbd\xcf\x9e\xd3\xef\xf6\\\x90\xfb\x81+}\xd6\x8e\x85\xdf\xf90\xcf\xc87\xde\x9ci\x9a.\x9e\x1b\xd6\xc9W/j\xf1\xeaR\x95.(\n:\x1d_\xdc\xf9h\xa5\x1ey\xbaF\xf3\x97$ulO\xb7\x8e\xef\xb9 i\xfc\x9d\xd6\xb2\x89\x89j>\x97\xe5:\x1b\xd6\xea\xf2*\x9f\xa8\xc6xi\xe9\xdd\xa5*\xa9\x98\xa1\x03;:&\xbc\xed\xc9Z\xeda\xec\xb3\xb5\xba\xbc\xec\xcf\xf5\xe6\xc7O\xd6\x8e\xbbW\xac\xd5e-\x1f\xc9\xd6\x1cze^\xed,}\xe9\xc7K\xf5\xf0\x7f\xaf\xd6\xc5\xb3C\xfa\xf6C\xafk\xd7_\xb5]\xf5\xe7@\xd6\x8e\x85\xb5|\xc2\x9as\x14\xe79\xccxs\xa6\x1c47uh\xe9\'K\xd5\xf0p\xb9~\xf1\xad\xf6\xa0\xd3\xc9/GZ\xf9\xb9J-\xfc\xe8l\xad\xd8Tq\xe5\xcb\xa3\xc3\xae\x9a\x9b:3\x8f\x89j>\x7f`\xda\xb3a\xad.\xaf\xf2\x89j\x8c\xc7Vl\xac\x90\xebJ\xcd\x13}F\x9c\xb5\xda\xc3\xd8gkuy\xdc\x9f)\xe7\xc7O\xd6\x8e\xbbW\xac\xd5e-\x9fK\xcc\xcc\xa1\x87\xee\xfc\x0f\xe3\x7f\xd3\xfa\xb3\xbf}G?\xff\xfb\xf6k?v\xc5\xda\xb1\xb0\x96O&\x8c\xe6\x1c\xc5y\x0e\xb3\xf8\x87jK\xb7\x0e\xf5s\xab\xf0tt\xbe=\xa8O|~\x9e*o\x9e\xa5_\x7f\xff|\xd0\xe9\xe4\xdd\x81\x1d\x9d\xeai\x1fRbfL\xe5\xd53u\xf0\xf9N5}\xb5Eg\x8e\xf4e\x15sY\xae\xb7\x94x\x9d\x8f\x97r\x99\x8d\xa8\xf69\xaa1^).Kh\xfd\xf6j\xfd\xfe\xb5^\xed}\xf6\xdc\x841\xd6j\x0f\xa2\xcf\xcc\xf3\xc41\x99\xccO6\xac\xf4\xd9k\xd6\xea\xb2\x96O\xae\xbc\x9eC+n\xa8\x9c\xa1\x9f>\xf3\x8e\x0e\xef\xea\x96;\xc9\x9fQZ;\x16A\xe4\xc3<#\xef\x1a\xee[\xe4\xd6}<\xe5j\xfc\x028\x8f,\x1f\x0f>\xb5\xc8mlM\xbb5\xe9\x92\xc0s\xf1\xebQ\xbbj\x8e\xdb\xd8\x9avSu\xc9\x9cb\xd6m\xa9r\x1b[\xd3\xee\xb2ue&\xf2\xb16\x1bQ\xedsTcr}\xa4NdQ\x00\x00 \x00IDAT\xac\xde\x9cr\x1b[\xd3n\xc3\x86r\x8e\x05\xf3\x9cuL6\xf3\x93\xc9\xc3Z\x9f\xbdzX\xab\xcbZ>\xb9>\xbc\x9e\xc30>\xac\x1c\x8b 
\xf2a\x9ey\xe4\xfb\xc1\xad\xc29z\xedG\x1d\xba\xe9\xf6b-\xfaX\xb1Z\x9a{5wa\x91\xbe\xbe\xef\x8e\tc\xfb\xbaF\xb4\xad\xfe\x90\xaf1\xf9028\xa6\xee3\x83\x1a\x1bqs\x8a\x89j>\x97\xfd\xe1ld\xcbZ]^\xe5\x13\xd5\x98\\\xa5\xea\x92j\xd9\xdf\xab\xa3\xbb\xaf\xff\x19q\xd6j\x0fS\x9f\xbd\xce\xc7JL6\xf3\xe3\'k\xc7\xdd+\xd6\xea\xca%\x1f/\xcf7\xac\xce\xa1\x9f\xa24\x1bAa\x9e1\x19\xa7\xe1\xbeE\xee{]\x83:\xf9\x1a\x97\xc0\xbd0{nB\xf7\xff\xc5M\x13~o\xa8oL/<\xd9\xeak\x8ce\xeb\xb6Ti\xf5\xe6\x94\x9e\xfb\xf2i\x1d\xdeU8\xef\xcc\xec7\xfa\x8c(a\x9e\xfd\x11\xd5>[\xab\xcbB>\x85p\xbe\x01\x7f0\xcf\xc87\xae\xb8z\xec\xfd\xee\x115\xfdi\x8b\x99\x18\xcb\xce\x9e\xe8\xd7\xf1\x97/\xa8\xe7,\x9f\x89\x95O\xf4\x19Q\xc2<\xfb#\xaa}\xb6V\x97\x85|\n\xe1|\x03\xfe`\x9e\x91o\\q\x05\x00\x00\x00\x00\x98\xc6\xe7\xb8\x02\x00\x00\x00\x00Lc\xe3\n\x00\x00\x00\x000\x8d\x8d+\x00\x00\x00\x00\xc046\xae9Z\xb2\xa6T\x8d\xadi\xdd\xf7\xc4\xc2k\xbeW\xbbj\x8e\x1a[\xd3J\xd5%\'\xfd\xf9Lbr\xf5\xc5\x1f\xd6i\xfb\xe9\x06\xddP9\xc3\x93\xe7\xf3\xaa\xaeu[\xaa\xd4\xd8\x9a\xd6\xb2ue&\xf2\xf1\x8a\xb5\xba\xac\xe5\x13fS\xbd\xde\xbdz\x9e\xa8\xf6\x99y\xce\x9d\x9f9G\xb5\xcf\xd6\xea\xb2\x96\x0f\xfc]\x9f\'\xfa]^\x9f\xb3e\x82yFX\xb0q\xcdQzc\xb9$\xe9\xc0\x8e\xcek\xbe\xe7\\\xean,>\xf9\xcfg\x12\x93\xab\xfdM\x9d\x8a%\x1c-__\xee\xc9\xf3Y\xa9\xcbj>^\xb1V\x97\xb5|\x820\xd5\xeb\xdd\xab\xe7\x89j\x9f\xad\xd5e-\x9fL\x90\xb3\x1d\xd6\xea\xb2\x96O\x98\xf9\xb9>O\xf4\xbb\xbc>g\xcb\x84\xb5\xf9\xb1\x96\x0f\xec`\xe3\x9a\x83di\\\xb7}z\xae\xde>\xd0\xabw\xdf\x1a\xb8\xf2\xf5D\x91\xa3\xf5\xdf\xa8\xd6\x17\xbeW+I\xfa\xd2\x8f\x97\xea\xe1\xed\xd5**\x8ee\x15\xe3\x95c/uk\xe0\xbd\xd1+\x0b\xe4tY\xab\xcbZ>^\xb1V\x97\xb5|\x822\xd9\xeb\xdd\xab\xe7\x89j\x9f\xad\xd5e-\x1fr\xb6\x93s&\xac\xd5e-\x9f\xb0\xf3s}\x9e\xecwyu\xce\x96\tk\xf3c-\x1f\xd8\xc3Q\xceA\xfd\x03\xe5J\x149\xda\xdf\xd4q\xd5\xd7\xef\xda\x9c\xd2\xca\xc7*5\xd87\xaac/u\xab\xafkD\r\x1b*\x94Z\x9c\xcc*\xc6+\xc3\x03c:\xbc\xabK\xa9\xc5IU-+\x9e\xf6\xf3X\xab\xcbZ>^\xb1V\x97\xb5|\x822\xd9\xeb\xdd\xab\xe7\x89j\x9f\xad\xd5e-\x1fr\xb6\x93s&\xac\xd5e-\x9f\xb0\xf3s}\x9e\xecwyu\xce\x96\tk\xf3c-\x1f\xd8\x93\x08:\x810[\xb1\xb1\\\xc3\x03c:\xb2\xab\xfb\xaa\xaf\xcf\xab\x9d%I:\xf2b\xb7^x\xb2U\xb1\xb8\xa3T\xdd,\xb5\xbf\xde\x9fU\x8c\x97\x9a\x9b:u\xe7g+\x95\xdeX\xa1\xb6\xc3}\xd3z\x0ekuY\xcb\xc7+\xd6\xea\xb2\x96OP&{\xbd{\xf5<Q\xed\xb3\xb5\xba\xac\xe5C\xcevr\xce\x84\xb5\xba\xac\xe5\x13v~\xae\xcfS\xfd\x9b\xe2\xc59[&\xac\xcd\x8f\xb5|`O\xfcC\xb5\xa5[\x87\xfaG\xd5\xf9o\xf9{aD\xd1\xfc%I\xdd\xfb\xb5\x85:\xbc\xb3K\x87vv]\xf5\xbd\xf8\x0cGw\xdc_\xa6\xaa\xfab\xdd\xbev\xaeF\x87]\xbd\xf1J\x8f\xe4f\x17\xe3\xa5\x9e\xf6!-\xfbL\x99\xaa\x97\x97h\xef\xb3\xe756\x9a\xfd/\xf2\xba\xae%kJU\xbd\xbcDGww\xeb\xdc\xc9\xec\x17\x1c\x8b}\x96\xec\xd5e-\x9f0\x9a\xea\xf5\xee\xd5\xf3D\xb5\xcf\xccs\xee\x82\xc89\xaa}\xb6V\x97\xb5|\n\x99\x9f\xeb\xf3\xf5\xfeM\xf1\xe2\x9c-\x13\xcc3\xc2&\xbe\xe0#\xa5[\x07\xfbG\xd5\xf5\x0e\x1b\xd7l\xacy|\x81\xaa\x1bJ\xb4k[\x9b\xba\xda\x86\xae\xfa\xde\xb9S\x03j?\xd1\xaf\x9at\x89*jf\xe9\xd6{n\xd4-w\x97\xea\xd8\xcb\x174\xdc?\x96q\x8c\xd7\x8af\xc7t\xeb=7\xea\xec\x1b\xfd\xd3ZP\xbc\xae+\xd7\x05\xcej\x9f\xad\xd5e-\x9f0\x9a\xea\xf5\xee\xd5\xf3D\xb5\xcf\xccs\xee\x82\xc89\xaa}\xb6V\x97\xb5|\n\x99\x9f\xebs&\xff\xa6\xe4z\xce\x96\t\xe6\x19a\x13_P{\xe3\xd6\xa1\xfe\x116\xaeY\x88\xc5\x1dmz\xa6F}]\xa3\xda\xb9\xadm\xc2\x98\xf3o\x0e\xe87?8\xaf\x8e\xd3\x83\xaa\xaa/\xd6\xbc\xda\xa4\xe4J\xa7\xf6^\xcc*\xc6K]m\x83\xbaksJ\xc9\x1b\xe2:\xf8\x93\xe9]5\xf2\xb2\xae\\\x178\xaf\xf3\xf1\x8a\xb5\xba\xac\xe5\x136\x99\xbc\xde\xbdz\x9e\xa8\xf6\x99y\xce\x9d\xdf9G\xb5\xcf\xd6\xea\xb2\x96O\xa1\xf2s}\xce\xf4\xdf\x14/\xce\xd92\xc1<#Lb\
x92\xe4\x04\x9dE\xc8,\xbd\xbbT%\x153t`G\xc7\x84\xb7&\xdc\xf9h\xa5\x1ey\xbaF\xf3\x97$ulO\xb7\x8e\xef\xb9 i\xfc\x1d\xe4\xb2\x89\xf1\xda\xc5s\xc3:\xf9\xeaE-^]\xaa\xd2\x05EY\xff\xbc\xb5\xba\xac\xe5\xe3\x15kuY\xcb\xc7o\xd7{\xbd{\xf5<Q\xed\xb3\xb5\xba\xac\xe5C\xcevr\xce\x84\xb5\xba\xac\xe5\x13V~\xae\xcf\x99\xfe\x9b\x92\xeb9[&\xac\xcd\x8f\xb5|`\x0fo\xce4\r+6V\xc8u\xa5\xe6\x89>\xcb\xd1\x91V~\xaeR\x0b?:[+6U\\\xf9\xf2\xe8\xb0\xab\xe6\xa6\xce\xccc\xf2\xa4\xb9\xa9CK?Y\xaa\x86\x87\xcb\xf5\x8bo\xb5g\xfe\x83\xd6\xea\xb2\x96\x8fW\xac\xd5e-\x9f\x00L\xf9z\xf7\xeay\xa2\xdagkuY\xcb\'\x13\xe4l\x87\xb5\xba\xac\xe5\x13b~\xae\xcf\xd9\xfc\x9b2\xeds\xb6LX\x9b\x1fk\xf9\xc0\xa4+\xb7\nwr\xabpF\x8a\xcb\x12Z\xbf\xbdZ\xbf\x7f\xadW{\x9f=7a\xcc\x81\x1d\x9d\xeai\x1fRbfL\xe5\xd53u\xf0\xf9N5}\xb5Eg\x8e\xf4e\x15\x93\x0f\x9do\x0f\xea\x13\x9f\x9f\xa7\xca\x9bg\xe9\xd7\xdf?\x9f\xd5\xcfz]W\xae\xb7\x94X\xed\xb3\xb5\xba\xac\xe5\x13&\x99\xbc\xde\xbdz\x9e\xa8\xf6\x99y\xce]\x109G\xb5\xcf\xd6\xea\xb2\x96O!\xf2s}\xce\xf6\xdf\x94\\\xce\xd92\xc1<#t\x96\xdf[\xed\xd6\xadH\xb9\x1a\xbfa\x81\xc7u\x1e\xab7\xa7\xdc\xc6\xd6\xb4\xdb\xb0\xa1\xfc\xba\xb1\xb5\xab\xe6\xb8\x8d\xadi7U\x97\xcc)\xc6\xeb\xc7\x83O-r\x1b[\xd3nM\xbadZ?\xefU]\xeb\xb6T\xb9\x8d\xadiw\xd9\xba\xb2\x9c\xea\xb1\xd6gkuY\xcb\'L\x8fl^\xef^=OT\xfb\xcc<\xdb\xe9a&\x8f\xa8\xf6\xd9Z]\xd6\xf2)\xc4\x87\x9f\xeb\xf3t\xfeM\xc9\xf5\x9c\xcd\xcf\xf9a\x9ey\xe4\xfb\xc1\xad\xc2YJ\xd5%\xd5\xb2\xbfWGw_\xff\xb3\x1cG\x06\xc7\xd4}fPc#nN1^{\xedG\x1d\xba\xe9\xf6b-\xfaX\xb1Z\x9a{\xb3\xfeykuY\xcb\xc7+\xd6\xea\xca%\x9f\xb9\x0b\x8b\xf4\xf5}wL\xf83}]#\xdaV\x7f\xc8\\\x8c\x94\xdd\xeb}*~\xad\x1b^\xd6\xee\xb5B\x9b\xe7|\xb0\xd6\xc3L\x841\xe7LX\xab\xcbZ>a\xe2\xe7y\xddt\xfeM\xc9\xf5\x9c-\x13\xd6\xe6\xc7Z>\xb0\xc3Y~o\xb5\xdb\xdb5\xa0\x93\xfb\xa7\x7f\x1b\x1c0\x1d\xeb\xb6Ti\xf5\xe6\x94\x9e\xfb\xf2i\x1d\xde\x95\xbfw\xcc\xf3\x9b\xb5\xba,\xe43{nB\xf7\xff\xc5M\x13~o\xa8oL/<\xd9j.&\x8c\n\xa1\xf6\xb0\xccs\xd8Y\xe8s>X\xab\xcbZ>@.\x98g\xe4\x1bW\\\x11\x98\xb3\'\xfau\xfc\xe5\x0b\xea9;\xfd\xcf\xc5\xb4\xc8Z]\x16\xf2y\xbf{DM\x7f\xda\x12\xaa\x980*\x84\xda\xc32\xcfag\xa1\xcf\xf9`\xad.k\xf9\x00\xb9`\x9e\x91o\\q\x05\x00\x00\x00\x00\x98\x16\x0b:\x01\x00\x00\x00\x00\x00\xa6\xc2\xc6\x15\x00\x00\x00\x00`\x1a\x1bW\x00\x00\x00\x00\x80i1\xc7q$\xc7\t:\x0fS\xd6m\xa9RckZ\xcb\xd6\x95\xe5\xfdw\xd5\xae\x9a\xa3\xc6\xd6\xb4Ru\xc9H\xc5\\\xb6dM\xa9\x1a[\xd3\xba\xef\x89\x85\xd7|\xcf\xab>{\x9ds\xae\xac\xd5\xc5<\xfb\x13C\x9f\xc3\xd5g?\xd6\x8d/\xfe\xb0N\xdbO7\xe8\x86\xca\x19\xd3M3k\xd6\xfa\x9c\tk3V\xc8\xf3\x1c\xd5\x18k}\xce\x95\xd7kKT\xfb\x1c\xd5\x98B\xc6\x15\xd7\x809\x97\x8e@,\x1e\xad\x98\xcb\xd2\x1b\xcb%I\x07vt^?x\x9a\xbc\xce\xd9\x8a0\xd6em\x0e\x83\x98y?X\xab=\x8c}\xf6#\x9f\xfdM\x9d\x8a%\x1c-__>\xbd\'\x98\x06k}\xce\x84\xb5\x19+\xe4y\x8ej\x8cW\xac\xe4\xe3\xf5\xdab\xa5.\xaf\xf3\x89jL!\x8b\x7f\xa8\xb6t\xebP\xff\x88:\xff\xad/\xe8\\\xccX\xb2\xa6T\xd5\xcbKttw\xb7\xce\x9d\xec\xcf\xcb\xefH\x149z\xf0\xa9j\xad\xfd\xb3\x9b\x14\x9f\xe1h\xf9C\xe5*\xab\x9a\xa9\xb7\xf6\xbd\xa7\xd1a7\xb41\x1f\x94,\x8dkC\xe3\x87\xd5v\xa8O\xbf\xfa\xeeY\xcf\xfb\x9c\x8f\x9c\xbd`\xad.\xe6\xd9\x9f\x99\xa7\xcf\xe1\xe8\xb3\x9f\xebFg\xcb\xa0V}a\x9e\xca\xabgj\xdf?\xbe\x9bu\xae\xd9\xb0\xd6g?s\x0ec\x8cW}\xb6V\x97\xb5\x18k}\xf6\x8aWkKT\xfb\x1c\xd5\x18H1\x89f\x04\xe1\xae\xcd)\xad|\xacR\x83}\xa3:\xf6R\xb7\xfa\xbaF\xd4\xb0\xa1B\xa9\xc5\xc9P\xc7|P\xfd\x03\xe5J\x149\xda\xdf\xd4\xe1e\xeb\xf2\x9a\xb3\x05a\xac\xcb\xda\x1c\x065\xf3\xf9f\xad\xf60\xf6\xd9\xcf|\x86\x07\xc6txW\x97R\x8b\x93\xaaZV\xecu)W\xb1\xd6g?s\x0ec\x0c=,\xcc>{\xc5\xab\xb5\xc5Z]\xd6\xe6\xc7Z\x0c\xa4\x84\xebJr\xd9\xbc\xfam^\xed,I\xd2\x91\x17\xbb\xf5\x
c2\x93\xad\x8a\xc5\x1d\xa5\xeaf\xa9\xfd\xf5\xfeP\xc7|\xd0\x8a\x8d\xe5\x1a\x1e\x18\xd3\x91]\xdd\xd3o\xd4\x14\xf2\x91\xb3\x05a\xac\xcb\xda\x1c\x065\xf3\xf9f\xad\xf60\xf6\xd9\xef|\x9a\x9b:u\xe7g+\x95\xdeX\xa1\xb6\xc3\xf9\xbb\xb3\xc9Z\x9f\xfd\xcc9\x8c1^\xb1V\x97\xb5\x18\xafX\xcbG\xf2fm\xb1V\x97\xb5\xf9\xb1\x16\x03)\xbe\xe0\xf2\xad\xc2\xefp\xab\xf0e~\xdc"\x15\x9f\xe1\xe8\x8e\xfb\xcbTU_\xac\xdb\xd7\xce\xd5\xe8\xb0\xab7^\xe9\xb9\xea\x02x\x18c.\x9b\xbf$\xa9{\xbf\xb6P\x87wv\xe9\xd0\xce\xae\t{\x90k\x9f\xbd\xce\xd9+\xd6\xeab\x9e\xfd\x99y\xfa\x1c\x8e>\xfb\xbdn\xf4\xb4\x0fi\xd9g\xcaT\xbd\xbcD{\x9f=\xaf\xb1\xd1\xfc,>\xd6\xfa\xecg\xcea\x8c\xb9\xcc\xca<G5\xc6Z\x9f\xbd\xe4\xc5\xda\x12\xd5>G5\x06l\\\'\xe4\xc7?\xd8\xe7N\r\xa8\xfdD\xbfj\xd2%\xaa\xa8\x99\xa5[\xef\xb9Q\xb7\xdc]\xaac/_\xd0p\xffXhc.[\xf3\xf8\x02U7\x94h\xd7\xb66u\xb5\r\xe5\xa5\xcf^\xe7\xec\x15ku1\xcf\xfe\xcc<}\x0eG\x9f\x83X7\x8af\xc7t\xeb=7\xea\xec\x1b\xfd\xe6g\xe32\xe6\xb9\xb0\xe69\xaa1\xd6\xfa\xec\xb5\\\xd7\x96\xa8\xf69\xaa1`\xe3:!?\xfe\xc1\x96\xa4\xf3o\x0e\xe87?8\xaf\x8e\xd3\x83\xaa\xaa/\xd6\xbc\xda\xa4\xe4J\xa7\xf6^\x0cuL,\xeeh\xd335\xea\xeb\x1a\xd5\xcemm\x93\xd6\xefE\x9f\xbd\xca\xd9K\xd6\xeab\x9e\xf3\x1f#\xd1\xe70\xf5\xd9\xefu\xa3\xabmPwmN)yC\\\x07\x7f2\xf1\x1d(^\xb0\xd6g?s\x0ec\x8cdk\x9e\xa3\x1ac\xad\xcf^\xf2bm\x89j\x9f\xa3\x1aS\xe8\xf88\x9c\x80\xdc\xf9h\xa5\x1ey\xbaF\xf3\x97$ulO\xb7\x8e\xef\xb9 i\xfc\x9dx\xc3\x1c#IK\xef.UI\xc5\x0c\x1d\xd8\xd1\x91\xd7[\x1c\xbc\xcc\xd9\x920\xd6em\x0e\xfd\x9ey\xbfX\xab=\x8c}\x0e"\x9f\x8b\xe7\x86u\xf2\xd5\x8bZ\xbc\xbaT\xa5\x0b\x8ar/b\x02\xd6\xfa\xecg\xcea\x8c\xf1\x8a\xb5\xba\xac\xc5x\xc5Z>\x97\xe5\xba\xb6X\xab\xcb\xda\xfcX\x8b\x81\x94p\x1cI\x8e\x13t\x1e\x85\xc5\x91V~\xaeR\x0b?:[+6U\\\xf9\xf2\xe8\xb0\xab\xe6\xa6\xce\xf0\xc6\\\xb2bc\x85\\Wj\xce\xe3g\xb7z\x9d\xb3\x19a\xac\xcb\xda\x1c\x060\xf3\xbe\xb0V{\x18\xfb\x1c`>\xcdM\x1dZ\xfa\xc9R5<\\\xae_|\xab}\xda\xcf3!k}\xce\x84\xb5\x19+\xe4y\x8ej\x8cW\xac\xe5\xf3\x07\xa6\xbd\xb6X\xab\xcb\xda\xfcX\x8b\x81$)^s\xcb\xbc\xad\xee\xb0\xa3s\xad=A\xe7b\x86\x1f\xb7H\x1d\xd8\xd1\xa9\x9e\xf6!%f\xc6T^=S\x07\x9f\xefT\xd3W[t\xe6H_\xa8c\x8a\xcb\x12Z\xbf\xbdZ\xbf\x7f\xadW{\x9f=7e\x0fr\xed\xb3W9{\xcdZ]\xccs~c.\xa3\xcf\xe1\xe8sP\xebF\xe7\xdb\x83\xfa\xc4\xe7\xe7\xa9\xf2\xe6Y\xfa\xf5\xf7\xcfO\xfby&c\xad\xcf~\xe6\x1c\xc6\x98\xcb\xac\xccsTc\xac\xf59\x1frY[\xa2\xda\xe7\xa8\xc6@R,\x1es\xff\xdf\xff\xf6_]\x8d\xdf\xd4\xc9Cr\xd7m\xa9r\x1b[\xd3\xee\xb2uey\xff]\xb5\xab\xe6\xb8\x8d\xadi7U\x97\x8cD\xcc\xea\xcd)\xb7\xb15\xed6l(\xf7\xad\xcf^\xd5em~\xbc\xaa\x8by\xf6\'\x86>\x87\xab\xcfA\xac\x1b\x0f>\xb5\xc8mlM\xbb5\xe9\x12\xf3\xb3\xc1<\x17\xe6<G5\xc6Z\x9f\xbd~\xe4\xba\xb6D\xb5\xcfQ\x8d)\xe4G\xec\x8f\x1fX\xa6\xea\x85\x1f\x11\x82128\xa6\xee3\x83\x1a\x1bq#\x11\x93\xaaK\xaae\x7f\xaf\x8e\xee\xce\xcfg\xb7f\x9bO61\xd6\x84\xb1.+s\x18D\x8c\x9f\xac\xd5\x1e\xc6>\x07\x91\xcfk?\xeaP\xeb\xc1>-\xfaX\xb1\'\xcf7\x11k}\xce\x84\xb5\x19+\xe4y\x8ej\x8cW\xac\xe5sY\xaek\x8b\xb5\xba\xac\xcd\x8f\xb5\x98\x82\xf6\xe8\xe3k\xdd\xef|\xe7;\x81\xef\xa0-=\xfc\xfc/\xcd\x85\xfc\x88j\x9f\xad\xd5e-\x9f\xa8>\xe83}\x8e\xd2\x83>\xd3\xe7(=\xe83}\xe6\x11\x8dG\xac\x7f\xa0Occ|>\xd0\x07\x9d=\xd1\xaf\xe3/_P\xcf\xd9\x89?\x7f\x14\xde\x88j\x9f\xad\xd5e-\x9f\xa8\xa2\xcf\xfe\xa0\xcf\xfe\xa0\xcf\xfe\xa0\xcf\xfe\xa0\xcf\xfe\xa0\xcf\xc87g\xf5\x83\xb7\xb8\x9b\xee\xf9\xb2\xbe\xf2\x95\xaf\x04\x9d\x0b\x00\x00\x00\x00\x00\xd7\x88\xbd?\xd0\xa7\xe3\xa7\x0e\x05\x9d\x07\x00\x00\x00\x00\x00\x13\x8aI\xd2\xaf\xfeew\xd0y\x00\x00\x00\x00\x000\xa1\x98$\x8d\x8e\x8e\x04\x9d\x07\x00\x00\x00\x00\x00\x13\x8a\x05\x9d@\xd8|\xf1\x87u\xda~\xbaA7T\xce\x08:\x95\xc0-YS\xaa\xc6\xd6\xb4\xee{
b\xe1\xb4~~\xdd\x96*5\xb6\xa6\xb5l]\x99\xc7\x99M.\xd7\x9c3\x11\xa6\xba\x98\xe7\xec\xd0g\x7fL\xd5\xe70\xbd\xbe2\x11\xe4l0\xcf\xd3W\xbbj\x8e\x1a[\xd3J\xd5%s\x8a\tb\x9e\xa3\xaa\x90\xd6\x8d\xcb\xac\xd5\xc5<{\xc7\x8f\xf9\t#6\xaeY\xda\xdf\xd4\xa9X\xc2\xd1\xf2\xf5\xe5A\xa7\x12\xb8\xf4\xc6\xf1\x1e\x1c\xd8\xd1\x19p&\x99\x0bc\xce\x99\x98n]\xccsv\xe8\xb3?\xac\xbdN\xf3\x99O\x90\xb3\xc1<O\x9fs\xe9\xec)\x16\xcf-\x06\xde)\xa4u#HQ\xad\xcb\x1a\xfa<16\xaeY:\xf6R\xb7\x06\xde\x1b\xbd2P\x85*Y\x1a\xd7m\x9f\x9e\xab\xb7\x0f\xf4\xea\xdd\xb7\x06\x82N\'#a\xcc9\x13\xb9\xd4\xc5<g\x8e>\xfb\xc3\xda\xeb4\xdf\xf9\x045\x1b\xcc\xf3\xf4$\x8a\x1c\xad\xffF\xb5\xbe\xf0\xbdZI\xd2\x97~\xbcT\x0fo\xafVQq,\xab\x18x\xab\xd0\xd6\x8d\xa0D\xb5.k\xe8\xf3\xe4XE\xb34<0\xa6\xc3\xbb\xba\x94Z\x9cT\xd5\xb2\xe2\xa0\xd3\tL\xfd\x03\xe5J\x149\xda\xdf\xd4\x11t*\x19\x0bc\xce\x99\xc8\xa5.\xe69s\xf4\xd9\x1f\xd6^\xa7\xf9\xce\'\xa8\xd9`\x9e\xa7\xe7\xae\xcd)\xad|\xacR\x83}\xa3:\xf6R\xb7\xfa\xbaF\xd4\xb0\xa1B\xa9\xc5\xc9\xacb\xe0\xadB[7\x82\x12\xd5\xba\xac\xa1\xcf\x93c\xe3:\r\xcdM\xe3\x97\xed\xd3\x1b+\x02\xce$8+6\x96kx`LGvu\x07\x9dJ\xc6\xc2\x98s&r\xad\x8by\xce\x0c}\xf6\x87\xb5\xd7\xa9\x1f\xf9\x041\x1b\xcc\xf3\xf4\xcc\xab\x9d%I:\xf2b\xb7\xfe\xf1?\xbf\xa5\xc6?>\xa6\xbf_\xf7;\xb5\x1d\xea\xcb*\x06\xde*\xc4u#\x08Q\xad\xcb\x1a\xfa<96\xae\xd3\xf0\xf6\xc1^\x9d\x7fk@\xf5\x0f\x94)1\xb3\xf0Z8\x7fIR7\xddQ\xac\xa3\xbb\xbb5\xd0;\x1at:\x19\tc\xce\x99\xf0\xa2\xaeB\x9f\xe7L\xd0g\x7fX{\x9d\xfa\x95\x8f\xdf\xb3\xc1<O\xdf\x89_\xf4H\x92\xfe\xe8\x0b\xf3\xf4_\x7fz\x9bVl\xaa\xd0\xd97\xfa\xb3\x8e\x81w\nu\xdd\xf0[T\xeb\xb2\x86>O-\xe6\xbaA\xa7\x10N\xcdM\x1dJ\xce\x89\xeb\xb6O\xdf\x18t*\xbeK?2\xfe_\xd8\x9bCt\x0bC\x18s\xce\x84Wu\x15\xf2<g\x82>\xfb\xc3\xda\xeb\xd4\xcf|\xfc\x9c\r\xe6y\xfa\x8e\xfc\xef\xf1\xab\xa8=\xedC\x9a\xbf$\xa9\x87\xb7W\xeb\xff\xd9y\x8b\x8a\xcb\x13Y\xc5\xc0;\x85\xbcn\xf8)\xaauYC\x9f\xa7V8\xff\x99\xd4c\x07\x9f\xef\xd4\xd8\xa8\xab\x15\x05\xf6\xe6\x14\xb1\xb8\xa3\xe5\xeb\xcb\xd4}fHo\xee{/\xe8t2\x12\xc6\x9c3\xe1e]\x85:\xcf\x99\xa0\xcf\xfe\xb0\xf6:\xf5;\x1f\xbff\x83y\xce\xdd\xb1\x97\xba\xb5\xfd\xae\xa3\xfa\xd1\x7f\xf9\xbdz\xda\x87t\xd3\x1d\xc5\xfa\xe3/\xce\xcf:\x06\xb9+\xf4u\xc3/Q\xad\xcb\x1a\xfa|}l\\\xa7\xe9\xe2\xb9a\x9d|\xf5\xa2\x16\xaf.U\xe9\x82\xa2\xa0\xd3\xf1\xcd\xd2\xbbKUR1C\x07vtH!\xb9Z\x1f\xc6\x9c3\xe1e]\x85:\xcf\x99\xa0\xcf\xfe\xb0\xf6:\xf5;\x1f\xbff\x83y\xce\xcd\x9d\x8fV\xea\x91\xa7k4\x7fIR\xc7\xf6t\xeb\xf8\x9e\x0b\x92\xc6\xdf\x054\x9b\x18x\xa3\xd0\xd7\r\xbfD\xb5.k\xe8\xf3\xf5\xb1q\xcdAsS\x87\x9c\x98\xd4\xf0p\xe1\xfc\xd7\xe6\x15\x1b+\xe4\xbaRs\x88>W*\x8c9g\xc2\xeb\xba\nq\x9e3A\x9f\xfda\xedu\x1aD>~\xcc\x06\xf3\x9c\x03GZ\xf9\xb9J\xad\xd8T\xa1\xff\xb2\xfbV\xfd\xf5\xeb\xcb\xf5G\xffq\x9eF\x87\xdd+oV\x95Q\x0c<\xc3\xba\xe1\x8f\xa8\xd6e\r}\xbe>6\xae9\xf8\xddO/\xe8\xfd\x0b#J?R\x00\xff`K*.Kh\xe9\xa7Ju\xfa\xb7\xef\xa9\xabu0\xe8t2\x12\xc6\x9c3\x91\x8f\xba\nm\x9e3A\x9f\xfda\xedu\x1aT>\xf9\x9e\r\xe69G\xae\xf4\xed\x87N\xe8\xf9\xaf\xbf\xadS\xff\xe7\xa2\xa4\xf1\xdb\xa5\xffn\xdd\xebz\xfb`o\xe61\xf0\x04\xeb\x86?\xa2Z\x975\xf493l\\s02\xe4\xea\xd0?w\xa9\xe2\xc3\xb3T\x93.\t:\x9d\xbc[\xfeP\xb9\xe2\tG\xcd\xff\x14\x9e?\x18\x0fc\xce\x99\xc8G]\x856\xcf\x99\xa0\xcf\xfe\xb0\xf6:\r*\x9f|\xcf\x06\xf3\x9c\xbb\x91\xc11\xfd\xf6\xb9w\xf5\xcb\xef\x9c\x95$\xfd\xf2;g\xd5\xfe\xbb\xf7\xb3\x8eA\xeeX7\xfc\x11\xd5\xba\xac\xa1\xcf\x99\xe1-\xeer\xf4\xda\x8f:t\xd3\xed\xc5Z\xf4\xb1b\xb54\xf7j\xee\xc2"}}\xdf\x1d\x13\xc6\xf6u\x8dh[\xfd\xa1P\xc6HR\xaa.\xa9\x96\xfd\xbd:\xba;\xbf\x9f+\x15\xc6\x9c3\x11\x86\xba\ne\x9e\xe9\xb3\x9d\x18\xc9\x9b>[\xcbg\xba\xf25\x1bR\xe1\xces>\x8c\x0c\x8e\xa9\xfb\xcc\xa0\xc6F&\xffC\xb4Lb&
c\xad\x87\xd6b$\xd6\x8d\xa9X\xab\xcb\xda\xfcX\x8b\x91l\xcd\x8fe\t\xd7u\xf9\xfb\xdf\x1c\xbcs\xfc}}\xeb\xc1\xd7\xaf\xfc\xff\xc1\xf7\xc7&\xfd\xaf%C}c\xa1\x8d\x91\xa4\x1dO\xb4L\x18\xe3\xb50\xe6\x9c\x890\xd4U(\xf3L\x9f\xed\xc4H\xde\xf4\xd9Z>\xd3\x95\xaf\xd9\x90\nw\x9e\xf3\xa1\xa5\xb9W\xdf\xfc\xa3\xa39\xc7L\xc6Z\x0f\xad\xc5H\xac\x1bS\xb1V\x97\xb5\xf9\xb1\x16#\xd9\x9a\x1f\xcb\x9c\xfa\x7f\xb7\xd0\xed\xbb0\xacS\xfb\xcf\x07\x9d\x0b\n\xcc\x8aM\x15\xba\xf5\x9e\x1b\xf5\xca?\x9cU\xcb\xfe\xe8\xfc\xedOT\xeb\x02,\xe0\xf5\x85(a\x9e\xfd\x11\xd5>[\xab\xcbZ>\x88\x1e\xa7\xfe\xdf\xdd\xe4\xf6]\x18b\xe3\n\x00\x00\x00\x000)\x16\x8f\xc7\xe4\x04\x9d\x05\x00\x00\x00\x00\x00\x93\xe0]\x85\x01\x00\x00\x00\x00\xa6\xc5\\\xde\x99\t\x00\x00\x00\x00`\x18W\\\x01\x00\xc0\xb4-YS\xaa\xc6\xd6\xb4\xee{b\xe15\xdf\xab]5G\x8d\xadi\xa5\xea\x92\x93\xfe|&1^Y\xb7\xa5J\x8d\xadi-[W\x96\xd3\xf3X\xab\x0b\x00\n\x01\x1bW\x00\x000m\xe9\x8d\xe5\x92\xa4\x03;:\xaf\xf9\x9es\xe9,#\x16\x9f\xfc\xe73\x89\xb1&\xaau\x01\x80el\\\x01\x00\xc0\xb4$K\xe3\xba\xed\xd3s\xf5\xf6\x81^\xbd\xfb\xd6\xc0\x95\xaf\'\x8a\x1c\xad\xffF\xb5\xbe\xf0\xbdZI\xd2\x97~\xbcT\x0fo\xafVQq,\xab\x18k\xa2Z\x17\x00\x84\x01\xab(\x00\x00\x98\x96\xfa\x07\xca\x95(r\xb4\xbf\xa9\xe3\xaa\xaf\xdf\xb59\xa5\x95\x8fUj\xb0oT\xc7^\xeaV_\xd7\x88\x1a6T(\xb58\x99U\x8c5Q\xad\x0b\x00\xc2 \x11t\x02\x00\x00 \x9cVl,\xd7\xf0\xc0\x98\x8e\xec\xea\xbe\xea\xeb\xf3jgI\x92\x8e\xbc\xd8\xad\x17\x9elU,\xee(U7K\xed\xaf\xf7g\x15cMT\xeb\x02\x800\x88/\xa8-\xdd:\xd4?\xa2\xcew\xfa\x82\xce\x05\x00\x00\x84\xc4\xfc%I\xdd\xfb\xb5\x85:\xbc\xb3K\x87vv]\xf5\xbd\xf8\x0cGw\xdc_\xa6\xaa\xfab\xdd\xbev\xaeF\x87]\xbd\xf1J\x8f\xe4f\x17\xe3\xb5%kJU\xbd\xbcDGww\xeb\xdc\xc9\xec7\x92V\xeb\x02\x80B\xc0\xc6\x15\x00\x00dm\xcd\xe3\x0bT\xddP\xa2]\xdb\xda\xd4\xd56t\xd5\xf7\xce\x9d\x1aP\xfb\x89~\xd5\xa4KTQ3K\xb7\xdes\xa3n\xb9\xbbT\xc7^\xbe\xa0\xe1\xfe\xb1\x8cc\xbc\x96\xeb\xc6\xd5j]\x00P\x08\xd8\xb8\x02\x00\x80\xac\xc4\xe2\x8e6=S\xa3\xbe\xaeQ\xed\xdc\xd66a\xcc\xf97\x07\xf4\x9b\x1f\x9cW\xc7\xe9AU\xd5\x17k^mRr\xa5S{/f\x15\xe3\xa5\\7\xae\x92\xcd\xba\x00\xa0\x10\xc4\xee\xbc\xf96\xfd\xdf\xcbV\x05\x9d\x07\x00\x00\x08\x89\xa5w\x97\xaa\xa4b\x86\x0e\xec\xe8\x98\xf0\x16\xd8;\x1f\xad\xd4#O\xd7h\xfe\x92\xa4\x8e\xed\xe9\xd6\xf1=\x17$\x8d\xbf\x0bq61\xd6D\xb5.\x00\x08\x83\xc4\xad\x0bo\xd6\x80;\'\xe8<\x00\x00@H\xac\xd8X!\xd7\x95\x9a\'\xf8\xecV9\xd2\xca\xcfUj\xe1Ggk\xc5\xa6\x8a+_\x1e\x1dv\xd5\xdc\xd4\x99y\x8c5Q\xad\x0b\x00B"\xfe\xef?u\xe7\xd6\xf7\xfb\x06\xf5\x8b\xbd\xfb\x83\xce\x05\x00\x00\x18W\\\x96\xd0\xfa\xed\xd5\xfa\xfdk\xbd\xda\xfb\xec\xb9\tc\x0e\xec\xe8TO\xfb\x90\x123c*\xaf\x9e\xa9\x83\xcfw\xaa\xe9\xab-:s\xa4/\xab\x18\xaf\xe5z\xab\xb0\xd5\xba\x00\xa0\x10\xf0q8\x00\x00 c\xcb\x1f*W<\xe1\xa8\xf9\x9f:&\x8d\x19\x19\x1c\xd3o\x9f{W\x1d-\x83Z\xbcz\x8e~\xf9\x9d\xb3\xd7l\x143\x89\xb1&\xaau\x01@\x18\xc4\x82N\x00\x00\x00\x84G\xaa.\xa9\x96\xfd\xbd:\xba\xbb\xfb\xba\xb1#\x83c\xea>3\xa8\xb1\x91\xc9?\x0b&\x93\x18k\xa2Z\x17\x00X\xe6|\xebo\xbe\xe2\xf6\xf4\xba\xfa\xf3o~;\xe8\\\x00\x00\x00\xf2f\xdd\x96*\xad\xde\x9c\xd2s_>\xad\xc3\xbb\xba\xae\xff\x03\x00\x003\xb8\xe2\n\x00\x00\n\xc2\xd9\x13\xfd:\xfe\xf2\x05\xf5\x9c\x1d\xba~0\x00\xc0\x94\xf1\xbfqu\x02\xce\x02\x00\x00 
\xcf\xf6\xff\xaf\x0e\xed\xff_\x93\xffm.\x00\xc0\xae\x98+\xc9a\xe3\n\x00\x00\x00\x000\x8a[\x85\x01\x00\x00\x00\x00\xa6\xb1q\x05\x00\x00\x00\x00\x98\xc6\xc6\x15\x00\x00\\e\xdd\x96*5\xb6\xa6\xb5l]Y\xde\x7fW\xed\xaa9jlM+U\x97\x8cT\xcceK\xd6\x94\xaa\xb15\xad\xfb\x9eXx\xddX\x00\xc0\xe4\xd8\xb8\x02\x00\x80\xc08\x97\xceDb\xf1h\xc5\\\x96\xdeX.I:\xb0\xa3\xf3\xfa\xc1\x00\x80I\x8d/\xbdN\x06+/\x00\x00\x80G\x12E\x8e\xd6\x7f\xa3Z_\xf8^\xad$\xe9K?^\xaa\x87\xb7W\xab\xa88\x16\xea\x98\x0fJ\x96\xc6u\xdb\xa7\xe7\xea\xed\x03\xbdz\xf7\xad\x81\xe97\x0b\x00\xc0\x15W\x00\x00\xe0\xbf\xbb6\xa7\xb4\xf2\xb1J\r\xf6\x8d\xea\xd8K\xdd\xea\xeb\x1aQ\xc3\x86\n\xa5\x16\'C\x1d\xf3A\xf5\x0f\x94+Q\xe4h\x7f\x13\x1f\xc1\x03\x00\xb9J8\xe2\xe3p\x00\x00\x80\xbf\xe6\xd5\xce\x92$\x1dy\xb1[/<\xd9\xaaX\xdcQ\xaan\x96\xda_\xef\x0fu\xcc\x07\xad\xd8X\xae\xe1\x811\x1d\xd9\xd5=\xfdF\x01\x00$I\xf1\xb5\x9f\xfa\xf8\xd6\xa1\xd1\xb8~\xfe\xcao\x83\xce\x05\x00\x00\x18\xb0dM\xa9\xaa\x97\x97\xe8\xe8\xeen\x9d;9\xf1\xa6,W\xf1\x19\x8e\xee\xb8\xbfLU\xf5\xc5\xba}\xed\\\x8d\x0e\xbbz\xe3\x95\x1e\xc9\rw\xcce\xf3\x97$u\xef\xd7\x16\xea\xf0\xce.\x1d\xda\xd9\xe5]\xe3\x00\xa0@\xc5\xd7~\xea\xe3[\x87Gc\xfa\x19\x1bW\x00\x00 \x7f6\xae\xe7N\r\xa8\xfdD\xbfj\xd2%\xaa\xa8\x99\xa5[\xef\xb9Q\xb7\xdc]\xaac/_\xd0p\xffXhc.[\xf3\xf8\x02U7\x94h\xd7\xb66u\xb5\r\xe5\xa5\x87\x00PH\xc6\xaf\xb8\x8e%\xf4\xb3W\xf6\x05\x9d\x0b\x00\x000\xc0\x8f\x8d\xab$\x9d\x7fs@\xbf\xf9\xc1yu\x9c\x1eTU}\xb1\xe6\xd5&%W:\xb5\xf7b\xa8cbqG\x9b\x9e\xa9Q_\xd7\xa8vnk\xcbW\xfb\x00\xa0\xa0\xc4$\x89?q\x05\x00\x00~\xba\xf3\xd1J=\xf2t\x8d\xe6/I\xea\xd8\x9en\x1d\xdfsA\xd2\xf8;\xf1\x869F\x92\x96\xde]\xaa\x92\x8a\x19:\xb0\xa3c\xc2\xdb\x88\x01\x00\xd9KH\xe2\xdd\x99\x00\x00\x80\x7f\x1ci\xe5\xe7*\xb5\xf0\xa3\xb3\xb5bS\xc5\x95/\x8f\x0e\xbbjn\xea\x0co\xcc%+6V\xc8u\xa5f>\xbb\x15\x00<\x13_\xfb\xa9\x8fo\x1dqg\xe8\xa7\xbf\xfaM\xd0\xb9\x00\x00\x00\x03\xfc\xb8U\xf8\xc0\x8eN\xf5\xb4\x0f)13\xa6\xf2\xea\x99:\xf8|\xa7\x9a\xbe\xda\xa23G\xfaB\x1dS\\\x96\xd0\xfa\xed\xd5\xfa\xfdk\xbd\xda\xfb\xec\xb9\xbc\xf4\x0e\x00\n\xd1\xf8\x15Wn\x16\x06\x00\x00>\x1a\x19\x1c\xd3o\x9f{W\x1d-\x83Z\xbcz\x8e~\xf9\x9d\xb3\xd7l\x92\xc3\x18\xb3\xfc\xa1r\xc5\x13\x8e\x9a\xff\x89\xcfn\x05\x00/\xc5$\xfe\xfc\x02\x00\x00\x04cdpL\xddg\x06562\xf9\xd9H\x98bRuI\xb5\xec\xef\xd5\xd1\xdd|v+\x00x\xc9\xf9\xdb\xbf\xfa\x13\xf7\xbd~G\x7f\xf9\xcd\xef\x06\x9d\x0b\x00\x000`\xdd\x96*\xad\xde\x9c\xd2s_>\xad\xc3\xbb\xf8\x0cR\x00@\xf0\x12\xe7{\xba\xe5\x0c\xc7\xaf\x1f\t\x00\x00\n\xc2\xd9\x13\xfd:\xfe\xf2\x05\xf5\x9c\xe5\xf3G\x01\x0068\xab\xee\xadso\x9f_\xa7\xff\xef\x07/\x06\x9d\x0b\x00\x00\x00\x00\x00\xd7\x88]\xe8\xef\xd5K\xcd\xfb\x82\xce\x03\x00\x00\x00\x00\x80\t\xc5f\x14\xc54cf,\xe8<\x00\x00\x00\x00\x00\x98P\xcc\x89\xc5\xe4\xc4\xf98\x1c\x00\x00\x00\x00\x80M1\xc7\xe1S\\\x01\x00@f\xbe\xf8\xc3:m?\xdd\xa0\x1b*g\x04\x9dJ\xe0\x96\xac)UckZ\xf7=\xb10\xe8T\x00 \xf2\xb8G\x18\x00\x00dl\x7fS\xa7b\tG\xcb\xd7\x97\x07\x9dJ\xe0\xd2\x1b\xc7{p`Gg\xc0\x99\x00@\xf4\xc5\\W\x1a\x9b\xfc3\xb6\x01\x00\x00\xae8\xf6R\xb7\x06\xde\x1b\xbd\xb2i+T\xc9\xd2\xb8n\xfb\xf4\\\xbd}\xa0W\xef\xbe5\x10t:\x00\x10y\xe3W\\\xb9W\x18\x00\x00d`x`L\x87wu)\xb58\xa9\xaae\xc5A\xa7\x13\x98\xfa\x07\xca\x95(r\xb4\xbf\xa9#\xe8T\x00\xa0 
\xc4$\xc9a\xe7\n\x00\x002\xd4\xdc4~klzcE\xc0\x99\x04g\xc5\xc6r\r\x0f\x8c\xe9\xc8\xae\xee\xa0S\x01\x80\x82\xc0\xdf\xb8\x02\x00\x80\xac\xbc}\xb0W\xe7\xdf\x1aP\xfd\x03eJ\x14\xe0G\xea\xcd_\x92\xd4Mw\x14\xeb\xe8\xeen\r\xf4\x8e\x06\x9d\x0e\x00\x14\x84\x98$\xb9\xe2\x8f\\\x01\x00@\xe6\x9a\x9b:\x94\x9c\x13\xd7m\x9f\xbe1\xe8T|\x97~d\xfcJs3\xb7\t\x03\x80o\n\xef?\x93\x02\x00\x80\x9c\x1d|\xbeSc\xa3\xaeV\x14\xd8\x9b4\xc5\xe2\x8e\x96\xaf/S\xf7\x99!\xbd\xb9\xef\xbd\xa0\xd3\x01\x80\x821\xbeq\xe5m\x85\x01\x00@\x16.\x9e\x1b\xd6\xc9W/j\xf1\xeaR\x95.(\n:\x1d\xdf,\xbd\xbbT%\x153t`G\x87\xb8a\r\x00\xfc\x13\xfb\x9f\x7f\xfe}}\xbc\xfe\xff\n:\x0f\x00\x00\x102\xcdM\x1drbR\xc3\xc3\x85s\xd5u\xc5\xc6\n\xb9\xae\xd4\xccg\xb7\x02\x80\xafb\xf1XB\x1a\x98\x19t\x1e\x00\x00 d~\xf7\xd3\x0bz\xff\xc2\x88\xd2\x8f\x14\xc6\xc6\xb5\xb8,\xa1\xa5\x9f*\xd5\xe9\xdf\xbe\xa7\xae\xd6\xc1\xa0\xd3\x01\x80\x82\x12\x93\xa4\xe6\xfd\xcdA\xe7\x01\x00\x00Bfd\xc8\xd5\xa1\x7f\xeeR\xc5\x87g\xa9&]\x12t:y\xb7\xfc\xa1r\xc5\x13\x8e\x9a\xff\x897e\x02\x00\xbf%\x82N\x00\x00\x00\x84\xd7k?\xea\xd0M\xb7\x17k\xd1\xc7\x8a\xd5\xd2\xdc\xab\xb9\x0b\x8b\xf4\xf5}wL\x18\xdb\xd75\xa2m\xf5\x87B\x19#I\xa9\xba\xa4Z\xf6\xf7\xea\xe8n>\xbb\x15\x00\xfc\xc6\xc6\x15\x00\x00L\xdb;\xc7\xdf\xd7\xb7\x1e|\xfd\xca\xff\x1f|\x7fl\xd2+\x92C}c\xa1\x8d\x91\xa4\x1dO\xb4L\x18\x03\x00\xc8?\xe7\xd5\xff\xf3\xaa\xfb\x9f6\xff\'\x9d<y2\xe8\\\x00\x00\x00\x00\x00\xb8\x06\x9f\xe3\n\x00\x00\x00\x000\x8d\x8d+\x00\x00\x00\x00\xc04\xe7c\x9f^\xe4\x1e\xfeY\x9b\xc6\xc6\xf8\x14m\x00\x00\x00\x00\x80=1\xc7\x91j\x1b*\x83\xce\x03\x00\x00\x00\x00\x80\tq\xab0\x00\x00\x00\x00\xc0\xb4\x98$q\x930\x00\x00\x00\x00\xc0*\xae\xb8\x02\x00\x00\x00\x00Lc\xe3\n\x00\x00\x00\x000-&IN\xd0Y\x00\x00\x00\x00\x000\t\xae\xb8\x02\x00\x00\x00\x00Lc\xe3\n\x00\x00\x00\x000\x8d\x8d+\x00\x00\x00\x00\xc046\xae\x00\x00\x00\x00\x00\xd3\xd8\xb8\x02\x00\x00\x00\x00Lc\xe3\n\x00\x00\x00\x000\x8d\x8d+\x00\x00\x00\x00\xc046\xae\x00\x00\x00\x00\x00\xd3\xd8\xb8\x02\x00\x00\x00\x00Lc\xe3\n\x00\x00\x00\x000\x8d\x8d+\x00\x00\x00\x00\xc0\xb4\x98\xe3\x04\x9d\x02\x00\x00\x00\x00\x00\x93\x8b\xb9n\xd0)\x00\x00\x00\x00\x0009n\x15\x06\x00\x00\x00\x00\x98\xc6\xc6\x15\x00\x00\x00\x00`\x1a\x1bW\x00\x00\x00\x00\x80i\xbc9\x13\x00\x00\x00\x00\xc0\xb4\xf1+\xae\xec^\x01\x00\x00\x00\x00F\xf1\xae\xc2\x00\x00\x00\x00\x00\xd3\xc6\xaf\xb8\xb2{\x05\x00\x00\x00\x00\x18\xc5\x9b3\x01\x00\x00\x00\x00L\x8b\xb9\\m\x05\x00\x00\x00\x00\x18\xc6\x15W\x00\x00\x00\x00\x80il\\\x01\x00\x00\x00\x00\xa6\xc5\x1c>\n\x07\x00\x00\x00\x00`\x18W\\\x01\x00\x00\x00\x00\xa6\xb1q\x05\x00\x00\x00\x00\x98\xc6\xc6\x15\x00\x00\x00\x00`\x1a\x1bW\x00\x00\x00\x00\x80il\\\x01\x00\x00\x00\x00\xa6\xb1q\x05\x00\x00\x00\x00\x98\x16\x1b\x1b\x1b\x93\x1bt\x16\x00\x00\x00\x00\x00L\x82+\xae\x00\x00\x00\x00\x00\xd3\xd8\xb8\x02\x00\x00\x00\x00L\x8b9\x8e\x13t\x0e\x00\x00\x00\x00\x00L\x8a+\xae\x00\x00\x00\x00\x00\xd3b+?\xf2Q\xad]\xb6*\xe8<\x00\x00\x00\x00\x00\x98P\xe2\x96\x857k\xd0\x9d\x13t\x1e\x00\x00\x00\x00\x00L(\x16s\x1c\r\xbd\x7f1\xe8<\x00\x00\x00\x00\x00\x98\x10\x7f\xe3\n\x00\x00\x00\x000\x8d\x8d+\x00\x00\x00\x00\xc046\xae\x00\x00\x00\x00\x00\xd3\xd8\xb8\x02\x00\x00\x00\x00L\x1b\xdf\xb8:\x01g\x01\x00\x00\x00\x00\xc0$b\xae$\x87\x8d+\x00\x00\x00\x00\xc0(n\x15\x06\x00\x00\x00\x00\x98\xc6\xc6\x15\x00\x00\x00\x00`\x1a\x1bW\x00\x00\x00\x00\x80il\\\x01\x00\x00\x00\x00\xa6]zW\xe1x\xc0i\x00\x00\x00\x00\x0001\xae\xb8\x02\x00\x00\x00\x00L\x8b9\xe2\xe3p\x00\x00\x00\x00\x00v]\xbaU\x98\x0b\xaf\x00\x00\x00\x00\x00\x9bb\x92\xc4\x05W\x00\x00\x00\x00\x80U\\q\x05\x00\x00\x00\x00\x98\xc6\x15W\x00\x00\x00\x00\x80i\x97\xae\xb8\xb2u\x05\x0
0\x00\x00\x00\xd84~\xc5\x95\x8d+\x00\x00\x00\x00\xc0\xa8K\x7f\xdc\xca\xc6\x15\x00\x00\x00\x00`SL\x92\xdc\xa0\xb3\x00\x00\x00\x00\x00`\x12\xb1\x91\xd1\x11\r\r\xf4\x06\x9d\x07\x00\x00\x00\x00\x0e\xb6\xe4l\x00\x00\x00^IDAT\x00\x13J\x9c\xef\xe9\x963\x1c\x0f:\x0f\x00\x00\x00\x00\x00&\x14\xfb\xe5\x91\x03\xea\xec\xb9\x18t\x1e\x00\x00\x00\x00\x00L\xc8\xa9\xfbD\xa5\xdb\xd7=\xa4\x7f;\xd1\x13t.\x00\x00\x00\x00\x00\\#V43\xaeDQ,\xe8<\x00\x00\x00\x00\x00\x98P,1#\xa1D\x82\xbfq\x05\x00\x00\x00\x00\xd8\xf4\xff\x03\x93\xe2\xa96\x87\xa16\xfb\x00\x00\x00\x00IEND\xaeB`\x82'
| 24,647
| 49,293
| 0.736276
| 11,361
| 49,294
| 3.189156
| 0.157557
| 0.107308
| 0.102837
| 0.068889
| 0.368155
| 0.330481
| 0.290793
| 0.255548
| 0.226236
| 0.205481
| 0
| 0.229512
| 0.000669
| 49,294
| 1
| 49,294
| 49,294
| 0.505999
| 0
| 0
| 0
| 0
| 12
| 0.724328
| 0.723901
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
747c68f97d56c2e4371395809af8c95567f1c42e
| 10,460
|
py
|
Python
|
azulejo/test_azulejo.py
|
johnteslade/azulejo
|
3b1a35981360513b21f90d96afff10352b6363e6
|
[
"MIT"
] | 3
|
2015-07-17T09:35:22.000Z
|
2015-11-15T00:13:32.000Z
|
azulejo/test_azulejo.py
|
johnteslade/azulejo
|
3b1a35981360513b21f90d96afff10352b6363e6
|
[
"MIT"
] | 1
|
2015-07-17T09:36:45.000Z
|
2015-07-22T20:20:53.000Z
|
azulejo/test_azulejo.py
|
johnteslade/azulejo
|
3b1a35981360513b21f90d96afff10352b6363e6
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import unittest
from azulejo.azulejo import run
from .test.screen_mocks import SingleTestScreenMock
from .test.screen_mocks import MultipleTestScreenMock
from .test.key_binder import KeyBinderDummy
from .geometry import Geometry
class AzulejoTestBase(unittest.TestCase):
""" Base setup of tests """
@classmethod
def setUpClass(cls):
""" Constructor """
cls.screen = None
cls.keybinding_obj = None
class AzulejoTestSingle(AzulejoTestBase):
""" Test cases for single monitor setup """
def setUp(self):
""" Setup and start azulejo """
self.keybinding_obj = KeyBinderDummy()
self.screen = SingleTestScreenMock()
run(True, self.screen, self.keybinding_obj)
def test_left_side(self):
""" Test the left side moving of windows """
# Trigger a keypress
self.keybinding_obj.action_key('<Ctrl><Super>h')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=0, y=0, width=1000, height=1000)
)
# Trigger another keypress
self.keybinding_obj.action_key('<Ctrl><Super>h')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=0, y=0, width=600, height=1000)
)
# Trigger another keypress
self.keybinding_obj.action_key('<Ctrl><Super>h')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=0, y=0, width=1400, height=1000)
)
def test_maximise(self):
""" Test the maximising of active window """
# Trigger a keypress
self.keybinding_obj.action_key('<Ctrl><Super>1')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=0, y=0, width=2000, height=1000)
)
def test_side_by_side_2(self):
""" Test the side by side 2 windows """
# Trigger a 2 window side by side
self.keybinding_obj.action_key('<Ctrl><Super>2')
self.assertEqual(
self.screen.windows[0]['geometry'],
Geometry(x=0, y=0, width=1000, height=1000)
)
self.assertEqual(
self.screen.windows[1]['geometry'],
Geometry(x=1001, y=0, width=1000, height=1000)
)
def test_side_by_side_3(self):
""" Test the side by side 3 windows """
# Trigger a 3 window side by side
self.keybinding_obj.action_key('<Ctrl><Super>3')
self.assertEqual(
self.screen.windows[0]['geometry'],
Geometry(x=0, y=0, width=1000, height=1000)
)
self.assertEqual(
self.screen.windows[1]['geometry'],
Geometry(x=1001, y=0, width=1000, height=500)
)
self.assertEqual(
self.screen.windows[2]['geometry'],
Geometry(x=1001, y=501, width=1000, height=500)
)
def test_side_by_side_4(self):
""" Test the side by side 4 windows """
# Trigger a 4 window side by side
self.keybinding_obj.action_key('<Ctrl><Super>4')
self.assertEqual(
self.screen.windows[0]['geometry'],
Geometry(x=0, y=0, width=1000, height=500)
)
self.assertEqual(
self.screen.windows[1]['geometry'],
Geometry(x=1001, y=0, width=1000, height=500)
)
self.assertEqual(
self.screen.windows[2]['geometry'],
Geometry(x=0, y=501, width=1000, height=500)
)
self.assertEqual(
self.screen.windows[3]['geometry'],
Geometry(x=1001, y=501, width=1000, height=500)
)
def test_move_window(self):
""" Test the moving of a window on the self.screen """
# Move northwest
self.keybinding_obj.action_key('<Super>KP_7')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=0, y=0, width=50, height=100)
)
# Move southeast
self.keybinding_obj.action_key('<Super>KP_3')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=1950, y=900, width=50, height=100)
)
def test_multiple_window_moves(self):
""" Tests multiple window moves """
# Move side by side
self.keybinding_obj.action_key('<Ctrl><Super>2')
self.assertEqual(
self.screen.get_all_windows()[0]['geometry'],
Geometry(x=0, y=0, width=1000, height=1000)
)
self.assertEqual(
self.screen.get_all_windows()[1]['geometry'],
Geometry(x=1001, y=0, width=1000, height=1000)
)
# Move to 4-pane layout
self.keybinding_obj.action_key('<Ctrl><Super>4')
self.assertEqual(
self.screen.get_all_windows()[0]['geometry'],
Geometry(x=0, y=0, width=1000, height=500)
)
self.assertEqual(
self.screen.get_all_windows()[1]['geometry'],
Geometry(x=1001, y=0, width=1000, height=500)
)
self.assertEqual(
self.screen.get_all_windows()[2]['geometry'],
Geometry(x=0, y=501, width=1000, height=500)
)
self.assertEqual(
self.screen.get_all_windows()[3]['geometry'],
Geometry(x=1001, y=501, width=1000, height=500)
)
def test_multimonitor(self):
"""Test multimonitor does nothing when only one screen."""
# Move left
self.keybinding_obj.action_key('<Ctrl><Super>q')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=50, y=80, width=50, height=100)
)
# Move right
self.keybinding_obj.action_key('<Ctrl><Super>w')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=50, y=80, width=50, height=100)
)
# Move left maximise
self.keybinding_obj.action_key('<Ctrl><Super>a')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=50, y=80, width=50, height=100)
)
# Move right maximise
self.keybinding_obj.action_key('<Ctrl><Super>s')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=50, y=80, width=50, height=100)
)
class AzulejoTestMultiple(AzulejoTestBase):
""" Test cases for multi monitor setup """
def setUp(self):
""" Setup and start azulejo """
self.keybinding_obj = KeyBinderDummy()
self.screen = MultipleTestScreenMock()
run(True, self.screen, self.keybinding_obj)
def test_left_side_multiple(self):
"""Test the left side moving of windows with multiple monitors."""
# Trigger a keypress
self.keybinding_obj.action_key('<Ctrl><Super>h')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=200, y=0, width=100, height=100)
)
def test_move_monitor(self):
""" Test the moving of window to a monitor """
self.assertEqual(self.screen.get_active_window_monitor(), 1)
# Move left
self.keybinding_obj.action_key('<Ctrl><Super>q')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=50, y=10, width=20, height=30)
)
self.assertEqual(self.screen.get_active_window_monitor(), 0)
# Move right
self.keybinding_obj.action_key('<Ctrl><Super>w')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=250, y=10, width=20, height=30)
)
self.assertEqual(self.screen.get_active_window_monitor(), 1)
def test_move_monitor_maximise(self):
""" Test the moving of window to a monitor and maximise """
self.assertEqual(self.screen.get_active_window_monitor(), 1)
# Move left
self.keybinding_obj.action_key('<Ctrl><Super>a')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=0, y=0, width=200, height=100)
)
self.assertEqual(self.screen.get_active_window_monitor(), 0)
# Move right
self.keybinding_obj.action_key('<Ctrl><Super>s')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=200, y=0, width=200, height=100)
)
self.assertEqual(self.screen.get_active_window_monitor(), 1)
def test_move_window_multi_monitor(self):
"""Test moving of a window on the self.screen with multiple monitors"""
# Move northwest
self.keybinding_obj.action_key('<Super>KP_7')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=200, y=0, width=20, height=30)
)
# Move southeast
self.keybinding_obj.action_key('<Super>KP_3')
self.assertEqual(
self.screen.get_active_window()['geometry'],
Geometry(x=380, y=70, width=20, height=30)
)
def test_multiple_window_multi_monitor(self):
""" Tests multiple window moves from multiple monitors """
# Move side by side
self.keybinding_obj.action_key('<Ctrl><Super>2')
self.assertEqual(
self.screen.get_all_windows()[0]['geometry'],
Geometry(x=200, y=0, width=100, height=100)
)
self.assertEqual(
self.screen.get_all_windows()[1]['geometry'],
Geometry(x=301, y=0, width=100, height=100)
)
# Move to 4-pane layout
self.keybinding_obj.action_key('<Ctrl><Super>4')
self.assertEqual(
self.screen.get_all_windows()[0]['geometry'],
Geometry(x=200, y=0, width=100, height=50)
)
self.assertEqual(
self.screen.get_all_windows()[1]['geometry'],
Geometry(x=301, y=0, width=100, height=50)
)
self.assertEqual(
self.screen.get_all_windows()[2]['geometry'],
Geometry(x=200, y=51, width=100, height=50)
)
self.assertEqual(
self.screen.get_all_windows()[3]['geometry'],
Geometry(x=301, y=51, width=100, height=50)
)
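# Note: these tests only ever compare Geometry value objects, so a minimal
# stand-in is enough to run them in isolation. This is an assumption — the
# real class lives in azulejo/geometry.py and may carry more behaviour:
# from collections import namedtuple
# Geometry = namedtuple('Geometry', ['x', 'y', 'width', 'height'])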
| 28.27027
| 79
| 0.58652
| 1,264
| 10,460
| 4.712816
| 0.090981
| 0.083935
| 0.140339
| 0.184657
| 0.847742
| 0.823401
| 0.808629
| 0.801242
| 0.777069
| 0.765318
| 0
| 0.058572
| 0.285086
| 10,460
| 369
| 80
| 28.346883
| 0.738032
| 0.110803
| 0
| 0.603687
| 0
| 0
| 0.068529
| 0
| 0
| 0
| 0
| 0
| 0.202765
| 1
| 0.073733
| false
| 0
| 0.032258
| 0
| 0.119816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7482fbab191be88ef29ea4dd28de4428de877dbf
| 4,318
|
py
|
Python
|
nunchuk.py
|
mlegere1323/WiiNunchukMouse
|
cc4085ba3228e069e8b74c89c82e299d25c29eec
|
[
"MIT"
] | 1
|
2021-12-02T05:20:35.000Z
|
2021-12-02T05:20:35.000Z
|
nunchuk.py
|
mlegere1323/WiiNunchukMouse
|
cc4085ba3228e069e8b74c89c82e299d25c29eec
|
[
"MIT"
] | null | null | null |
nunchuk.py
|
mlegere1323/WiiNunchukMouse
|
cc4085ba3228e069e8b74c89c82e299d25c29eec
|
[
"MIT"
] | null | null | null |
import serial
import time
ser = serial.Serial('/dev/ttyACM0', 9600)
joy_x = 0
joy_y = 0
accel_x = 0
accel_y = 0
accel_z = 0
c_button = 0
z_button = 0
def map_num(x, in_min, in_max, out_min, out_max):
"""Will map a value x, bounded by in_min and in_max,
from out_min to out_max"""
ret_val = (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
return ret_val
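# Quick sanity check of the linear rescale (values chosen for illustration,
# not taken from the device): endpoints map to endpoints and the midpoint of
# the input range lands on the midpoint of the output range.
# map_num(26, 26, 220, -48, 48)  == -48
# map_num(123, 26, 220, -48, 48) == 0    # (123-26)*96 // 194 - 48 == 0
# map_num(220, 26, 220, -48, 48) == 48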
def get_input():
while True:
reading = ser.readline()
read_values = reading.split(',')
#Need to check for initial startup weirdnesses, maintain integrity of data
if read_values[0] == 'H' and len(read_values) == 9:
""" JOY STICK """
#input range 26..220 (span 220 - 26 = 194)
joy_x = map_num(int(read_values[1]), 26, 220, -48, 48)
#Custom calibration for center
if joy_x <= 4 and joy_x >= 0:
joy_x = 0
#input range 44..230 (span 230 - 44 = 186)
joy_y = map_num(int(read_values[2]), 44, 230, -46, 46)
#Custom calibration for center
if joy_y >= -3 and joy_y <= 0:
joy_y = 0
""" ACCEL X """
#Want to bound values at 79, and 182, where 79 means
#the nunchuk is completely turned left, and 182 means
#it's completely turned right
accel_x = int(read_values[3])
if accel_x < 79:
accel_x = 79
elif accel_x > 182:
accel_x = 182
#map to reasonable values (182 - 79 = 103)
accel_x = map_num(accel_x, 79, 182, -51, 51)
""" ACCEL Y """
#Repeat for accel_y
accel_y = int(read_values[4])
if accel_y < 74:
accel_y = 74
elif accel_y > 183:
accel_y = 183
#map to reasonable values (183 - 74 = 109)
accel_y = map_num(accel_y, 74, 183, -54, 54)
""" ACCEL Z """
accel_z = int(read_values[5])
""" C & Z BUTTONS """
z_button = int(read_values[6])
c_button = int(read_values[7])
return [joy_x, joy_y, accel_x, accel_y, accel_z, c_button, z_button]
def get_input_2():
while True:
reading = ser.readline()
read_values = reading.split(',')
#Need to check for initial startup weirdnesses, maintain integrity of data
if read_values[0] == 'H' and len(read_values) == 9:
""" JOY STICK """
#input range 26..220 (span 220 - 26 = 194)
joy_x = map_num(int(read_values[1]), 26, 220, -48, 48)
#Custom calibration for center
if joy_x <= 4 and joy_x >= 0:
joy_x = 0
#input range 44..230 (span 230 - 44 = 186)
joy_y = map_num(int(read_values[2]), 44, 230, -46, 46)
#Custom calibration for center
if joy_y >= -3 and joy_y <= 0:
joy_y = 0
""" ACCEL X """
#Want to bound values at 79, and 182, where 79 means
#the nunchuk is completely turned left, and 182 means
#it's completely turned right
accel_x = int(read_values[3])
if accel_x < 79:
accel_x = 79
elif accel_x > 182:
accel_x = 182
#map to reasonable values (182 - 79 = 103)
accel_x = map_num(accel_x, 79, 182, -51, 51)
""" ACCEL Y """
#Repeat for accel_y
accel_y = int(read_values[4])
if accel_y < 74:
accel_y = 74
elif accel_y > 183:
accel_y = 183
#map to reasonable values (183 - 74 = 109)
accel_y = map_num(accel_y, 74, 183, -54, 54)
""" ACCEL Z """
accel_z = int(read_values[5])
""" C & Z BUTTONS """
z_button = int(read_values[6])
c_button = int(read_values[7])
return str(joy_x)+","+str(joy_y)+","+str(accel_x)+\
","+str(accel_y)+","+str(accel_z)+","+str(c_button)+\
","+str(z_button)+","
| 32.466165
| 82
| 0.467346
| 567
| 4,318
| 3.345679
| 0.183422
| 0.066421
| 0.095941
| 0.027412
| 0.810754
| 0.810754
| 0.804428
| 0.804428
| 0.804428
| 0.804428
| 0
| 0.094796
| 0.425892
| 4,318
| 132
| 83
| 32.712121
| 0.670432
| 0.172302
| 0
| 0.742857
| 0
| 0
| 0.00697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.028571
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
74a2c655b33aaafb31842fe9fc04aef3969de0fb
| 3,630
|
py
|
Python
|
wg_utils.py
|
eddylin2015/mypy
|
7cba47efa159f3aa2e0c0e8bcd1905b5e63024be
|
[
"MIT"
] | null | null | null |
wg_utils.py
|
eddylin2015/mypy
|
7cba47efa159f3aa2e0c0e8bcd1905b5e63024be
|
[
"MIT"
] | null | null | null |
wg_utils.py
|
eddylin2015/mypy
|
7cba47efa159f3aa2e0c0e8bcd1905b5e63024be
|
[
"MIT"
] | null | null | null |
import sys
import http.client
import urllib
import ssl
def Auth_Login(host, path, param):
ssl._create_default_https_context=ssl._create_unverified_context
headers = {
'Connection': 'keep-alive',
'Content-Length': len(param),
'Cache-Control': 'max-age=0',
'Origin': 'https://' + host,
'Upgrade-Insecure-Requests': 1,
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4',
'Cookie': 'session_id=714b232ee2cbdc32bf66ffff226b7b4388026a8c'
}
conn = http.client.HTTPSConnection(host)
conn.request("POST",path, param, headers)
response = conn.getresponse()
print( response.status, response.reason)
print(response.headers)
session_id=response.headers['set-cookie']
print(session_id)
data = response.read()
print(data)
conn.close()
return session_id
#end Auth_Login
#HttpGet with Session_id
def HttpGet(host_, path_ , session_id):
ssl._create_default_https_context=ssl._create_unverified_context
headers ={ 'Cookie': session_id }
conn = http.client.HTTPSConnection(host_)
body=""
conn.request("GET",str( path_), body, headers)
response = conn.getresponse()
print(response.status, response.reason)
print(response.headers)
data = response.read()
print(data)
conn.close()
#HttpGet End
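# Hypothetical usage of the module-level functions (host, path, and
# credentials below are placeholders, not values from this module):
# sid = Auth_Login('fw.example.com', '/auth/login', 'username=admin&password=secret')
# HttpGet('fw.example.com', '/status', sid)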
class WG:
#Common base class for all installs
wg_login_count=0
def __init__(self, host_):
self.session_id="none"
print(host_)
self.hostname=host_
WG.wg_login_count=WG.wg_login_count+1
#_auth_login does the actual POST; Auth_Login below is the convenience
#wrapper. Distinct names keep the wrapper from silently overriding it.
def _auth_login(self, host, path, param):
ssl._create_default_https_context=ssl._create_unverified_context
headers = {
'Connection': 'keep-alive',
'Content-Length': len(param),
'Cache-Control': 'max-age=0',
'Origin': 'https://' + host,
'Upgrade-Insecure-Requests': 1,
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4',
'Cookie': 'session_id=714b232ee2cbdc32bf66ffff226b7b4388026a8c'
}
conn = http.client.HTTPSConnection(host)
conn.request("POST", path, param, headers)
response = conn.getresponse()
print(response.status, response.reason)
print(response.headers)
self.session_id=response.headers['set-cookie']
print(self.session_id)
data = response.read()
print(data)
conn.close()
return self.session_id
def Auth_Login(self, path, param):
return self._auth_login(self.hostname, path, param)
def HttpGet(self, path_):
self._http_get(self.hostname, path_, self.session_id)
#_http_get takes an explicit host; the wrapper above passes self.hostname,
#the attribute actually set in __init__.
def _http_get(self, host_, path_, session_id):
ssl._create_default_https_context=ssl._create_unverified_context
headers ={ 'Cookie': session_id }
conn = http.client.HTTPSConnection(host_)
body=""
conn.request("GET", str(path_), body, headers)
response = conn.getresponse()
print(response.status, response.reason)
print(response.headers)
data = response.read()
print(data)
conn.close()
#HttpGet End
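# Hypothetical usage of the class wrapper (placeholder host and form data):
# wg = WG('fw.example.com')
# wg.Auth_Login('/auth/login', 'username=admin&password=secret')
# wg.HttpGet('/status')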
| 36.666667
| 125
| 0.671074
| 476
| 3,630
| 4.968487
| 0.241597
| 0.057082
| 0.027061
| 0.035518
| 0.860042
| 0.860042
| 0.860042
| 0.860042
| 0.860042
| 0.860042
| 0
| 0.040513
| 0.184022
| 3,630
| 98
| 126
| 37.040816
| 0.757934
| 0.025344
| 0
| 0.769231
| 0
| 0.065934
| 0.280861
| 0.123443
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.043956
| 0.010989
| 0.175824
| 0.164835
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
778e9f9cc92f7b3065fea638a0d4d74d19c178ea
| 6,079
|
py
|
Python
|
day9/sol.py
|
philipkiely/Advent2021
|
9c83dcc6ba69c38edc204cf3d68e74dff05d89e1
|
[
"MIT"
] | null | null | null |
day9/sol.py
|
philipkiely/Advent2021
|
9c83dcc6ba69c38edc204cf3d68e74dff05d89e1
|
[
"MIT"
] | null | null | null |
day9/sol.py
|
philipkiely/Advent2021
|
9c83dcc6ba69c38edc204cf3d68e74dff05d89e1
|
[
"MIT"
] | null | null | null |
def parse(data):
return len(data.split("\n")[0]), [int(d) for d in list(data) if d != "\n"]
def pretty_print(xlen, heights):
for row in range(int(len(heights)/xlen)):
s = ""
for i in range(xlen):
s += str(heights[row*xlen+i])
print(s)
print("")
def risk_levels(xlen, heights):
risk = 0
hlen = len(heights)
for i in range(hlen):
# Literally edge and corner cases, not funny
if i == 0: # Top left corner
if all(heights[i] < x for x in [heights[i+1], heights[i+xlen]]):
risk += (1 + heights[i])
elif i == xlen-1: # Top right corner
if all(heights[i] < x for x in [heights[i-1], heights[i+xlen]]):
risk += (1 + heights[i])
elif i == hlen-xlen: # Bottom left corner
if all(heights[i] < x for x in [heights[i+1], heights[i-xlen]]):
risk += (1 + heights[i])
elif i == hlen-1: # Bottom right corner
if all(heights[i] < x for x in [heights[i-1], heights[i-xlen]]):
risk += (1 + heights[i])
elif i < xlen: # Top Edge
if all(heights[i] < x for x in [heights[i-1], heights[i+1], heights[i+xlen]]):
risk += (1 + heights[i])
elif i % xlen == 0: # Left Edge
if all(heights[i] < x for x in [heights[i+1], heights[i-xlen], heights[i+xlen]]):
risk += (1 + heights[i])
elif (i+1) % xlen == 0: # Right Edge
if all(heights[i] < x for x in [heights[i-1], heights[i-xlen], heights[i+xlen]]):
risk += (1 + heights[i])
elif i > (hlen-xlen): # Bottom Edge
if all(heights[i] < x for x in [heights[i-1], heights[i+1], heights[i-xlen]]):
risk += (1 + heights[i])
else: # Middle
if all(heights[i] < x for x in [heights[i-1], heights[i+1], heights[i-xlen], heights[i+xlen]]):
risk += (1 + heights[i])
return risk
#################################
# PART 2 ATTEMPT (Doesn't Work) #
#################################
def nine_up(i, xlen, heights, s=0):
j = 1
    if((i - (j*xlen)) >= 0 and heights[i - (j*xlen)] < 9):
heights[i - (j*xlen)] = 9
l, heights = nine_left(i - (j*xlen), xlen, heights)
r, heights = nine_right(i - (j*xlen), xlen, heights)
s += 1 + l + r
s, heights = nine_up(i - (j*xlen), xlen, heights, s)
j += 1
return s, heights
def nine_left(i, xlen, heights, s=0):
j = 1
if((i - j) % xlen > 0 and heights[i - j] < 9):
heights[i - j] = 9
u, heights = nine_up(i - j, xlen, heights)
d, heights = nine_down(i - j, xlen, heights)
s += 1 + u + d
s, heights = nine_left(i - j, xlen, heights, s)
j += 1
    return s, heights
def nine_right(i, xlen, heights, s=0):
j = 1
if((i + j) % xlen > 0 and heights[i + j] < 9):
heights[i + j] = 9
u, heights = nine_up(i + j, xlen, heights)
d, heights = nine_down(i + j, xlen, heights)
s += 1 + u + d
s, heights = nine_right(i + j, xlen, heights, s)
j += 1
    return s, heights
def nine_down(i, xlen, heights, s=0):
    j = 1
    if((i + (j*xlen)) < len(heights) and heights[i + (j*xlen)] < 9):
        heights[i + (j*xlen)] = 9
        l, heights = nine_left(i + (j*xlen), xlen, heights)
        r, heights = nine_right(i + (j*xlen), xlen, heights)
        s += 1 + l + r
        s, heights = nine_down(i + (j*xlen), xlen, heights, s)
        j += 1
    return s, heights
def basins(xlen, heights):
basins = []
hlen = len(heights)
for i in range(hlen):
basin_size = 1
# Literally edge and corner cases, not funny
if i == 0: # Top left corner
if all(heights[i] < x for x in [heights[i+1], heights[i+xlen]]):
pass
elif i == xlen-1: # Top right corner
if all(heights[i] < x for x in [heights[i-1], heights[i+xlen]]):
pass
elif i == hlen-xlen: # Bottom left corner
if all(heights[i] < x for x in [heights[i+1], heights[i-xlen]]):
pass
elif i == hlen-1: # Bottom right corner
if all(heights[i] < x for x in [heights[i-1], heights[i-xlen]]):
pass
elif i < xlen: # Top Edge
if all(heights[i] < x for x in [heights[i-1], heights[i+1], heights[i+xlen]]):
pass
elif i % xlen == 0: # Left Edge
if all(heights[i] < x for x in [heights[i+1], heights[i-xlen], heights[i+xlen]]):
pass
elif (i+1) % xlen == 0: # Right Edge
if all(heights[i] < x for x in [heights[i-1], heights[i-xlen], heights[i+xlen]]):
pass
elif i > (hlen-xlen): # Bottom Edge
if all(heights[i] < x for x in [heights[i-1], heights[i+1], heights[i-xlen]]):
pass
else: # Middle
if all(heights[i] < x for x in [heights[i-1], heights[i+1], heights[i-xlen], heights[i+xlen]]):
print(i)
pretty_print(xlen, heights)
heights[i] = 9
up, heights = nine_up(i, xlen, heights)
print(up)
print("")
pretty_print(xlen, heights)
left, heights = nine_left(i, xlen, heights)
print("")
pretty_print(xlen, heights)
right, heights = nine_right(i, xlen, heights)
print("")
pretty_print(xlen, heights)
down, heights = nine_down(i, xlen, heights)
print("")
pretty_print(xlen, heights)
basin_size += up + left + right + down
basins.append(basin_size)
return basins
####################
# PART 2 ATTEMPT 2 #
####################
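# A minimal working sketch for Part 2 (not the author's recursive attempt):
# a basin is a maximal region of cells with height < 9, so an iterative flood
# fill over the flattened grid finds every basin; the puzzle answer is the
# product of the three largest sizes.
def basin_sizes(xlen, heights):
    seen = set()
    sizes = []
    for start in range(len(heights)):
        if start in seen or heights[start] == 9:
            continue
        stack, size = [start], 0
        while stack:
            i = stack.pop()
            if i in seen or heights[i] == 9:
                continue
            seen.add(i)
            size += 1
            if i % xlen > 0:
                stack.append(i - 1)        # left neighbour
            if (i + 1) % xlen > 0:
                stack.append(i + 1)        # right neighbour
            if i >= xlen:
                stack.append(i - xlen)     # neighbour above
            if i + xlen < len(heights):
                stack.append(i + xlen)     # neighbour below
        sizes.append(size)
    return sizes
# e.g. top3 = sorted(basin_sizes(xlen, heights))[-3:]; answer = top3[0]*top3[1]*top3[2]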
if __name__=="__main__":
f = open("train.txt", "r")
data = f.read()
f.close()
xlen, heights = parse(data)
print(risk_levels(xlen, heights))
#print(basins(xlen, heights))
| 38.232704
| 107
| 0.487909
| 884
| 6,079
| 3.311086
| 0.079186
| 0.229587
| 0.101469
| 0.131192
| 0.820294
| 0.788521
| 0.783396
| 0.783396
| 0.723608
| 0.723608
| 0
| 0.020311
| 0.343971
| 6,079
| 158
| 108
| 38.474684
| 0.713641
| 0.066458
| 0
| 0.649635
| 0
| 0
| 0.003969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058394
| false
| 0.058394
| 0
| 0.007299
| 0.109489
| 0.109489
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
77a8cbbc5ac1b3cc28e2d81bac484112fcf2458a
| 1,809
|
py
|
Python
|
bfi/test/test_exceptions.py
|
eriknyquist/bfg
|
7f0f8c6451f7afb3d1b7d02a597f7ae17f5f85d9
|
[
"Apache-2.0"
] | 2
|
2019-10-09T16:02:49.000Z
|
2019-11-22T18:06:02.000Z
|
bfi/test/test_exceptions.py
|
eriknyquist/bfg
|
7f0f8c6451f7afb3d1b7d02a597f7ae17f5f85d9
|
[
"Apache-2.0"
] | null | null | null |
bfi/test/test_exceptions.py
|
eriknyquist/bfg
|
7f0f8c6451f7afb3d1b7d02a597f7ae17f5f85d9
|
[
"Apache-2.0"
] | 2
|
2020-06-26T09:29:03.000Z
|
2022-02-08T04:56:43.000Z
|
import unittest
from bfi import (interpret, BrainfuckSyntaxError)
class TestBFIExceptions(unittest.TestCase):
def test_syntax_unmatched_open(self):
self.assertRaises(BrainfuckSyntaxError, interpret, "[")
self.assertRaises(BrainfuckSyntaxError, interpret, "[[")
self.assertRaises(BrainfuckSyntaxError, interpret, "[][")
self.assertRaises(BrainfuckSyntaxError, interpret, "[[]")
self.assertRaises(BrainfuckSyntaxError, interpret, "[[[[[]]]]")
self.assertRaises(BrainfuckSyntaxError, interpret, "++++++>><<[")
self.assertRaises(BrainfuckSyntaxError, interpret, "[++++++>><<")
self.assertRaises(BrainfuckSyntaxError, interpret, "[++[+[++>>]<<]")
def test_syntax_unmatched_close(self):
self.assertRaises(BrainfuckSyntaxError, interpret, "]")
self.assertRaises(BrainfuckSyntaxError, interpret, "[]]")
self.assertRaises(BrainfuckSyntaxError, interpret, "[[]]]")
self.assertRaises(BrainfuckSyntaxError, interpret, "[[[]]]]")
self.assertRaises(BrainfuckSyntaxError, interpret, "++++++>><<]")
self.assertRaises(BrainfuckSyntaxError, interpret, "]++++++>><<")
self.assertRaises(BrainfuckSyntaxError, interpret, "[[++[+[++>>]]<<]")
def test_memory_error_high(self):
self.assertRaises(IndexError, interpret, ">>>>>>.", tape_size=5)
self.assertRaises(IndexError, interpret, "<<>>>>>>>>.", tape_size=5)
self.assertRaises(IndexError, interpret, ">>>>>>>><<>>.", tape_size=5)
def test_invalid_program(self):
self.assertRaises(BrainfuckSyntaxError, interpret, None)
self.assertRaises(BrainfuckSyntaxError, interpret, {})
self.assertRaises(BrainfuckSyntaxError, interpret, [])
self.assertRaises(BrainfuckSyntaxError, interpret, 56)
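# Minimal entry point (not in the original file) so the suite can be run
# directly, e.g. `python -m bfi.test.test_exceptions`.
if __name__ == "__main__":
    unittest.main()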
| 53.205882
| 78
| 0.670536
| 133
| 1,809
| 9.015038
| 0.210526
| 0.293578
| 0.570475
| 0.713094
| 0.844871
| 0.804003
| 0.804003
| 0.804003
| 0.804003
| 0.804003
| 0
| 0.003274
| 0.155887
| 1,809
| 33
| 79
| 54.818182
| 0.781925
| 0
| 0
| 0
| 0
| 0
| 0.076838
| 0
| 0
| 0
| 0
| 0
| 0.758621
| 1
| 0.137931
| false
| 0
| 0.068966
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
247bb1fbe71ec9f49fbc6019723cf9592e1eb81f
| 49
|
py
|
Python
|
Python/A complex expression/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/A complex expression/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/A complex expression/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
n = int(input())
print((((n + n) * n) - n) // n)
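# Note: for any nonzero integer n, (((n + n) * n) - n) // n divides exactly,
# since 2*n*n - n == n*(2*n - 1), so the expression simplifies to 2*n - 1.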
| 16.333333
| 31
| 0.387755
| 9
| 49
| 2.111111
| 0.444444
| 0.421053
| 0.473684
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244898
| 49
| 2
| 32
| 24.5
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
249ecf6688481b13da234ca2d9a5328b192c2433
| 4,955
|
py
|
Python
|
tests/test_stream_xep_0059.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | 3
|
2019-02-01T06:50:08.000Z
|
2020-03-24T00:45:31.000Z
|
tests/test_stream_xep_0059.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | 1
|
2017-11-07T13:03:48.000Z
|
2017-11-07T13:03:48.000Z
|
tests/test_stream_xep_0059.py
|
elrond79/SleekXMPP
|
62ebbe2d7c37f55fa63cbe24b2a610c1e3eb7b9f
|
[
"BSD-3-Clause"
] | null | null | null |
import threading
from sleekxmpp.test import *
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.plugins.xep_0030 import DiscoItems
from sleekxmpp.plugins.xep_0059 import ResultIterator, Set
class TestStreamSet(SleekTest):
def setUp(self):
register_stanza_plugin(DiscoItems, Set)
def tearDown(self):
self.stream_close()
def iter(self, rev=False):
q = self.xmpp.Iq()
q['type'] = 'get'
it = ResultIterator(q, 'disco_items', amount='1', reverse=rev)
for i in it:
for j in i['disco_items']['items']:
self.items.append(j[0])
def testResultIterator(self):
self.items = []
self.stream_start(mode='client')
t = threading.Thread(target=self.iter)
t.start()
self.send("""
<iq type="get" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item1" />
<set xmlns="http://jabber.org/protocol/rsm">
<last>item1</last>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<after>item1</after>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item2" />
<set xmlns="http://jabber.org/protocol/rsm">
<last>item2</last>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<after>item2</after>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item2" />
<set xmlns="http://jabber.org/protocol/rsm">
</set>
</query>
</iq>
""")
t.join()
        self.assertEqual(self.items, ['item1', 'item2'])
def testResultIteratorReverse(self):
self.items = []
self.stream_start(mode='client')
t = threading.Thread(target=self.iter, args=(True,))
t.start()
self.send("""
<iq type="get" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<before />
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="2">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item2" />
<set xmlns="http://jabber.org/protocol/rsm">
<first>item2</first>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<before>item2</before>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="3">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item1" />
<set xmlns="http://jabber.org/protocol/rsm">
<first>item1</first>
</set>
</query>
</iq>
""")
self.send("""
<iq type="get" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<set xmlns="http://jabber.org/protocol/rsm">
<max>1</max>
<before>item1</before>
</set>
</query>
</iq>
""")
self.recv("""
<iq type="result" id="4">
<query xmlns="http://jabber.org/protocol/disco#items">
<item jid="item1" />
<set xmlns="http://jabber.org/protocol/rsm">
</set>
</query>
</iq>
""")
t.join()
        self.assertEqual(self.items, ['item2', 'item1'])
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamSet)
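# Hypothetical direct entry point (not part of the original module): run the
# suite assembled above with the standard text runner.
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite)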
| 30.398773
| 70
| 0.454692
| 502
| 4,955
| 4.466135
| 0.167331
| 0.096343
| 0.160571
| 0.192685
| 0.739518
| 0.739518
| 0.739518
| 0.739518
| 0.73149
| 0.73149
| 0
| 0.01481
| 0.373158
| 4,955
| 162
| 71
| 30.58642
| 0.707019
| 0
| 0
| 0.778523
| 0
| 0
| 0.707164
| 0.00888
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033557
| false
| 0
| 0.033557
| 0
| 0.073826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
24e0208530995a0b108b8c52f40a94d07c878077
| 2,978
|
py
|
Python
|
loman/test/test_visualization.py
|
pushingice/loman
|
19237f5d440db44bf526b813390faedd74b9345c
|
[
"BSD-3-Clause"
] | 48
|
2018-10-31T20:09:15.000Z
|
2022-02-14T20:38:53.000Z
|
loman/test/test_visualization.py
|
pushingice/loman
|
19237f5d440db44bf526b813390faedd74b9345c
|
[
"BSD-3-Clause"
] | 21
|
2017-04-07T02:41:28.000Z
|
2018-01-24T15:59:51.000Z
|
loman/test/test_visualization.py
|
pushingice/loman
|
19237f5d440db44bf526b813390faedd74b9345c
|
[
"BSD-3-Clause"
] | 3
|
2018-10-17T01:37:17.000Z
|
2019-12-05T11:51:50.000Z
|
import loman.visualization
from loman import Computation, States
import loman.computeengine
import six
def test_simple():
comp = Computation()
comp.add_node('a')
comp.add_node('b', lambda a: a + 1)
comp.add_node('c', lambda a: 2 * a)
comp.add_node('d', lambda b, c: b + c)
d = comp.to_pydot()
nodes = d.obj_dict['nodes']
label_to_name_mapping = {v[0]['attributes']['label']: k for k, v in six.iteritems(nodes)}
node = {label: nodes[name][0] for label, name in six.iteritems(label_to_name_mapping)}
assert node['a']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UNINITIALIZED]
assert node['a']['attributes']['style'] == 'filled'
assert node['b']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UNINITIALIZED]
assert node['b']['attributes']['style'] == 'filled'
assert node['c']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UNINITIALIZED]
assert node['c']['attributes']['style'] == 'filled'
assert node['d']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UNINITIALIZED]
assert node['d']['attributes']['style'] == 'filled'
comp.insert('a', 1)
d = comp.to_pydot()
nodes = d.obj_dict['nodes']
label_to_name_mapping = {v[0]['attributes']['label']: k for k, v in six.iteritems(nodes)}
node = {label: nodes[name][0] for label, name in six.iteritems(label_to_name_mapping)}
assert node['a']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UPTODATE]
assert node['a']['attributes']['style'] == 'filled'
assert node['b']['attributes']['fillcolor'] == loman.visualization.state_colors[States.COMPUTABLE]
assert node['b']['attributes']['style'] == 'filled'
assert node['c']['attributes']['fillcolor'] == loman.visualization.state_colors[States.COMPUTABLE]
assert node['c']['attributes']['style'] == 'filled'
assert node['d']['attributes']['fillcolor'] == loman.visualization.state_colors[States.STALE]
assert node['d']['attributes']['style'] == 'filled'
comp.compute_all()
d = comp.to_pydot()
nodes = d.obj_dict['nodes']
label_to_name_mapping = {v[0]['attributes']['label']: k for k, v in six.iteritems(nodes)}
node = {label: nodes[name][0] for label, name in six.iteritems(label_to_name_mapping)}
assert node['a']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UPTODATE]
assert node['a']['attributes']['style'] == 'filled'
assert node['b']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UPTODATE]
assert node['b']['attributes']['style'] == 'filled'
assert node['c']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UPTODATE]
assert node['c']['attributes']['style'] == 'filled'
assert node['d']['attributes']['fillcolor'] == loman.visualization.state_colors[States.UPTODATE]
assert node['d']['attributes']['style'] == 'filled'
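# A hypothetical helper (not in the original test) that would collapse the
# repeated fillcolor/style assertions above into one call per recomputation,
# e.g. assert_node_states(comp, {'a': States.UPTODATE, 'd': States.STALE}).
def assert_node_states(comp, expected):
    d = comp.to_pydot()
    nodes = d.obj_dict['nodes']
    label_to_name = {v[0]['attributes']['label']: k for k, v in six.iteritems(nodes)}
    node = {label: nodes[name][0] for label, name in six.iteritems(label_to_name)}
    for label, state in expected.items():
        assert node[label]['attributes']['fillcolor'] == loman.visualization.state_colors[state]
        assert node[label]['attributes']['style'] == 'filled'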
| 50.474576
| 105
| 0.671927
| 378
| 2,978
| 5.18254
| 0.12963
| 0.122511
| 0.147014
| 0.226646
| 0.88974
| 0.88974
| 0.878509
| 0.846861
| 0.846861
| 0.846861
| 0
| 0.003476
| 0.130625
| 2,978
| 58
| 106
| 51.344828
| 0.753187
| 0
| 0
| 0.541667
| 0
| 0
| 0.191068
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.020833
| false
| 0
| 0.083333
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
24f81b8bb2e037bddd13950aa67d51e54f24718a
| 114
|
py
|
Python
|
mrrt/mri/sim/__init__.py
|
mritools/mrrt.mri
|
00829032f6d19d078a23d006b73f1028b3ec3902
|
[
"BSD-3-Clause"
] | 5
|
2020-02-01T22:06:32.000Z
|
2021-06-29T14:18:58.000Z
|
mrrt/mri/sim/__init__.py
|
mritools/mrrt.mri
|
00829032f6d19d078a23d006b73f1028b3ec3902
|
[
"BSD-3-Clause"
] | null | null | null |
mrrt/mri/sim/__init__.py
|
mritools/mrrt.mri
|
00829032f6d19d078a23d006b73f1028b3ec3902
|
[
"BSD-3-Clause"
] | 1
|
2020-03-31T11:53:40.000Z
|
2020-03-31T11:53:40.000Z
|
from ._mri_objects import * # noqa
from ._mri_fmap_sim import * # noqa
from ._mri_sensemap_sim import * # noqa
| 28.5
| 40
| 0.736842
| 17
| 114
| 4.470588
| 0.470588
| 0.276316
| 0.368421
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 114
| 3
| 41
| 38
| 0.817204
| 0.122807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7004c629b534c56c4aa40e401eda1aec2999f35e
| 542,861
|
py
|
Python
|
ion/services/sa/observatory/test/test_asset_tracking.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 3
|
2016-09-20T09:50:06.000Z
|
2018-08-10T01:41:38.000Z
|
ion/services/sa/observatory/test/test_asset_tracking.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | null | null | null |
ion/services/sa/observatory/test/test_asset_tracking.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 2
|
2016-03-16T22:25:49.000Z
|
2016-11-26T14:54:21.000Z
|
#!/usr/bin/env python
import unittest
from nose.plugins.attrib import attr
from pyon.core.exception import NotFound, BadRequest, Inconsistent
import binascii
import uuid
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.context import LocalContextMixin
from pyon.public import RT, PRED, OT, log, CFG, IonObject
from pyon.event.event import EventPublisher
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from interface.objects import ValueTypeEnum, EventCategoryEnum
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.services.coi.iorg_management_service import OrgManagementServiceClient
from interface.services.sa.iobservatory_management_service import ObservatoryManagementServiceClient
from ion.processes.bootstrap.ion_loader import TESTED_DOC
class FakeProcess(LocalContextMixin):
name = ''
TEST_PATH = TESTED_DOC
TEST_XLS_FOLDER = './ion/services/sa/observatory/test_xls/'
@attr('INT', group='sa')
class TestAssetTracking(IonIntegrationTestCase):
def setUp(self):
# Start container
self._start_container()
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
self.RR = ResourceRegistryServiceClient(node=self.container.node)
self.RR2 = EnhancedResourceRegistryClient(self.RR)
self.OMS = ObservatoryManagementServiceClient(node=self.container.node)
self.org_management_service = OrgManagementServiceClient(node=self.container.node)
self.event_publisher = EventPublisher()
def _perform_preload(self, load_cfg):
#load_cfg["ui_path"] = "res/preload/r2_ioc/ui_assets"
#load_cfg["path"] = "R2PreloadedResources.xlsx"
#load_cfg["assetmappings"] = "OOIPreload.xlsx"
self.container.spawn_process("Loader", "ion.processes.bootstrap.ion_loader", "IONLoader", config=load_cfg)
def _preload_scenario(self, scenario, path=TEST_PATH, idmap=False, **kwargs):
load_cfg = dict(op="load",
scenario=scenario,
attachments="res/preload/r2_ioc/attachments",
path=path,
idmap=idmap)
load_cfg.update(kwargs)
self._perform_preload(load_cfg)
"""
#--------------------------------------------------------------------
Marine Asset Management [hook]
Unit tests for Marine Asset Management
Base CRUD functions
test_create_asset_type
test_create_asset
test_create_asset_bad_altid - exercise create_asset, test altid existence, uniqueness and format
test_create_asset_value_types - add attributes value_type RealValue, CodeValue, StringValue, etc
test_create_event_duration_type
test_create_event_duration
test_create_event_bad_altid - exercise create_event_duration and altid existence, uniqueness and format
test_update_attribute_specifications - see details below
test_delete_attribute_specification - see details below
test_create_codespace - exercise many codespace services (see details below)
Prepare and extensions
test_create_asset_extension
test_create_asset_extension_with_prepare
test_create_event_duration_extension
test_create_event_duration_extension_with_prepare
Spread sheet upload and download:
test_upload_xls - single upload, all sheets
test_download_xls - single dump of system instances of marine tracking resources, types and code info
test_upload_xls_twice - multi pass test add and update
test_upload_codes - requires update and testing with CodeSpaces sheet included
test_download_codes - requires update and testing with CodeSpaces sheet included
test_upload_xls_with_codes - loading only code related then loading everything but code related
test_upload_xls_triple_codes - multi load exercises 'add', update and 'remove' (remove code 'pink')
test_upload_xls_triple_codes_only - multi load, no CodeSpaces sheet, only Codes (remove code 'pink')
test_upload_without_codespace_instance - multi load, utilize code space instance, if available
test_upload_remove_codeset
test_upload_xls_triple_assets - load system, add resources, remove and/or modify resources (assets)
test_upload_xls_triple_events - load system, add resources, remove and/or modify resources (events)
test_upload_all_sheets_twice - load xlsx (all sheets), reload same
test_attribute_value_encoding
test_get_picklist
test_asset_update_and_altid - update[asset|event_duration] ensure unique altid in namespace (res.name)
test_upload_new_attribute_specification - add new AttributeSpecification to existing type resource instance
Data input testing:
General:
test_empty_workbook - general
test_add_new_asset_type - add new asset type and include base type in spread sheet
test_add_new_asset_type_extend_wo_base - add asset type without base in spread sheets
test_add_new_asset_type_extend_from_device - add asset type which extends device
test_add_new_asset_type_extend_from_platform- add asset type which extends (leaf) platform
test_add_new_event_type - add event duration type which extends base (base in spread sheets)
test_add_new_event_type_wo_base - add event duration type which extends base (base not in spread sheets)
Asset, AssetType and Attribute tests:
test_new_asset_base - add new asset, extends Base AssetType (4 sheets)
test_new_asset_base_attributes - add new asset, extends Base
test_new_asset_base_attributes_short -
test_new_asset_base_attributes_short_update -
test_new_asset_base_one_attribute_update -
test_new_asset_base_one_attribute_no_types - attribute specification sheet; provide single 'descr' attribute value for 'NewAsset'; expect defaults for all attributes other than 'descr'
test_new_asset_base_one_attribute_only - no attribute specification sheet; single 'descr' attribute value, expect defaults for remaining values
test_add_new_asset_device
test_add_new_asset_platform
test_add_new_asset_NewType
test_alpha_preload - (dev only) used to verify alpha preload works (for UI support)
EventDuration to Asset Mapping Tests:
test_deployment_to_multiple_assets
test_update_attribute_specifications (Exercise RT.AttributeSpecification, RT.AssetType, and service
update_attribute_specifications)
test_delete_attribute_specification (Exercise RT.AttributeSpecification, RT.AssetType, and service
delete_attribute_specifications)
test_create_codespace (Exercise RT.CodeSpace, OT.Code, OT.CodeSet as well as services (6):
read_codes_by_name, read_codesets_by_name, update_codes,
update_codesets, delete_codes, delete_codesets
* indicates spreadsheet update required or test targeted (skip for now)
Helper functions used by unit tests:
load_marine_assets_from_xlsx - load system: code space, codes, codesets, assets, asset types, event durations and event durations types
create_value - used to create IntegerValue, RealValue, StringValue, BooleanValue
create_complex_value - used to create complex types, such as CodeValue, etc.
_create_attribute - create 'any old' attribute of specific value type
_create_attribute_specification - create 'any old' attribute specification of specific value type
_get_type_resource_by_name
#--------------------------------------------------------------------
"""
# -----
# ----- UNIT TEST: test_create_asset_type
# -----
@attr('UNIT', group='sa')
def test_create_asset_type(self):
log.debug('\n\n***** Start : *test_create_asset_type')
# Create test AssetType object
ion_asset_type = IonObject(RT.AssetType, name='TestAssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_type)
# Create attribute specification
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
attribute_specification = self._create_attribute_specification('StringValue', 's_name', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
attribute_specification = self._create_attribute_specification('StringValue', 'descr', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type = self.OMS.read_asset_type(asset_type_id)
# ---- cleanup
self.OMS.force_delete_asset_type(asset_type_id)
log.debug('\n\n***** Completed: test_create_asset_type')
# -----
# ----- UNIT TEST: test_create_asset
# -----
@attr('UNIT', group='sa')
def test_create_asset(self):
log.debug('\n\n***** Start : test_create_asset')
# ----- Create AssetType object with attribute specification
ion_asset_spec = IonObject(RT.AssetType, name='TestAssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_spec)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
attribute_specification = self._create_attribute_specification('StringValue', 's_name', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# ----- Create Asset object
asset_obj = IonObject(RT.Asset, name='Test Asset')
log.debug('\n\n[unit] calling create_asset...')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id)
# ----- Read, create attribute and update Asset object
asset_obj = self.OMS.read_asset(asset_id)
# Create Attribute for Asset
attribute = self._create_attribute(value_type='StringValue', name='s_name', value='hello')
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
# ----- unassign association test
self.OMS.unassign_asset_type_from_asset(asset_type_id, asset_id)
# ----- cleanup
self.OMS.force_delete_asset_type(asset_type_id)
self.OMS.force_delete_asset(asset_id)
log.debug('\n\n***** Completed: test_create_asset')
# -----
# ----- UNIT TEST: test_create_asset_bad_altid
# -----
@attr('UNIT', group='sa')
def test_create_asset_bad_altid(self):
log.debug('\n\n***** Start : test_create_asset_bad_altid')
# ----- Create Asset object - negative tests re: alt_ids
asset_obj = IonObject(RT.Asset, name='Test Asset')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Overview: If alt_ids are provided during asset creation, verify
# they are well formed and unique
# Step 1. alt_ids with invalid namespace
# Step 2. alt_ids with empty namespace
# Step 3. alt_ids with empty name
# Step 4. alt_ids with multiple alt_ids provided (len != 1)
# Step 5. create Asset and then try to create another with same alt_ids
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 1. alt_ids with invalid namespace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 1. alt_ids with empty name')
altid = RT.EventDuration + ':' + asset_obj.name
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_asset(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 2. alt_ids with empty namespace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 2. alt_ids with empty name')
asset_obj.alt_ids = []
altid = ':' + asset_obj.name
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_asset(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 3. alt_ids with empty name
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 3. alt_ids with empty name')
asset_obj.alt_ids = []
altid = RT.Asset + ':'
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_asset(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 4. alt_ids with multiple alt_ids provided (len != 1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 4. alt_ids with multiple alt_ids provided (len != 1)')
asset_obj.alt_ids = []
altid = RT.Asset + ':' + 'fred'
asset_obj.alt_ids.append(altid)
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_asset(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 5. create Asset and then try to create another with same alt_ids
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 5. create Asset and then try to create another with same alt_ids')
asset_obj.alt_ids = []
altid = RT.Asset + ':' + asset_obj.name
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_asset(asset_obj)
asset_id = self.OMS.create_asset(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
log.debug('\n\n***** Completed: test_create_asset_bad_altid')
# -----
# ----- UNIT TEST: test_create_asset_bad_altid
# -----
@attr('UNIT', group='sa')
def test_create_event_bad_altid(self):
log.debug('\n\n***** Start : test_create_event_bad_altid')
# ----- Create Asset object - negative tests re: alt_ids
asset_obj = IonObject(RT.EventDuration, name='Test Event')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Overview: If alt_ids are provided during event duration creation, verify
# they are well formed and unique
# Step 1. alt_ids with invalid namespace
# Step 2. alt_ids with empty namespace
# Step 3. alt_ids with empty name
# Step 4. alt_ids with multiple alt_ids provided (len != 1)
# Step 5. create EventDuration and then try to create another with same alt_ids
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 1. alt_ids with invalid namespace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 1. alt_ids with empty name')
altid = RT.Asset + ':' + asset_obj.name
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_event_duration(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 2. alt_ids with empty namespace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 2. alt_ids with empty name')
asset_obj.alt_ids = []
altid = ':' + asset_obj.name
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_event_duration(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 3. alt_ids with empty name
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 3. alt_ids with empty name')
asset_obj.alt_ids = []
altid = RT.EventDuration + ':'
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_event_duration(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 4. alt_ids with multiple alt_ids provided (len != 1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 4. alt_ids with multiple alt_ids provided (len != 1)')
asset_obj.alt_ids = []
altid = RT.EventDuration + ':' + 'fred'
asset_obj.alt_ids.append(altid)
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_event_duration(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 5. create Asset and then try to create another with same alt_ids
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Step 5. create Event and then try to create another with same alt_ids')
asset_obj.alt_ids = []
altid = RT.EventDuration + ':' + asset_obj.name
asset_obj.alt_ids.append(altid)
try:
asset_id = self.OMS.create_event_duration(asset_obj)
asset_id = self.OMS.create_event_duration(asset_obj)
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest: %s', Arguments.get_error_message())
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound: %s', Arguments.get_error_message())
except Inconsistent, Arguments:
log.debug('\n\n[unit] Inconsistent: %s', Arguments.get_error_message())
except:
log.debug('\n\n[unit] failed ', exc_info=True)
log.debug('\n\n***** Completed: test_create_event_bad_altid')
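    # -----
    # ----- Helper sketch (hypothetical; not in the original file): the identical
    # ----- try/except ladders in the two tests above could be collapsed into a
    # ----- single call such as self._expect_failure(self.OMS.create_asset, asset_obj).
    # -----
    def _expect_failure(self, call, *args, **kwargs):
        try:
            return call(*args, **kwargs)
        except (BadRequest, NotFound, Inconsistent) as Arguments:
            log.debug('\n\n[unit] expected failure: %s', Arguments.get_error_message())
        except:
            log.debug('\n\n[unit] failed ', exc_info=True)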
# -----
# ----- UNIT TEST: test_create_asset_value_types (skip)
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_create_asset_value_types(self):
log.debug('\n\n***** Start : test_create_asset_value_types')
verbose = False
# Load resources into system from xlsx file
# Create AssetType with 2 AttributeSpecifications
# Create an Asset with 2 attributes
# AssetType [assign] Asset
# Create AssetExtension(using Asset id)
# Show associations for AssetExtension (verify one is displayed)
# Cleanup
fid = TEST_XLS_FOLDER + 'CodeSpaces150.xlsx' # CodeSpaces, Codes and CodeSets
code_space_ids = []
interactive = False
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] == 'ok' and not response['err_msg']:
if response['res_modified']:
if 'code_spaces' in response['res_modified']:
code_space_ids = response['res_modified']['code_spaces'][:]
else:
raise BadRequest('failed to process codespace related items...')
# set breakpoint for testing...
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# ----- Create AssetType object
ion_asset_spec = IonObject(RT.AssetType, name='Test AssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_spec)
if verbose: log.debug('\n\n***** Creating first Attribute for Asset...')
# Create AttributeSpecification 1
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
value_type = ValueTypeEnum._str_map[ValueTypeEnum.RealValue]
attribute_specification = self._create_attribute_specification('RealValue', 'height', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
# Create AttributeSpecification 2
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
attribute_specification = self._create_attribute_specification('StringValue', 's_name', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
# Create AttributeSpecification 3
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
if verbose: log.debug('\n\n[unit] Create AttributeSpecification 3')
attribute_specification = self._create_attribute_specification('CodeValue', 'asset type',source=asset_type_obj.name,
constraints='set=MAM:asset type',pattern='asset type',
codeset_name='asset type')
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# ----- Create Asset object
asset_obj = IonObject(RT.Asset, name='Test Asset')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id)
asset_obj = self.OMS.read_asset(asset_id)
# set breakpoint for testing...
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# Create Attribute 1
if verbose: log.debug('\n\n[unit] Create Attribute 1')
attribute = self._create_attribute('RealValue', 'height', value=10.1)
asset_obj = self.OMS.read_asset(asset_id)
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
# Create Attribute 2
if verbose: log.debug('\n\n[unit] Create Attribute 2')
attribute = self._create_attribute('StringValue', 's_name', value='some unique name')
asset_obj = self.OMS.read_asset(asset_id)
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
# Create Attribute 3
# Attribute({, 'name': 'op_stat', 'value': [CodeValue({, 'value': 'fully functioning'})]})
# Attribute({, 'name': 'asset type', 'value': [CodeValue({, 'value': 'Mooring riser component'})]})
if verbose: log.debug('\n\n[unit] Create Attribute 3')
attribute = self._create_attribute('CodeValue', 'asset type', value='Mooring riser component')
log.debug('\n\n[unit] attribute 3: %s', attribute)
asset_obj = self.OMS.read_asset(asset_id)
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
self.OMS.read_asset(asset_id)
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
#log.debug('\n[unit] after set attributes: %s', attributes)
# update RealValue attribute
for name, attr in attributes.iteritems():
if name == 'Attribute real value':
# let's update value
attribute = IonObject(OT.Attribute)
attribute['name'] = 'Attribute real value'
value = self.create_value(20.8)
attribute['value'] = [value]
asset_obj.asset_attrs[name] = attribute
break
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
if verbose: log.debug('\n\n***** updated \'real value\' attribute values....')
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# Add CodeValue
asset_obj = self.OMS.read_asset(asset_id)
attribute = IonObject(OT.Attribute)
attribute['name'] = 'asset type'
value = self.create_complex_value('CodeValue', 'asset type', 'RSN Primary cable')
attribute['value'] = [value]
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
if verbose: log.debug('\n\n***** added \'code value\' attribute values....')
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# Update CodeValue
asset_obj = self.OMS.read_asset(asset_id)
attribute = IonObject(OT.Attribute)
attribute['name'] = 'asset type'
value = self.create_complex_value('CodeValue', 'asset type', 'Platform')
attribute['value'] = [value]
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
if verbose: log.debug('\n\n***** update \'code value\' attribute values....')
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# ----- Clean up
if verbose: log.debug('\n\n***** Cleanup........')
self.OMS.unassign_asset_type_from_asset(asset_type_id, asset_id)
self.OMS.force_delete_asset_type(asset_type_id)
self.OMS.force_delete_asset(asset_id)
log.debug('\n\n***** Completed: test_create_asset_value_types')
# -----
# ----- UNIT TEST: test_create_asset_extension
# -----
@attr('UNIT', group='sa')
def test_create_asset_extension(self):
log.debug('\n\n***** Start : test_create_asset_extension')
# Create AssetType with 2 AttributeSpecifications
# Create an Asset with 2 attributes
# AssetType [assign] Asset
# Create AssetExtension(using Asset id)
# Show associations for AssetExtension (verify one is displayed)
# Cleanup
verbose = True
# ----- Create AssetType object
ion_asset_spec = IonObject(RT.AssetType, name='Test AssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_spec)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
if verbose: log.debug('\n\n***** Creating first AttributeSpecification for Asset...')
# Create AttributeSpecification 1
attribute_specification = self._create_attribute_specification('StringValue', 'operator name', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
# Create AttributeSpecification 2
attribute_specification = self._create_attribute_specification('RealValue', 'operator height', asset_type_obj.name,None,None,None)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# ----- Create Asset object
asset_obj = IonObject(RT.Asset, name='Test Asset')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id)
asset_obj = self.OMS.read_asset(asset_id)
# Create Attribute for Asset; update Asset
attribute = self._create_attribute('StringValue', 'operator name', 'nina recorder')
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
#attribute = IonObject(OT.Attribute)
attribute = self._create_attribute('RealValue', 'operator height', 1.0)
asset_obj = self.OMS.read_asset(asset_id)
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
# ----- assign AssetType to Asset
#self.OMS.assign_asset_type_to_asset(asset_type_id, asset_id)
#if verbose: log.debug('\n\n***** Create Association: Asset (predicate=PRED.implementsAssetType) AssetType')
# - - - - - - - - - - - - - - - - - - - -
# Create an AssetExtension (using Asset id)
ae = self.OMS.get_asset_extension(asset_id)
if verbose: log.debug('\n\n***** Create and Display AssetExtension: %s', ae)
if verbose: log.debug('\n\n*****\n***** Note: AssetExtensionID: %s, AssetID: %s', ae._id, asset_id)
# ----- Review Associations (shows an association between Asset and AssetType)
if verbose: log.debug('\n\n***** Review Associations')
assetExtension_associations = self.container.resource_registry.find_associations(anyside=ae._id, id_only=False)
if verbose: log.debug('\n\n***** AssetExtension Associations(%d): %s ',
len(assetExtension_associations),assetExtension_associations)
# ----- Clean up
if verbose: log.debug('\n\n***** Cleanup........')
self.OMS.unassign_asset_type_from_asset(asset_type_id, asset_id)
self.OMS.force_delete_asset_type(asset_type_id)
self.OMS.force_delete_asset(asset_id)
log.debug('\n\n***** Completed: test_create_asset_extension')
# -----
# ----- UNIT TEST: test_create_asset_extension_with_prepare
# -----
@attr('UNIT', group='sa')
def test_create_asset_extension_with_prepare(self):
log.debug('\n\n***** Start : test_create_asset_extension_with_prepare')
verbose = True
# Create AssetType with 2 AttributeSpecifications
# Create an Asset with 2 attributes
# AssetType [assign] Asset
# Create AssetExtension(using Asset id)
# Show associations for AssetExtension (verify one is displayed)
# Cleanup
# ----- Create AssetType object
ion_asset_spec = IonObject(RT.AssetType, name='Test AssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_spec)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
if verbose: log.debug('\n\n***** Creating first Attribute for Asset...')
# Create AttributeSpecification 1
attribute_specification = self._create_attribute_specification('StringValue', 'operator name', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
# Create AttributeSpecification 2
attribute_specification = self._create_attribute_specification('RealValue', 'operator height', asset_type_obj.name,None,None,None)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# ----- Create Asset object
asset_obj = IonObject(RT.Asset, name='Test Asset')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id) # test association
asset_obj = self.OMS.read_asset(asset_id)
if verbose: log.debug('\n\n***** Review Associations (on create)')
asset_associations = self.container.resource_registry.find_associations(anyside=asset_id, id_only=False)
if verbose: log.debug('\n\n***** Asset Associations(%d): %s ', len(asset_associations),asset_associations)
# Create Attribute for Asset; update Asset
attribute = self._create_attribute('StringValue', 'operator name', 'nina recorder')
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
#attribute = IonObject(OT.Attribute)
attribute = self._create_attribute('RealValue', 'operator height', 2.0)
asset_obj = self.OMS.read_asset(asset_id)
asset_obj.asset_attrs[attribute['name']] = attribute
self.OMS.update_asset(asset_obj)
# ----- assign AssetType to Asset
#self.OMS.assign_asset_type_to_asset(asset_type_id, asset_id)
if verbose: log.debug('\n\n***** Create Association: Asset (predicate=PRED.implementsAssetType) AssetType')
# - - - - - - - - - - - - - - - - - - - -
# Create an AssetExtension (using Asset id)
ae = self.OMS.get_asset_extension(asset_id)
if verbose: log.debug('\n\n***** Create and Display AssetExtension: %s', ae)
if verbose: log.debug('\n\n*****\n***** Note: AssetExtensionID: %s, AssetID: %s', ae._id, asset_id)
# - - - - - - - - - - - - - - - - - - - -
# Create an AssetPrepareSupport (using Asset id)
aps = self.OMS.prepare_asset_support(asset_id)
if verbose: log.debug('\n\n OMS.prepare_asset_support returned with %s', str(aps))
# ----- Review Associations (shows an association between Asset and AssetType)
if verbose: log.debug('\n\n***** Review Associations')
assetExtension_associations = self.container.resource_registry.find_associations(anyside=ae._id, id_only=False)
if verbose: log.debug('\n\n***** AssetExtension Associations(%d): %s ',
len(assetExtension_associations),assetExtension_associations)
# ----- Clean up
if verbose: log.debug('\n\n***** Cleanup........')
self.OMS.unassign_asset_type_from_asset(asset_type_id, asset_id)
self.OMS.force_delete_asset_type(asset_type_id)
self.OMS.force_delete_asset(asset_id)
log.debug('\n\n***** Completed: test_create_asset_extension_with_prepare')
# -----
# ----- UNIT TEST: test_get_assets_picklist
# -----
@attr('UNIT', group='sa')
def test_get_picklist(self):
try:
# picklist[ (res.name, res.id), ... ]
log.debug('\n\n***** Start : test_get_picklist')
interactive = False
fid = TEST_XLS_FOLDER + 'test500.xlsx'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest(response['err_msg'])
response = self.load_marine_assets_from_xlsx(fid)
if response:
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest(response['err_msg'])
# set breakpoint for testing...
if interactive:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# get assets picklist
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Get Assets picklist.....')
picklist = []
picklist = self.OMS.get_assets_picklist(id_only='False')
self.assertEqual(4, len(picklist), msg='asset picklist failed')
if picklist:
log.debug('\n\n[unit] assets picklist(%d): %s', len(picklist),picklist)
else:
log.debug('\n\n[unit] assets picklist empty!')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# get events picklist
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
log.debug('\n\n[unit] Get Events picklist.....')
picklist = []
picklist = self.OMS.get_events_picklist(id_only='False')
self.assertEqual(8, len(picklist), msg='events picklist failed')
if picklist:
log.debug('\n\n[unit] events picklist (%d): %s', len(picklist), picklist)
else:
log.debug('\n\n[unit] events picklist empty!')
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] failed ', exc_info=True)
log.debug('\n\n***** Completed: test_get_picklist')
# -----
# ----- UNIT TEST: test_asset_update_and_altid - requires changes for altid uniqueness (new update_asset,etc.)
# -----
#@unittest.skip("targeting")
@attr('UNIT', group='sa')
def test_asset_update_and_altid(self):
# Step 1. create asset_type (name=Base)
# create asset (AssetUpdateTest) and asset_type association (no alt_id)
# update asset description ((rev 3)
# Result: One new asset with res.name='AssetUpdateTest'
# altid=resname, association to AssetType named 'Base'
#
# Step 2. create asset_type
# create asset (AssetUpdateTest) and asset_type association (no alt_id)
# update asset
# Result: One new asset with res.name='AssetUpdateTest'
# altid=(resname+'-" + id[:5]), association to AssetType named 'Base'
#
# Total: 2 Assets, 1 AssetType
#
# Step 3. create asset_type
# create asset (AssetUpdateTest) and asset_type association (no alt_id)
# update asset
# Result: One new asset with res.name='AssetUpdateTest'
# altid=(resname+'-" + id[:5]), association to AssetType named 'Base'
#
# Total: 3 Assets, 1 AssetType
#
# Step 4. negative test - expect failure (alter altid with invalid namespace
#
# Step 5. negative test - expect failure (add additional altid)
#
log.debug('\n\n***** Start : test_asset_update_and_altid')
verbose = False
step_number = 0
fid = TEST_XLS_FOLDER + 'test500-load-asset-types.xlsx'
self.load_marine_assets_from_xlsx(fid)
asset_type = self._get_type_resource_by_name('Base', RT.AssetType)
value_string = 'hello world'
value_real = '1.45'
value_date = '12/25/2014'
value_time = '23:17'
value_datetime = '12/25/2014 23:17'
value_integer = '5'
spec_attributes = asset_type.attribute_specifications
attributes = {}
attribute = {}
for name, spec in spec_attributes.iteritems():
value_type = spec['value_type']
if value_type == 'CodeValue':
label = spec['attr_label']
attribute = self._create_attribute(value_type, name, label)
else:
attribute = self._create_attribute(value_type, name, None)
if attribute:
attributes[name] = attribute
log.debug('\n\n[unit] attributes(%d): %s', len(attributes), attributes)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# create asset_type
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n[unit] create asset_type with single attribute specification.....')
# ----- Create AssetType object
asset_type_id = asset_type._id
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 1. Create first Asset with res.name == 'AssetUpdateTest'
# create asset (which doesn't have alt_id)
# expect asset to be created and on update have altid of 'Asset:AssetUpdateTest'
        # Issue another update to modify description and verify altid handling works correctly.
# note: create asset (provide asset_type_id for association)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
try:
log.debug('\n\n[unit] Step 1. Create first Asset with res.name == \'AssetUpdateTest\'')
step_number += 1
asset_obj = IonObject(RT.Asset, name='AssetUpdateTest')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id)
asset_obj = self.OMS.read_asset(asset_id)
asset_associations = self.container.resource_registry.find_associations(anyside=asset_id, id_only=True)
self.assertEqual(1, len(asset_associations))
if verbose: log.debug('\n\n[unit] show asset_obj.alt_ids: %s', asset_obj.alt_ids)
test_description = 'step ' + str(step_number)
asset_obj.asset_attrs = attributes
asset_obj.description = test_description
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
test_description = 'update asset description successfully!'
asset_obj.description = test_description
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
msg = 'step ' + str(step_number) + ' description update failed'
self.assertEqual(asset_obj.description, test_description, msg=msg)
picklist = []
picklist = self.OMS.get_assets_picklist(id_only='False')
self.assertEqual(1, len(picklist), msg='should have 1 item(s) in pick list; assert failed')
if verbose: log.debug('\n\n[unit] step %d UPDATED asset_obj.alt_ids: %s', step_number, asset_obj.alt_ids)
self.assertEqual(1, len(asset_obj.alt_ids), msg='one and only one altid permitted for Asset resources')
value = asset_obj.alt_ids[0]
self.assertEqual('Asset:AssetUpdateTest', value, msg='alt_id assigned not what was expected (Asset:AssetUpdateTest)')
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] failed Step %d ', step_number, exc_info=True)
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 2. Create second Asset with res.name == 'AssetUpdateTest'
# create asset
# Expect asset to be created and altid of 'Asset:(res.name)-12345' where 12345 are asset_obj._id[:5]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
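        # e.g. with asset_obj._id == '3c9b3df0...', the expected alt_id is
        # 'Asset:AssetUpdateTest-3c9b3'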
try:
log.debug('\n\n[unit] Step 2. Create second Asset with res.name == \'AssetUpdateTest\'')
step_number += 1
asset_obj = IonObject(RT.Asset, name='AssetUpdateTest', description='second AssetUpdateTest, push same attr_key_name...')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id) # test association
asset_obj = self.OMS.read_asset(asset_id)
asset_associations = self.container.resource_registry.find_associations(anyside=asset_id, id_only=True)
self.assertEqual(1, len(asset_associations))
if verbose: log.debug('\n\n[unit] show asset_obj.alt_ids: %s', asset_obj.alt_ids)
test_description = 'step ' + str(step_number)
asset_obj.description = test_description
asset_obj.asset_attrs = attributes
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
msg = 'step ' + str(step_number) + ' description update failed'
alt_id_name = RT.Asset + ":" + asset_obj.name + '-' + asset_obj._id[:5]
self.assertEqual(asset_obj.description, test_description, msg=msg)
picklist = []
picklist = self.OMS.get_assets_picklist(id_only='False')
self.assertEqual(2, len(picklist), msg='should have 2 item(s) in pick list; assert failed')
self.assertEqual(1, len(asset_obj.alt_ids), msg='should have 1 and only one item in alt_ids')
self.assertEqual(asset_obj.alt_ids[0], alt_id_name, msg='alt_id assigned not equal to expected')
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] failed Step %d ', step_number, exc_info=True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # Step 3. Create third Asset with res.name == 'AssetUpdateTest'
# create asset
# Expect asset to be created and altid of 'Asset:(res.name)-12345' where 12345 are asset_obj._id[:5]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
try:
step_number += 1
log.debug('\n\n[unit] Step %d. Create third Asset with res.name == \'AssetUpdateTest\'', step_number)
asset_obj = IonObject(RT.Asset, name='AssetUpdateTest', description='description...')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id) # test association
asset_obj = self.OMS.read_asset(asset_id)
test_description = 'step ' + str(step_number)
asset_obj.description = test_description
asset_obj.asset_attrs = attributes
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
msg = 'step ' + str(step_number) + ' description update failed'
alt_id_name = RT.Asset + ":" + asset_obj.name + '-' + asset_obj._id[:5]
self.assertEqual(asset_obj.description, test_description, msg=msg)
picklist = []
picklist = self.OMS.get_assets_picklist(id_only='False')
self.assertEqual(3, len(picklist), msg='should have 3 item(s) in pick list; assert failed')
self.assertEqual(1, len(asset_obj.alt_ids), msg='should have 1 and only one item in alt_ids')
            self.assertEqual(asset_obj.alt_ids[0], alt_id_name, msg='alt_id assigned not equal to expected')
unique = self.unique_altids(RT.Asset)
            if not unique:
                log.debug('\n\n[unit] duplicate altids found')
                self.fail('duplicate altids found for Asset resources')
else:
log.debug('\n\n[unit] all altids unique')
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] failed Step %d ', step_number, exc_info=True)
raise # raise here to fail test case
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 4. Create Asset with res.name == 'AssetUpdateTest' (negative test)
# create asset
# Expect asset to be created and altid of 'Asset:(res.name)-12345' where 12345 are asset_obj._id[:5]
# Clear alt_ids, set fake altid with inconsistent namespace and issue update_asset - expect failure
# Error message:
# 'BadRequest: 400 - alt_id provided has invalid namespace (EventDuration); expected Asset'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
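        # alt_ids follow the '<ResourceType>:<name>' convention, so an alt_id in the
        # EventDuration namespace on an Asset resource should be rejected on update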
try:
step_number += 1
log.debug('\n\n[unit] Step %d. Create Asset with res.name == \'AssetUpdateTest\' (expect failure)', step_number)
asset_obj = IonObject(RT.Asset, name='AssetUpdateTest', description='description...')
asset_id = self.OMS.create_asset(asset_obj, asset_type_id) # test association
asset_obj = self.OMS.read_asset(asset_id)
test_description = 'step ' + str(step_number)
asset_obj.description = test_description
asset_obj.alt_ids = []
fake_altid = RT.EventDuration + ':AssetUpdateTest' + asset_obj._id[:5]
asset_obj.alt_ids.append(fake_altid)
asset_obj.asset_attrs = attributes
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
msg = 'step ' + str(step_number) + ' description update failed'
alt_id_name = RT.Asset + ":" + asset_obj.name + '-' + asset_obj._id[:5]
self.assertEqual(asset_obj.description, test_description, msg=msg)
picklist = []
picklist = self.OMS.get_assets_picklist(id_only='False')
if verbose: log.debug('\n\n[unit] asset picklist: %s', picklist)
self.assertEqual(3, len(picklist), msg='should have 3 item(s) in pick list; assert failed')
self.assertEqual(1, len(asset_obj.alt_ids), msg='should have 1 and only one item in alt_ids')
            self.assertEqual(asset_obj.alt_ids[0], alt_id_name, msg='alt_id assigned not equal to expected')
if verbose: log.debug('\n\n[unit] step %d UPDATED asset_obj.alt_ids: %s', step_number, asset_obj.alt_ids)
if verbose: log.debug('\n\n[unit] step %d picklist: %s', step_number, picklist)
unique = self.unique_altids(RT.Asset)
            if not unique:
log.debug('\n\n[unit] duplicate altids found')
else:
log.debug('\n\n[unit] all altids unique')
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] failed Step %d ', step_number, exc_info=True)
#raise # raise here to fail test case
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step 5. Create Asset with res.name == 'AssetUpdateTest' (negative test)
# create asset
# Expect asset to be created and altid of 'Asset:(res.name)-12345' where 12345 are asset_obj._id[:5]
# Leave alt_ids, add another altid (with consistent namespace) and issue update_asset - expect failure
# Error message:
# 'BadRequest: 400 - marine tracking resources require one and only one unique alt_id value'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
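        # marine tracking resources carry exactly one alt_id, so appending a second
        # one (even in the correct Asset namespace) should trigger the BadRequest above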
try:
step_number += 1
log.debug('\n\n[unit] Step %d. Create Asset with res.name == \'AssetUpdateTest\' (expect failure)', step_number)
asset_obj = IonObject(RT.Asset, name='AssetUpdateTest', description='description...')
fake_altid = RT.Asset + ':AssetUpdateTest-1'
asset_obj.alt_ids.append(fake_altid)
asset_id = self.OMS.create_asset(asset_obj, asset_type_id) # test association
asset_obj = self.OMS.read_asset(asset_id)
test_description = 'step ' + str(step_number)
asset_obj.description = test_description
if verbose: log.debug('\n\n[unit] existing altids: %s', asset_obj.alt_ids)
fake_altid = RT.Asset + ':AssetUpdateTest-2' #+ asset_obj._id[:5]
asset_obj.alt_ids.append(fake_altid)
asset_obj.asset_attrs = attributes
self.OMS.update_asset(asset_obj)
asset_obj = self.OMS.read_asset(asset_id)
msg = 'step ' + str(step_number) + ' description update failed'
alt_id_name = RT.Asset + ":" + asset_obj.name + '-' + asset_obj._id[:5]
self.assertEqual(asset_obj.description, test_description, msg=msg)
picklist = []
picklist = self.OMS.get_assets_picklist(id_only='False')
if verbose: log.debug('\n\n[unit] asset picklist: %s', picklist)
self.assertEqual(3, len(picklist), msg='should have 3 item(s) in pick list; assert failed')
self.assertEqual(1, len(asset_obj.alt_ids), msg='should have 1 and only one item in alt_ids')
            self.assertEqual(asset_obj.alt_ids[0], alt_id_name, msg='alt_id assigned not equal to expected')
if verbose: log.debug('\n\n[unit] step %d UPDATED asset_obj.alt_ids: %s', step_number, asset_obj.alt_ids)
if verbose: log.debug('\n\n[unit] step %d picklist: %s', step_number, picklist)
unique = self.unique_altids(RT.Asset)
            if not unique:
log.debug('\n\n[unit] duplicate altids found')
else:
log.debug('\n\n[unit] all altids unique')
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] failed Step %d ', step_number, exc_info=True)
#raise # raise here to fail test case
log.debug('\n\n***** Completed: test_asset_update_and_altid')
# -----
# ----- UNIT TEST: test_create_event_duration_extension
# -----
@attr('UNIT', group='sa')
def test_create_event_duration_extension(self):
log.debug('\n\n***** Start : test_create_event_duration_extension')
# Create EventDurationType with 2 AttributeSpecifications
# Create an EventDuration with 2 attributes
# EventDurationType [assign] EventDuration
# Create EventDurationExtension(using EventDuration id)
# Show associations for EventDurationExtension (verify one is displayed)
# Cleanup
verbose = True
# ----- Create EventDurationType object
ion_ed_type = IonObject(RT.EventDurationType, name='Test EventDurationType')
ed_type_id = self.OMS.create_event_duration_type(ion_ed_type)
ed_type_obj = self.OMS.read_event_duration_type(ed_type_id)
# Create AttributeSpecification 1
attribute_specification = self._create_attribute_specification('StringValue', 'operator name', ed_type_obj.name,None,None,None)
ed_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_event_duration_type(ed_type_obj)
# Create AttributeSpecification 2
attribute_specification = self._create_attribute_specification('RealValue', 'operator height', ed_type_obj.name,None,None,None)
ed_type_obj = self.OMS.read_event_duration_type(ed_type_id)
ed_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_event_duration_type(ed_type_obj)
ed_type_obj = self.OMS.read_event_duration_type(ed_type_id)
# ----- Create EventDuration
ed_obj = IonObject(RT.EventDuration, name='Test EventDuration')
ed_id = self.OMS.create_event_duration(ed_obj, ed_type_id)
ed_obj = self.OMS.read_event_duration(ed_id)
# Create Attribute for EventDuration; update EventDuration
if verbose: log.debug('\n\n***** creating attributes...')
attribute = self._create_attribute( 'StringValue', 'operator name', 'unique sysid')
ed_obj.event_duration_attrs[attribute['name']] = attribute
attribute = self._create_attribute( 'RealValue', 'operator height', 3.0)
ed_obj.event_duration_attrs[attribute['name']] = attribute
if verbose: log.debug('\n\n***** update_event_duration')
if verbose: log.debug('\n\n***** ed_obj: %s', ed_obj)
self.OMS.update_event_duration(ed_obj)
if verbose: log.debug('\n\n***** read_event_duration')
ed_obj = self.OMS.read_event_duration(ed_id)
# ----- assign EventDurationType to EventDuration
#self.OMS.assign_event_duration_type_to_event_duration(ed_type_id, ed_id)
#if verbose: log.debug('\n\n***** Create Association: EventDuration (predicate=PRED.implementsEventDurationType) EventDurationType')
# - - - - - - - - - - - - - - - - - - - -
# Create an EventDurationExtension (using EventDuration id)
ee = self.OMS.get_event_duration_extension(ed_id)
if verbose: log.debug('\n\n***** Create and Display EventDurationExtension: %s', ee)
if verbose: log.debug('\n\n*****\n***** Note: EventDurationExtensionID: %s, EventDurationID: %s', ee._id, ed_id)
# ----- Review Associations (shows an association between EventDuration and EventDurationType)
if verbose: log.debug('\n\n***** Review Associations')
extension_associations = self.container.resource_registry.find_associations(anyside=ee._id, id_only=False)
if verbose: log.debug('\n\n***** Extension Associations(%d): %s ',
len(extension_associations),extension_associations)
# ----- Clean up
if verbose: log.debug('\n\n***** Cleanup........')
self.OMS.unassign_event_duration_type_from_event_duration(ed_type_id, ed_id)
self.OMS.force_delete_event_duration_type(ed_type_id)
self.OMS.force_delete_event_duration(ed_id)
log.debug('\n\n***** Completed: test_create_event_duration_extension')
# -----
# ----- UNIT TEST: test_create_event_duration_extension_with_prepare
# -----
@attr('UNIT', group='sa')
def test_create_event_duration_extension_with_prepare(self):
log.debug('\n\n***** Start : test_create_event_duration_extension_with_prepare')
# Create EventDurationType with 2 AttributeSpecifications
# Create an EventDuration with 2 attributes
# EventDurationType [assign] EventDuration
# Create EventDurationExtension(using EventDuration id)
# Show associations for EventDurationExtension (verify one is displayed)
# Cleanup
verbose = False
# ----- Create EventDurationType object
ion_ed_type = IonObject(RT.EventDurationType, name='Test EventDurationType')
ed_type_id = self.OMS.create_event_duration_type(ion_ed_type)
ed_type_obj = self.OMS.read_event_duration_type(ed_type_id)
# Create AttributeSpecification 1
attribute_specification = self._create_attribute_specification('StringValue', 'operator name', ed_type_obj.name,None,None,None)
ed_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_event_duration_type(ed_type_obj)
# Create AttributeSpecification 2
attribute_specification = self._create_attribute_specification('RealValue', 'operator height', ed_type_obj.name,None,None,None)
ed_type_obj = self.OMS.read_event_duration_type(ed_type_id)
ed_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_event_duration_type(ed_type_obj)
ed_type_obj = self.OMS.read_event_duration_type(ed_type_id)
# ----- Create EventDuration
ed_obj = IonObject(RT.EventDuration, name='Test EventDuration')
ed_id = self.OMS.create_event_duration(ed_obj, ed_type_id) # add association on create
ed_obj = self.OMS.read_event_duration(ed_id)
if verbose: log.debug('\n\n***** Review Associations from Create')
associations = self.container.resource_registry.find_associations(anyside=ed_id, id_only=False)
if verbose: log.debug('\n\n***** Associations(%d): %s ',len(associations),associations)
self.assertEqual(1, len(associations), msg='association created at time of event duration create')
# hook
if verbose: log.debug('\n\n***** creating attributes...')
# Create Attribute for EventDuration; update EventDuration
attribute = self._create_attribute( 'StringValue', 'operator name', 'unique sysid')
ed_obj.event_duration_attrs[attribute['name']] = attribute
attribute = self._create_attribute( 'RealValue', 'operator height', 51.5)
ed_obj.event_duration_attrs[attribute['name']] = attribute
self.OMS.update_event_duration(ed_obj)
ed_obj = self.OMS.read_event_duration(ed_id)
# ----- assign EventDurationType to EventDuration
#self.OMS.assign_event_duration_type_to_event_duration(ed_type_id, ed_id)
#if verbose: log.debug('\n\n***** Create Association: EventDuration (predicate=PRED.implementsEventDurationType) EventDurationType')
# - - - - - - - - - - - - - - - - - - - -
# Create an EventDurationExtension (using EventDuration id)
ee = self.OMS.get_event_duration_extension(ed_id)
if verbose: log.debug('\n\n***** Create and Display EventDurationExtension: %s', ee)
if verbose: log.debug('\n\n*****\n***** Note: EventDurationExtensionID: %s, EventDurationID: %s', ee._id, ed_id)
# - - - - - - - - - - - - - - - - - - - -
# Create an AssetPrepareSupport (using Asset id)
edps = self.OMS.prepare_event_duration_support(ed_id)
if verbose: log.debug('\n\n OMS.prepare_event_duration_support returned with %s', str(edps))
# ----- Review Associations (shows an association between EventDuration and EventDurationType)
if verbose: log.debug('\n\n***** Review Associations')
extension_associations = self.container.resource_registry.find_associations(anyside=ee._id, id_only=False)
if verbose: log.debug('\n\n***** Extension Associations(%d): %s ',
len(extension_associations),extension_associations)
# ----- Clean up
if verbose: log.debug('\n\n***** Cleanup........')
self.OMS.unassign_event_duration_type_from_event_duration(ed_type_id, ed_id)
self.OMS.force_delete_event_duration_type(ed_type_id)
self.OMS.force_delete_event_duration(ed_id)
log.debug('\n\n***** Completed: test_create_event_duration_extension_with_prepare')
# -----
# ----- UNIT TEST: test_create_event_duration_type
# -----
@attr('UNIT', group='sa')
def test_create_event_duration_type(self):
log.debug("\n\n***** Start : test_create_event_duration_type")
# ----- create EventDurationType
event_duration_type_obj = IonObject(RT.EventDurationType,
name='TestEventDurationType',
description='a new EventDurationType')
event_duration_type_id = self.OMS.create_event_duration_type(event_duration_type_obj)
event_duration_type = self.OMS.read_event_duration_type(event_duration_type_id)
# Create AttributeSpecification and update EventDurationType
attr_spec_obj = IonObject(OT.AttributeSpecification)
attr_spec_obj['id'] = 's_name'
attr_spec_obj['description'] = 'some description'
attr_spec_obj['value_type'] = 'StringValue'
attr_spec_obj['group_label'] = 'a group_label'
attr_spec_obj['attr_label'] = 'a attr_label'
attr_spec_obj['rank'] = '1.1'
attr_spec_obj['visibility'] = 'True'
attr_spec_obj['value_constraints'] = ''
attr_spec_obj['default_value'] = 'some defaultValue'
attr_spec_obj['uom'] = 'some unitOfMeasure'
        attr_spec_obj['value_pattern'] = r'[\w - \.]{1,32}'
attr_spec_obj['cardinality'] = '0..1'
attr_spec_obj['editable'] = 'True'
attr_spec_obj['journal'] = 'False'
attr_spec_obj['_source_id'] = 'TestEventDurationType'
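        # note: every AttributeSpecification field here is carried as a string
        # (e.g. visibility 'True', rank '1.1'), consistent with the other tests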
event_duration_type.attribute_specifications[attr_spec_obj['id']] = attr_spec_obj
self.OMS.update_event_duration_type(event_duration_type)
# ---- cleanup
self.OMS.force_delete_event_duration_type(event_duration_type_id)
log.debug("\n\n***** Completed: test_create_event_duration_type")
# -----
# ----- UNIT TEST: CreateEventDuration (note: make AttributeSpecification)
# -----
@attr('UNIT', group='sa')
def test_create_event_duration(self):
try:
log.debug('\n\n ***** Start : test_create_event_duration')
verbose = False
# ----- create EventDurationType object and read
event_duration_type_obj = IonObject(RT.EventDurationType, name='TestEventDurationType',
description='new EventDurationType')
event_duration_type_id = self.OMS.create_event_duration_type(event_duration_type_obj)
event_duration_type_obj = self.OMS.read_event_duration_type(event_duration_type_id)
# Create AttributeSpecification for s_name and update EventDurationType
attr_spec_obj = IonObject(OT.AttributeSpecification)
attr_spec_obj['id'] = 's_name'
attr_spec_obj['description'] = 'some description'
attr_spec_obj['value_type'] = 'StringValue'
attr_spec_obj['group_label'] = 'a group_label'
attr_spec_obj['attr_label'] = 'a attr_label'
attr_spec_obj['rank'] = '1.1'
attr_spec_obj['visibility'] = 'True'
attr_spec_obj['value_constraints'] = ''
attr_spec_obj['default_value'] = 'some defaultValue'
attr_spec_obj['uom'] = 'some unitOfMeasure'
            attr_spec_obj['value_pattern'] = r'[\w - \.]{1,32}'
attr_spec_obj['cardinality'] = '0..1'
attr_spec_obj['editable'] = 'True'
attr_spec_obj['journal'] = 'False'
attr_spec_obj['_source_id'] = 'TestEventDurationType'
event_duration_type_obj.attribute_specifications[attr_spec_obj['id']] = attr_spec_obj
self.OMS.update_event_duration_type(event_duration_type_obj)
event_duration_type_obj = self.OMS.read_event_duration_type(event_duration_type_id)
# Create AttributeSpecification for real value and update EventDurationType
attr_spec_obj = IonObject(OT.AttributeSpecification)
attr_spec_obj['id'] = 'real value'
attr_spec_obj['description'] = 'some description'
attr_spec_obj['value_type'] = 'RealValue'
attr_spec_obj['group_label'] = 'group_label for real value'
attr_spec_obj['attr_label'] = 'attr_label for real value'
attr_spec_obj['rank'] = '1.1'
attr_spec_obj['visibility'] = 'True'
attr_spec_obj['value_constraints'] = ''
attr_spec_obj['default_value'] = '1.0'
attr_spec_obj['uom'] = 'some unitOfMeasure'
            attr_spec_obj['value_pattern'] = r'\d*\.?\d*'
attr_spec_obj['cardinality'] = '0..1'
attr_spec_obj['editable'] = 'True'
attr_spec_obj['journal'] = 'False'
attr_spec_obj['_source_id'] = 'TestEventDurationType'
event_duration_type_obj.attribute_specifications[attr_spec_obj['id']] = attr_spec_obj
self.OMS.update_event_duration_type(event_duration_type_obj)
event_duration_type_obj = self.OMS.read_event_duration_type(event_duration_type_id)
if verbose: log.debug('\n\n***** \n***** EventDurationType: %s ',event_duration_type_obj)
# ----- create EventDuration object
event_duration_id = ''
event_duration_obj = IonObject(RT.EventDuration,name='EventDuration',description='new EventDuration')
try:
event_duration_id = self.OMS.create_event_duration(event_duration=event_duration_obj,
event_duration_type_id=event_duration_type_id)
except BadRequest, Argument:
log.debug('\n\n *** BadRequest: %s', Argument.get_error_message())
raise BadRequest(Argument.get_error_message())
except NotFound, Argument:
log.debug('\n\n *** NotFound: %s', Argument.get_error_message())
raise NotFound(Argument.get_error_message())
except Inconsistent, Argument:
log.debug('\n\n *** Inconsistent: %s', Argument.get_error_message())
raise Inconsistent(Argument.get_error_message())
except:
log.debug('\n\nfailed to create EventDuration obj with association')
if not event_duration_id:
raise BadRequest('create_event_duration failed to provide event_duration_id')
event_duration_obj = self.OMS.read_event_duration(event_duration_id)
# Populate the attribute(s)
attr_obj = IonObject(OT.Attribute, name='s_name')
attr_obj['name'] = 's_name'
values = []
value = self.create_value('unique sys id')
values.append(value)
value = self.create_value('a super secret high interest value?')
values.append(value)
attr_obj['value'] = values
event_duration_obj.event_duration_attrs[attr_obj['name']] = attr_obj
attr_obj = IonObject(OT.Attribute, name='Attribute real value')
attr_obj['name'] = 'real value'
values = []
value = self.create_value(2.078925)
values.append(value)
value = self.create_value(3.14)
values.append(value)
value = self.create_value(2114.94738)
values.append(value)
attr_obj['value'] = values
event_duration_obj.event_duration_attrs[attr_obj['name']] = attr_obj
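            # event_duration_attrs now holds two Attributes keyed by name:
            # 's_name' (two values) and 'real value' (three values); asserted below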
self.OMS.update_event_duration(event_duration_obj)
# read EventDuration and verify len of value attribute 'real value' == 3
event_duration_obj = self.OMS.read_event_duration(event_duration_id)
if not event_duration_obj.event_duration_attrs:
raise BadRequest('failed to produce event_duration_attrs on update')
self.assertEqual(2, len(event_duration_obj.event_duration_attrs), msg='should be two attributes')
temp = event_duration_obj.event_duration_attrs['real value']
self.assertEqual(3, len(temp['value']), msg='should have 3 values in attribute[value] field, type RealValue')
# ----- determine associations
id = event_duration_id
asset_event_associations = self.container.resource_registry.find_associations(anyside=id, id_only=False)
# ----- unassign asset associations
self.OMS.unassign_event_duration_type_from_event_duration(event_duration_type_id, event_duration_id)
# ----- cleanup
self.OMS.force_delete_event_duration_type(event_duration_type_id)
self.OMS.force_delete_event_duration(event_duration_id)
log.debug('\n\n***** Completed: test_create_event_duration')
except BadRequest, Argument:
log.debug('\n\n *** BadRequest: %s', Argument.get_error_message())
except NotFound, Argument:
log.debug('\n\n *** NotFound: %s', Argument.get_error_message())
except Inconsistent, Argument:
log.debug('\n\n *** Inconsistent: %s', Argument.get_error_message())
except:
log.debug('\n\nfailed to create EventDuration resource with association')
# -------------------------------------------------------------------------
# ----- UNIT TEST: test_update_attribute_specifications
# -----
@attr('UNIT', group='sa')
def test_update_attribute_specifications(self):
log.debug('\n\n***** Start : * test_update_attribute_specifications')
verbose = False
try:
#---------------------------------------------------------------------------------------
# Update the AssetType AttributeSpecification for a given attribute
#---------------------------------------------------------------------------------------
# ----- Create AssetType object
ion_asset_spec = IonObject(RT.AssetType, name='Test AssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_spec)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# Create AttributeSpecification 1
attribute_specification = self._create_attribute_specification('StringValue', 'operator name', asset_type_obj.name,None,None,None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
# Create AttributeSpecification 2
attribute_specification = self._create_attribute_specification('StringValue', 'operator height',asset_type_obj.name,None,None,None)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# assert two AttributeSpecifications
self.assertEqual(2, len(asset_type_obj.attribute_specifications), msg='should have two AttributeSpecifications')
# Read attribute specifications, modify and update
attribute_specifications = asset_type_obj.attribute_specifications
# Modify those attribute_specification - change description
spec_dict = {}
attribute_specification = attribute_specifications['operator name']
attribute_specification['description'] = 'a new description!'
spec_dict['operator name'] = attribute_specification
attribute_specification = attribute_specifications['operator height']
attribute_specification['description'] = 'operator height - a new description!'
spec_dict['operator height'] = attribute_specification
# add another AttributeSpecification
attr_spec = IonObject(OT.AttributeSpecification)
attr_spec['id'] = 'Attribute New'
attr_spec['description'] = '- - - an AttributeSpecification not currently available to this AssetSpecification'
attr_spec['value_type'] = 'StringValue'
attr_spec['group_label'] = 'Group of New Attributes'
attr_spec['attr_label'] = 'silly label'
attr_spec['rank'] = '1.5'
attr_spec['visibility'] = 'True'
attr_spec['value_constraints'] = ''
            attr_spec['value_pattern'] = r'[\w - \.]{1,32}'
attr_spec['default_value'] = 'a default StringValue'
attr_spec['uom'] = ''
attr_spec['cardinality'] = '0..1'
attr_spec['editable'] = 'True'
attr_spec['journal'] = 'False'
attr_spec['_source_id'] = 'Test AssetType'
spec_dict[attr_spec['id']] = attr_spec
self.OMS.update_attribute_specifications(resource_id=asset_type_id, spec_dict=spec_dict)
# read updated AssetType attribute_specifications, verify each attribute has been updated
# and a new attribute_specification has been added
xobj = self.OMS.read_asset_type(asset_type_id)
if xobj:
self.assertEqual(3,len(xobj.attribute_specifications), msg='should be three AttributeSpecifications')
# Cleanup
self.OMS.delete_asset_type(asset_type_id)
except:
            log.debug('\n\n[unit] test_update_attribute_specifications failed', exc_info=True)
log.debug('\n\n***** Test Completed: test_update_attribute_specifications')
# -------------------------------------------------------------------------
# ----- UNIT TEST: test_delete_attribute_specification
# -----
#@unittest.skip("targeting")
@attr('UNIT', group='sa')
def test_delete_attribute_specification(self):
log.debug('\n\n***** Start : * test_delete_attribute_specification')
#---------------------------------------------------------------------------------------
# Test service delete_attribute_specification
# 1. Create TypeResource object (AssetType)
# 2. Create and populate with two AttributeSpecifications
# 3. Exercise delete_attribute_specification
# 1. Send in empty list of AttributeSpecification names
# 2. Send in one valid AttributeSpecification name
# 3. Send in one invalid AttributeSpecification name ('junk name')
# 4. Send in last valid AttributeSpecification name
# 5. Send in resource_id for AssetType (with empty attribute_specifications - we deleted them above)
# and request an attribute be deleted.
#---------------------------------------------------------------------------------------
verbose = False
#---------------------------------------------------------------------------------------
# ----- Create TypeResource object (AssetType) and populate with 2 AttributeSpecifications
ion_asset_spec = IonObject(RT.AssetType, name='Test AssetType')
asset_type_id = self.OMS.create_asset_type(ion_asset_spec)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
# Create AttributeSpecification 1
        # using attribute_specification = _create_attribute_specification(value_type, id, source, constraints, pattern, codeset_name)
attribute_specification = self._create_attribute_specification('StringValue', 'operator name', asset_type_obj.name,None, None, None)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
# Create AttributeSpecification 2
attribute_specification = self._create_attribute_specification('RealValue', 'operator height', asset_type_obj.name,None, None, None)
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
asset_type_obj.attribute_specifications[attribute_specification['id']] = attribute_specification
self.OMS.update_asset_type(asset_type_obj)
        if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - - - - - ')
#---------------------------------------------------------------------------------------
# Exercise delete_attribute_specification
# 1. Send in empty list of AttributeSpecification names (receive BadRequest)
try:
attr_name_list = []
self.OMS.delete_attribute_specification(resource_id=asset_type_id, attr_spec_names=attr_name_list)
except BadRequest, Argument:
if verbose: log.debug('\n\n BadRequest: %s', Argument.get_error_message())
except NotFound, Argument:
if verbose: log.debug('\n\n NotFound: %s', Argument.get_error_message())
raise
except:
if verbose: log.debug('\n\n Exception!')
raise
        if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - - - - - ')
        # 2. Send in one valid AttributeSpecification name ('operator name')
try:
attr_name_list = ['operator name']
self.OMS.delete_attribute_specification(resource_id=asset_type_id, attr_spec_names=attr_name_list)
except BadRequest, Argument:
if verbose: log.debug('\n\n BadRequest: %s', Argument.get_error_message())
except NotFound, Argument:
if verbose: log.debug('\n\n NotFound: %s', Argument.get_error_message())
except:
if verbose: log.debug('\n\n Exception!')
        if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - - - - - ')
# 3. Send in one invalid AttributeSpecification name ('junk name') silent
try:
attr_name_list = ['junk name'] # valid AttributeSpecification to delete
self.OMS.delete_attribute_specification(resource_id=asset_type_id, attr_spec_names=attr_name_list)
except BadRequest, Argument:
if verbose: log.debug('\n\n BadRequest: %s', Argument.get_error_message())
except NotFound, Argument:
if verbose: log.debug('\n\n NotFound: %s', Argument.get_error_message())
except:
if verbose: log.debug('\n\n Exception!')
        if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - - - - - ')
# 4. Send in last valid AttributeSpecification name
try:
attr_name_list = ['operator height'] # valid AttributeSpecification to delete
self.OMS.delete_attribute_specification(resource_id=asset_type_id, attr_spec_names=attr_name_list)
except BadRequest, Argument:
if verbose: log.debug('\n\n BadRequest: %s', Argument.get_error_message())
except NotFound, Argument:
if verbose: log.debug('\n\n NotFound: %s', Argument.get_error_message())
except:
if verbose: log.debug('\n\n Exception!')
        if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - - - - - ')
        # 5. Send in resource_id for AssetType (with empty attribute_specifications)
# and request an attribute be deleted. (receive NotFound)
try:
attr_name_list = ['operator name'] # No AttributeSpecification to delete
self.OMS.delete_attribute_specification(resource_id=asset_type_id, attr_spec_names=attr_name_list)
except BadRequest, Argument:
if verbose: log.debug('\n\n BadRequest: %s', Argument.get_error_message())
raise
except NotFound, Argument:
if verbose: log.debug('\n\n NotFound: %s', Argument.get_error_message())
except:
if verbose: log.debug('\n\n Exception!')
raise
#---------------------------------------------------------------------------------------
# Cleanup
self.OMS.force_delete_asset_type(asset_type_id)
log.debug('\n\n***** Test Completed: test_delete_attribute_specification')
#-------------------------------------------------------
# CodeSpaces, Codes and CodeSets unit tests start...
#-------------------------------------------------------
# -------------------------------------------------------------------------
# UNIT TEST: test_create_codespace
# Exercises following:
# OMS.create_code_space
# OMS.update_code_space
# OMS.read_code_space
# OMS.force_delete_code_space
# OMS.delete_code_space
# OMS.read_codes_by_name returns list of Codes
# OMS.read_codesets_by_name returns list of CodeSets
# OMS.update_codes
# OMS.update_codesets
# OMS.delete_codes returns list of Codes (?) todo: mods per discussion
# OMS.delete_codesets returns list of CodeSets (?)
#
#@unittest.skip("targeting")
@attr('UNIT', group='sa')
def test_create_codespace(self):
log.debug('\n\n***** Start : * test_create_codespace')
#---------------------------------------------------------------------------------------
# Process:
        # 1. Create CodeSpace and Code(s); create two CodeSets using the codes
# 2. Request codesets by list of name(s) - one codeset name valid, one codeset name not
# 3. Request codes by list of name(s) - two code names valid, one code name invalid
# 4. Update codes (change description field for 'Repair Event')
# 5. Update codeset - add new code to CodeSpace, then CodeSet
# 6. Delete codes - delete code(s) in CodeSpace (uses list of code names to identify what to delete)
# 7. Delete codesets - delete codeset(s) in CodeSpace (uses list of codeset name(s) to identify what to delete)
# 8. Delete all codesets in CodeSpace
# 9. Cleanup
#---------------------------------------------------------------------------------------
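        # Sketch of the object model exercised below (values illustrative):
        #   code_space = IonObject(RT.CodeSpace, name='MAM', ...)
        #   code_space.codes[name] = IonObject(OT.Code, id=str(uuid.uuid4()), name=name, ...)
        #   codeset = IonObject(OT.CodeSet, name='DemoCodeSet', ...)
        #   codeset.enumeration = ['demo code1', 'cheese']    # enumeration holds Code names
        #   code_space.codesets[codeset.name] = codeset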
verbose = False
#---------------------------------------------------------------------------------------
        # 1. Create CodeSpace and Code(s); create two CodeSets using the codes
#---------------------------------------------------------------------------------------
code_space = IonObject(RT.CodeSpace, name='MAM', description='Marine Asset Management')
id = self.OMS.create_code_space(code_space)
code_space = self.OMS.read_code_space(id)
# - - - Create codes, create CodeSet with name 'DemoCodeSet', add codes to CodeSet
description = ''
name = 'demo code1'
code_space.codes[name] = IonObject(OT.Code,id=str(uuid.uuid4()), name=name, description=description)
name = 'cheese'
code_space.codes[name] = IonObject(OT.Code,id=str(uuid.uuid4()), name=name, description=description)
# Create a code in CodeSpace not utilized w/i any codeset
name = 'unused code'
code_space.codes[name] = IonObject(OT.Code,id=str(uuid.uuid4()), name=name, description=description)
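        # note: each Code carries a uuid4 'id' as its identity; the CodeSpace codes
        # dict is keyed by the human-readable name (see the step 6 discussion below)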
self.OMS.update_code_space(code_space)
code_space = self.OMS.read_code_space(id)
# - - - Create codes and add to CodeSpace (used in CodeSet named 'event_type')
EventTypeCode = ['Return to Manufacturer Event', 'Deployment Event', 'Repair Event',
'Inoperability Event', 'Retirement Event', 'Integration Event', 'Test Event',
'Calibration Event','cheese'] # note 'cheese' is used in TWO codesets
# for all items in EventTypeCode list - if not already a Code, create code in CodeSpace
for name in EventTypeCode:
if name not in code_space.codes.keys():
code_space.codes[name] = IonObject(OT.Code,id=str(uuid.uuid4()), name=name, description=description)
else:
if verbose: log.debug('\n\n code name %s already in code_space', name)
self.OMS.update_code_space(code_space)
code_space = self.OMS.read_code_space(id)
number_of_codesets = len(code_space.codesets)
#log.debug('\n\n cs.codes(%d): %s', len(code_space.codes), code_space.codes)
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
# Create CodeSet 'event type', add list of names to enumeration; add CodeSet to CodeSpace
# CodeSet.enumeration is a list of Code names
try:
codeset1 = IonObject(OT.CodeSet,name='event type',description='Valid codes for EventDuration Attribute: event type.')
#log.debug('\n\n after creating codeset1...')
#log.debug('\n\n code_space.codes.keys()...%s', code_space.codes.keys())
for name in EventTypeCode:
#log.debug('\n\n name %s in EventTypeCode', name)
if name in code_space.codes.keys(): # name for a valid code
#codeset1.enumeration.append(code_space.codes[name])
if name not in codeset1.enumeration: # if name not already in enumeration
codeset1.enumeration.append(name)
#log.debug('\n\n[unit] add name %s to enumeration..', name)
else:
                        if verbose: log.debug('\n\n[unit] name %s already in enumeration..', name)
else:
if verbose: log.debug('\n\n name %s not in CodeSpace codes, do not add to enumeration', name)
# add 'cheese' to enumeration for test removal of 'cheese' from two code_sets
#codeset1.enumeration.append('cheese')
code_space.codesets[codeset1.name] = codeset1
self.OMS.update_code_space(code_space)
code_space = self.OMS.read_code_space(id)
# Create CodeSet 'DemoCodeSet', add codes; add CodeSet to CodeSpace
democodeset = IonObject(OT.CodeSet,name='DemoCodeSet',description='second [demo] code set')
#democodeset.enumeration = [code_space.codes['demo code1'], code_space.codes['cheese'] ]
democodeset.enumeration = ['demo code1', 'cheese' ]
code_space.codesets[democodeset.name] = democodeset
self.OMS.update_code_space(code_space)
code_space = self.OMS.read_code_space(id)
self.assertTrue(code_space.codesets[democodeset.name], msg='democodeset.name assert True')
self.assertEqual(2,len(code_space.codesets[democodeset.name].enumeration), msg='len of democodeset.enumeration' )
self.assertEqual(number_of_codesets+2, len(code_space.codesets),msg='number of codesets')
self.assertEqual(9, len(code_space.codesets['event type'].enumeration), msg='len of enumeration')
if verbose: log.debug('\n\n[unit] code_space.codesets: %s', code_space.codesets)
except:
            log.debug('\n\n[unit] Failed to create CodeSpace, Codes or CodeSets', exc_info=True)
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 2. Request codesets by list of name(s) - request one codeset name valid, one codeset name not
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
request = []
request.append('event type')
request.append('non existent codeset name')
codesets = self.OMS.read_codesets_by_name(resource_id=id, names=request)
self.assertEqual(1, len(codesets))
except:
log.debug('\n\n[unit] Failure: read_codesets_by_name')
raise
#---------------------------------------------------------------------------------------
# 3. Request codes by list of name(s) - two code names valid, one code name invalid
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
codes = []
request = []
request.append('Repair Event')
request.append('invalid code name')
request.append('Test Event')
codes = self.OMS.read_codes_by_name(resource_id=id, names=request, id_only=False)
self.assertEqual(2, len(codes))
except:
log.debug('\n\n[unit] Failure: read_codes_by_name')
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 4. Update codes
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
cs = self.OMS.read_code_space(id)
updated_description = '*** UPDATED DESCRIPTION ***'
name = 'Repair Event'
code = cs.codes[name]
code.description = updated_description
# Dictionary of codes
codes = {}
codes[code.name] = code
self.OMS.update_codes(id,codes)
cs = self.OMS.read_code_space(id)
self.assertEqual(cs.codes[name].description, updated_description)
except:
log.debug('\n\n[unit] Failure: update_codes')
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 5. Update codeset - add new code to CodeSpace then CodeSet
# (using enumeration as list of Codes, NOT Code names) todo correct this
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
# Make a new code called 'Demo Event' and add to CodeSpace
cs = self.OMS.read_code_space(id)
name = 'Demo Event'
description = ''
cs.codes[name] = IonObject(OT.Code,id=str(uuid.uuid4()), name=name, description=description)
self.OMS.update_code_space(cs)
# Add this code to enumeration for democodeset (add str not code) todo
#democodeset.enumeration.append(cs.codes[name])
democodeset.enumeration.append(name)
# Dictionary of codesets
codesets = {}
codesets[democodeset.name] = democodeset
self.OMS.update_codesets(id,codesets)
cs = self.OMS.read_code_space(id)
self.assertEqual(cs.codes[name].name, name)
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] Failure: update_codesets')
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 6. Delete codes - delete code(s) in CodeSpace
#
# Otherwise, if deleting Code from CodeSpace...
# Determine if code is in use before delete; if in use in a codeset delete from codeset
# update codeset (todo correction per discussion with Matt)
#
# Discuss: code name values and use of codes
# id versus name. Codes are uniquely identified by the id value, not the name value.
# Discuss: do codes require a description field? If not recommend removing (todo?)
# Discuss: CodeSpace revisions - who's got what (delta between CodeSpaces on revision)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
# Modify CodeSet name and description value for code with name 'cheese'
cs = self.OMS.read_code_space(id)
number_of_codes = len(cs.codes)
updated_description = 'french cheese'
updated_name = 'fromage'
            # get the 'cheese' code from CodeSpace and modify the name and description
            # of the cheese code used in democodeset.enumeration. (todo make new code with same id, change name)
name = 'cheese'
code = cs.codes[name]
code.name = updated_name
code.description = updated_description
# Dictionary of codesets
codesets = {}
codesets[democodeset.name] = democodeset
self.OMS.update_codesets(id,codesets)
cs = self.OMS.read_code_space(id)
#log.debug('\n\n democodeset.enumeration: %s', cs.codesets[democodeset.name].enumeration)
# Request deletion of one or more codes; some existent, some not and some which
# exist are in use in CodeSets
tcodes = []
tcodes = cs.codes.keys()
if verbose: log.debug('\n\n[unit] codes available before delete(%d): %s', len(cs.codes.keys()),cs.codes.keys())
codes = [] # Names of Codes to be deleted
codes.append('unused code') # should be deleted
codes.append('non existent code') # doesn't exist to delete, but shouldn't fail service (ignore)
codes.append('cheese') # delete: from 2 CodeSet enumerations; Code from CodeSpace
codes.append('unused code') # should already be deleted, service should not fail
            codes.append('Demo Event')          # delete from DemoCodeSet, leaving one code name in enumeration
codes.append('demo code1') # delete from DemoCodeSet, leaving zero code names in enumeration
# Delete one code from CodeSpace; delete_codes returns (per spec, a list of Codes)
rcodes = []
rcodes = self.OMS.delete_codes(id, codes) # Should return dictionary of codes, not list of codes
names_of_codes = []
if rcodes:
for c in rcodes:
names_of_codes.append(c.name)
#if verbose: log.debug('\n\n***** returned list of codes: %s', rcodes)
if verbose: log.debug('\n\n[unit] names of codes returned(%d): %s', len(names_of_codes),names_of_codes)
cs = self.OMS.read_code_space(id)
self.assertTrue(cs.codes)
if verbose: log.debug('\n\n[unit] a. check number of codes(%d)', len(cs.codes))
# Try to delete a code name from codeset when there is an empty codeset enumeration
codes = [] # Names of Codes to be deleted;
codes.append('Repair Event') # is code in codespace, if so try to delete when
# empty code set enumeration in CodeSpace
rcodes = []
rcodes = self.OMS.delete_codes(id, codes)
cs = self.OMS.read_code_space(id)
if verbose: log.debug('\n\n[unit] b. check number of codes(%d)', len(cs.codes))
if verbose: log.debug('\n\n[unit] AFTER: cs.codes(%d): %s', len(cs.codes),cs.codes)
if verbose: log.debug('\n\n[unit] sample dictionary codesets(%d): %s', len(cs.codesets), cs.codesets)
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] Failure: delete codes')
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 7. Delete codesets - delete codesets(s) in CodeSpace based on list of CodeSet name(s)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
cs = self.OMS.read_code_space(id)
number_of_codesets = len(cs.codesets)
# Delete one existing codeset, if codeset does not exist then verify pass ok
            codesets = []                       # names of CodeSets to be deleted
codesets.append('DemoCodeSet') # exists, should be deleted
codesets.append('non existent codeset') # doesn't exist to delete
rcodesets = [] # list of CodeSets returned
rcodesets = self.OMS.delete_codesets(id, codesets)
if verbose:
if rcodesets:
if verbose: log.debug('\n\n[unit] (after deleting one CodeSet) returned codesets (%d): %s', len(rcodesets), rcodesets)
cs = self.OMS.read_code_space(id)
self.assertEqual((number_of_codesets-1), len(cs.codesets))
except BadRequest, Argument:
log.debug('\n\n[unit] BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n[unit] NotFound: %s', Argument)
except:
log.debug('\n\n[unit] Failure: delete codesets')
raise
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 8. Delete all codesets in CodeSpace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
cs = self.OMS.read_code_space(id)
if cs.codesets:
if verbose: log.debug('\n\n***** before delete codesets (%d): %s', len(cs.codesets), cs.codesets)
codeset_name_list = cs.codesets.keys()
rcodesets = []
rcodesets = self.OMS.delete_codesets(id, codeset_name_list)
self.assertEqual(0, len(rcodesets))
else:
if verbose: log.debug('\n\n***** No codesets in CodeSpace to delete!')
except BadRequest, Argument:
log.debug('\n\n *** BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n *** NotFound: %s', Argument)
        except:
            log.debug('\n\n *** Failure: delete all codesets', exc_info=True)
raise
#---------------------------------------------------------------------------------------
# 9. Cleanup
#---------------------------------------------------------------------------------------
if verbose: log.debug('\n\n - - - - - - - - - - - - - - - - - - -')
try:
if verbose: log.debug('\n\n[unit] force delete CodeSpace...')
self.OMS.force_delete_code_space(id)
#self.OMS.delete_code_space(id)
except BadRequest, Argument:
log.debug('\n\n *** BadRequest: %s', Argument)
except NotFound, Argument:
log.debug('\n\n *** NotFound: %s', Argument)
        except:
            log.debug('\n\n *** Failure: force delete CodeSpace', exc_info=True)
raise
log.debug('\n\n***** Test Completed: test_create_codespace')
# -----
# ----- UNIT TEST: test_upload_codes
# -----
@attr('UNIT', group='sa')
def test_upload_codes(self):
# test service declare_asset_tracking_codes
# Load CodeSpace, Codes and CodeSets from xlsx, view resources using localhost:8080 at breakpoints
# Continue to delete resources objects created, use localhost:8080 to observe all have been
# deleted at cleanup.
#
# sample response:
# response:
# {
# 'status': 'ok',
# 'res_modified':
# {
# 'code_spaces': ['3c9b3df056c040b3aa3179aef7d19dd0'],
# 'codes': [],
# 'code_sets': []
# },
# 'err_msg': '',
# 'res_removed': {
# 'code_spaces': [],
# 'codes': [],
# 'code_sets': []
# }
# }
#
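        # the keys above ('status', 'err_msg', 'res_modified', 'res_removed') form
        # the response contract that the checks below rely on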
log.debug('\n\n***** Start : test_upload_codes')
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input and folder(s) and files for driving test
fid = TEST_XLS_FOLDER + 'CodeSpaces150.xlsx' # CodeSpaces, Codes and CodeSets
code_space_ids = []
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] == 'ok' and not response['err_msg']:
if response['res_modified']:
if 'code_spaces' in response['res_modified']:
code_space_ids = response['res_modified']['code_spaces'][:]
if 'codes' in response['res_modified']:
code_names = response['res_modified']['codes'][:]
if 'code_sets' in response['res_modified']:
code_set_names = response['res_modified']['code_sets'][:]
                    if response['res_removed']:
                        if 'code_spaces' in response['res_removed']:
                            code_space_ids = response['res_removed']['code_spaces'][:]
                        if 'codes' in response['res_removed']:
                            code_names = response['res_removed']['codes'][:]
                        if 'code_sets' in response['res_removed']:
                            code_set_names = response['res_removed']['code_sets'][:]
if code_space_ids:
if len(code_space_ids) == 1:
code_space_obj = self.OMS.read_code_space(code_space_ids[0])
if code_space_obj:
if code_space_obj.codes:
if verbose: log.debug('\n\n code_space_obj.codes: %s\n\n', code_space_obj.codes.keys())
if code_space_obj.codesets:
if verbose: log.debug('\n\n code_space_obj.codes: %s\n\n', code_space_obj.codesets.keys())
else:
                    if verbose: log.debug('\n\n[unit] more than one CodeSpace id returned; expected exactly one.')
                    raise BadRequest('[unit] more than one CodeSpace id returned; expected exactly one.')
# set breakpoint for testing...
if breakpoint1A:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
if code_space_ids:
if code_space_ids[0]:
self.OMS.force_delete_code_space(code_space_ids[0])
# set breakpoint for testing...code_space should be deleted
if breakpoint1B:
log.debug('\n\n[unit] verify all code_space(s) which have been created are removed.')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', fid, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_codes')
# -----
# ----- UNIT TEST: test_download_codes
# -----
@attr('UNIT', group='sa')
def test_download_codes(self):
# test service(s) - use declare_asset_tracking_codes to declare CodeSpace, Codes and CodeSets
# in the system. Once resources are loaded, call real_download_xls to generate asset tracking report (xls)
# (Note: modifications required in service declare_asset_tracking_codes since addition of CodeSpaces sheet)
log.debug('\n\n***** Start : test_download_codes')
verbose = False
breakpointLoaded = False # after loading marine tracking resources
breakpointCleanup = False # after update pass
breakpointVerifyCleanup = False # after cleanup
# Input and folder(s) and files for driving test
fid = TEST_XLS_FOLDER + 'CodeSpaces150.xlsx'
output_file = TEST_XLS_FOLDER + 'CodeSpaces150_report.xls'
const_code_space_name = "MAM"
        code_space_ids, code_ids, code_set_ids = [], [], []  # distinct lists (chained assignment would alias a single list)
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] == 'ok' and not response['err_msg']:
if 'code_spaces' in response['res_modified']:
code_space_ids = response['res_modified']['code_spaces'][:]
if code_space_ids:
if len(code_space_ids) != 1:
                            raise BadRequest('[unit] more than one CodeSpace id returned; expected exactly one.')
res_objs, res_keys = self.container.resource_registry.find_resources_ext(alt_id_ns=RT.CodeSpace,
alt_id=const_code_space_name, id_only=False)
if res_keys:
self.assertEqual(1,len(res_keys), msg='more than one codespace key returned')
else:
raise BadRequest('failed to receive codespace_id in response')
# Breakpoint - Marine Asset code related resources loaded into system
if breakpointLoaded:
log.debug('\n\n[unit] Breakpoint - Marine Asset code related resources loaded into system')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# call asset_tracking_report service, report on marine tracking code space related resources in System
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n[unit] request Marine Asset tracking codes report ...\n')
response = self.OMS.asset_tracking_report()
if not response:
log.debug('\n\n[unit] Failed to generate marine asset tracking codes report.')
raise BadRequest('Failed to generate asset tracking codes report')
else:
# receive content from download_xls service, write to file
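                # asset_tracking_report returns the workbook as a hex-encoded string;
                # binascii.a2b_hex below restores the binary xls content before writing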
try:
f = open(output_file, 'wb')
except:
log.error('failed to open xls file for write', exc_info=True)
raise
try:
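# the report is returned as a hex-encoded string; decode it back to raw xls bytes before writing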
rcontent = binascii.a2b_hex(response)
f.write(rcontent)
f.close()
except:
log.error('[unit] failed to write xls content to output file (%s)', output_file)
log.debug('\n\n[unit] marine asset tracking codes report saved to file: %s\n\n', output_file)
if breakpointCleanup:
log.debug('\n\n[unit] Breakpoint - preparing to delete resources which were created')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Cleanup
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n[unit] cleanup...')
if code_space_ids:
for id in code_space_ids:
self.OMS.force_delete_code_space(id)
# set breakpoint for testing...assets and asset_type should be deleted
if breakpointVerifyCleanup:
log.debug('\n\n[unit] Breakpoint - verify all Marine Asset resources have been removed')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', fid, exc_info=True)
raise # raise here to fail test case
finally:
log.debug('\n\n***** Completed : test_download_codes')
# -----
# ----- UNIT TEST: test_upload_xls_with_codes
# -----
@attr('UNIT', group='sa')
def test_upload_xls_with_codes(self):
# test service declare_asset_tracking_resources with CodeSpace(s), Codes, or CodeSets loaded
# (Currently only testing a single CodeSpace.) This test resembles a typical system engineering workflow,
# where changes are not CodeSpace, Code or CodeSet related but are focused on introducing Assets, AssetTypes,
# EventDurations and EventDurationTypes into the OOI system.
# Step 1. Load CodeSpaces, Codes and CodeSets only
# Step 2. Load everything except CodeSpaces, Codes and CodeSets
log.debug('\n\n***** Start : test_upload_xls_with_codes')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint_cleanup = False
breakpoint_after_cleanup = False
# Input and folder(s) and files for driving test
fid_codes = TEST_XLS_FOLDER + 'test500-code-related-only.xlsx' # CodeSpaces, Codes and CodeSets
fid = TEST_XLS_FOLDER + 'test500-no-code-related.xlsx' # no CodeSpaces, Codes or CodeSets
current_file = fid_codes
try:
code_space_ids = []
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine asset code related information into system (CodeSpace(s), Code(s), CodeSet(s))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = fid_codes
response = self.load_marine_assets_from_xlsx(fid_codes)
if response:
#if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
if response['res_modified']:
if 'code_spaces' in response['res_modified']:
code_space_ids = response['res_modified']['code_spaces'][:]
if code_space_ids:
if len(code_space_ids) == 1:
code_space_obj = self.OMS.read_code_space(code_space_ids[0])
if verbose: log.debug('\n\n[unit] code_space_obj.codes: %s\n\n', code_space_obj.codes.keys())
if verbose: log.debug('\n\n[unit] code_space_obj.codesets: %s\n\n', code_space_obj.codesets.keys())
if verbose: log.debug('\n\n[unit] codeset[event type].enumeration: %s\n\n',
code_space_obj.codesets['event type']['enumeration'])
elif len(code_space_ids) > 1:
if verbose: log.debug('\n\n[unit] more than one CodeSpace id returned, issue.')
raise BadRequest('[unit] more than one CodeSpace id returned, issue.')
else:
raise BadRequest('[unit] CodeSpace failed to load.')
_, res_keys = self.container.resource_registry.find_resources_ext(alt_id_ns=RT.CodeSpace,
id_only=False)
if res_keys:
if verbose: log.debug('\n\n[unit] res_keys: %s', res_keys)
# set breakpoint for testing...
if breakpoint1A:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = fid
result = self.load_marine_assets_from_xlsx(fid)
if verbose: log.debug('\n\n (pass 2) response: %s', result)
asset_type_ids = result['res_modified']['asset_types'] # ids of resources created
if asset_type_ids:
if verbose: log.debug('\n\n[unit] have %d asset_type_ids: %s', len(asset_type_ids), asset_type_ids)
else:
log.debug('\n\n[unit] Error no asset_types returned!')
raise BadRequest('Error no asset_types returned!')
# set breakpoint for testing...
if breakpoint_cleanup:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Cleanup marine asset resources and CodeSpace
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
total_resources_deleted = 0
asset_type_ids = result['res_modified']['asset_types'][:]
if asset_type_ids:
total_resources_deleted += len(asset_type_ids)
for id in asset_type_ids:
self.OMS.force_delete_asset_type(id)
event_type_ids = result['res_modified']['event_types'][:]
if event_type_ids:
total_resources_deleted += len(event_type_ids)
for id in event_type_ids:
self.OMS.force_delete_event_duration_type(id)
asset_ids = result['res_modified']['assets'][:]
if asset_ids:
total_resources_deleted += len(asset_ids)
for id in asset_ids:
self.OMS.force_delete_asset(id)
event_ids = result['res_modified']['events'][:]
if event_ids:
total_resources_deleted += len(event_ids)
for id in event_ids:
self.OMS.force_delete_event_duration(id)
if code_space_ids:
if verbose: log.debug('\n\n[unit] cleanup...code_space_ids...')
total_resources_deleted += len(code_space_ids)
for code_space_id in code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
# set breakpoint for testing...assets and asset_type should be deleted
if breakpoint_after_cleanup:
log.debug('\n\n[unit] verify all resources (%d) which have been created are removed.', total_resources_deleted)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_with_codes')
#-------------------------------------------------------
# CodeSpaces, Codes and CodeSets unit tests end...
#-------------------------------------------------------
#------------------------------------------------------------------
# Section: Handle declarations from xlsx spreadsheets (start...)
#------------------------------------------------------------------
# -----
# ----- UNIT TEST: test_empty_workbook
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_empty_workbook(self):
# test OMS service declare_asset_tracking_resources
log.debug('\n\n***** Start : test_empty_workbook')
# Input and folder(s) and files for driving test
fid = TEST_XLS_FOLDER + 'EmptyWorkbook.xlsx' # negative test
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if response['status'] != 'ok' or response['err_msg']:
log.debug('\n\n[unit] Error: %s', response['err_msg'])
else:
log.debug('\n\n[unit] Failed test - should have received an err_msg')
raise BadRequest('[unit] empty workbook should have produced an err_msg')
except:
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_empty_workbook')
# -----
# ----- UNIT TEST: test_upload_xls
# -----
@attr('UNIT', group='sa')
def test_upload_xls(self):
# test OMS service declare_asset_tracking_resources
log.debug('\n\n***** Start : test_upload_xls')
self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input and folder(s) and files for driving test
fid = TEST_XLS_FOLDER + 'test500.xlsx'
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [], [], [], [], []
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] != 'ok' or response['err_msg']:
if response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
elif response['status']:
raise BadRequest('[unit] Error: %s' % response['status'])
else:
raise BadRequest('[unit] Error: err_msg and status not populated')
if response['res_modified']:
#code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if 'codespaces' in response['res_modified']:
code_space_ids = response['res_modified']['codespaces'][:] # ids of resources created
if 'asset_types' in response['res_modified']:
asset_type_ids = response['res_modified']['asset_types'] # ids of resources created
if 'assets' in response['res_modified']:
asset_ids = response['res_modified']['assets'] # ids of resources created
if 'event_types' in response['res_modified']:
event_type_ids = response['res_modified']['event_types'] # ids of resources created
if 'events' in response['res_modified']:
event_ids = response['res_modified']['events'] # ids of resources created
# set breakpoint for testing...
if breakpoint1A:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
self.assertEqual(1, len(code_space_ids), msg='one and only one code space id')
self.assertEqual(4, len(asset_ids), msg='incorrect number - asset_ids')
self.assertEqual(4, len(asset_type_ids), msg='incorrect number - asset_type_ids')
self.assertEqual(8, len(event_ids), msg='incorrect number - event_ids')
self.assertEqual(9, len(event_type_ids), msg='incorrect number - event_type_ids')
code_space = self.OMS.read_code_space(code_space_ids[0])
if not code_space.codes:
raise BadRequest('code_space.codes is empty')
if not code_space.codesets:
raise BadRequest('code_space.codesets is empty')
# cleanup
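# deletion order: asset types, event duration types, assets, event durations, then the CodeSpace last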
total_resources_deleted = 0
if asset_type_ids:
total_resources_deleted += len(asset_type_ids)
for id in asset_type_ids:
self.OMS.force_delete_asset_type(id)
if event_type_ids:
total_resources_deleted += len(event_type_ids)
for id in event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if asset_ids:
total_resources_deleted += len(asset_ids)
for id in asset_ids:
self.OMS.force_delete_asset(id)
if event_ids:
total_resources_deleted += len(event_ids)
for id in event_ids:
self.OMS.force_delete_event_duration(id)
if code_space_ids:
total_resources_deleted += len(code_space_ids)
for code_space_id in code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
# set breakpoint for testing...assets and asset_type should be deleted
if breakpoint1B:
log.debug('\n\n[unit] verify all resources (%d) which have been created are removed.', total_resources_deleted)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', fid, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls')
# -----
# ----- UNIT TEST: test_upload_xls_master
# -----
@attr('UNIT', group='sa')
def test_upload_xls_master(self):
# test OMS service declare_asset_tracking_resources
log.debug('\n\n***** Start : test_upload_xls_master')
self._preload_scenario("BETA") # required
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input and folder(s) and files for driving test
fid = TEST_XLS_FOLDER + 'test500_master.xlsx'
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [], [], [], [], []
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
response = self.load_marine_assets_from_xlsx(fid)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] != 'ok' or response['err_msg']:
if response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
elif response['status']:
raise BadRequest('[unit] Error: %s' % response['status'])
else:
raise BadRequest('[unit] Error: err_msg and status not populated')
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if 'codespaces' in response['res_modified']:
code_space_ids = response['res_modified']['codespaces']
if 'asset_types' in response['res_modified']:
asset_type_ids = response['res_modified']['asset_types']
if 'assets' in response['res_modified']:
asset_ids = response['res_modified']['assets']
if 'event_types' in response['res_modified']:
event_type_ids = response['res_modified']['event_types']
if 'events' in response['res_modified']:
event_ids = response['res_modified']['events']
# set breakpoint for testing...
if breakpoint1A:
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# 42 resources created...
self.assertEqual(1, len(code_space_ids), msg='one and only one code space id')
self.assertEqual(9, len(asset_ids), msg='incorrect number - asset_ids')
self.assertEqual(16,len(asset_type_ids), msg='incorrect number - asset_type_ids')
self.assertEqual(7, len(event_ids), msg='incorrect number - event_ids')
self.assertEqual(9, len(event_type_ids), msg='incorrect number - event_type_ids')
code_space = self.OMS.read_code_space(code_space_ids[0])
if not code_space.codes:
raise BadRequest('code_space.codes is empty')
if not code_space.codesets:
raise BadRequest('code_space.codesets is empty')
# cleanup
total_resources_deleted = 0
if asset_type_ids:
total_resources_deleted += len(asset_type_ids)
for id in asset_type_ids:
self.OMS.force_delete_asset_type(id)
if event_type_ids:
total_resources_deleted += len(event_type_ids)
for id in event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if asset_ids:
total_resources_deleted += len(asset_ids)
for id in asset_ids:
self.OMS.force_delete_asset(id)
if event_ids:
total_resources_deleted += len(event_ids)
for id in event_ids:
self.OMS.force_delete_event_duration(id)
if code_space_ids:
total_resources_deleted += len(code_space_ids)
for code_space_id in code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
# set breakpoint for testing...assets and asset_type should be deleted
if breakpoint1B:
log.debug('\n\n[unit] verify all resources (%d) which have been created are removed.', total_resources_deleted)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', fid, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_master')
# -----
# ----- UNIT TEST: test_download_xls
# -----
@attr('UNIT', group='sa')
def test_download_xls(self):
# test service(s) - use service declare_asset_tracking_resources to declare marine tracking
# resources (instances) in the system. Once resources are loaded, call asset_tracking_report to
# report all instances in the system, including CodeSpace, Codes, and CodeSets (xls)
# Also reported are the event and asset associations
#
# Notes: Verification requirements indicate we must prove every marine tracking resource
# has a unique system id. Suggested: an optional parameter with_ids={False | True}, defaulting to False.
# When called with parameter with_ids=True, then an additional column is added to output report for
# AssetTypes, EventDurationTypes, Assets and EventDurations with column name 'Unique ID' populated with
# actual OOI system id (altid) assigned for that resource instance.
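# A hypothetical usage sketch of this proposed (not yet implemented) parameter:
#   response = self.OMS.asset_tracking_report(with_ids=True)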
#
# Notes: Integration. asset_tracking_report currently reports all marine tracking
# resources, namely everything in the namespaces 'CodeSpaces' (including Codes and CodeSets), 'AssetType',
# 'EventDurationType', 'Asset' and 'EventDuration'. It is reasonable to expect requests for
# 'just give me the Assets' or 'just give me the CodeSpaces', etc. - meaning a partial report.
# In addition, it is easy to envision users wanting a report constrained to the asset tracking resources
# of their Org. (Partial reports would be most useful when dealing with CodeSpaces, Codes and CodeSets.)
log.debug('\n\n***** Start : test_download_xls')
self._preload_scenario("BETA") # required
verbose = True
breakpointLoaded = False
breakpointCleanup = False
breakpointVerifyCleanup = False
interactive = False
if interactive:
verbose = True
breakpointLoaded = True
breakpointCleanup = True
breakpointVerifyCleanup = True
# Input and folder(s) and files for driving test
fid = TEST_XLS_FOLDER + 'test500.xlsx'
outfile = TEST_XLS_FOLDER + 'test500_download_report.xls'
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
current_file = ''
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = fid
response = self.load_marine_assets_from_xlsx(fid)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
if response['res_modified']:
asset_type_ids = response['res_modified']['asset_types'][:]
asset_ids = response['res_modified']['assets'][:]
event_type_ids = response['res_modified']['event_types'][:]
event_ids = response['res_modified']['events'][:]
# Breakpoint - Marine Asset resources loaded into system
if breakpointLoaded:
log.debug('\n\n[unit] Breakpoint - Marine Asset resources loaded into system')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# call asset_tracking_report service, report on marine tracking resources in System
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if verbose: log.debug('\n\n[unit] request Marine Asset tracking resources ...\n')
response = self.OMS.asset_tracking_report()
if not response:
log.debug('\n\n[unit] Failed to generate marine asset tracking report.')
raise BadRequest('Failed to generate asset tracking report')
else:
# receive content from download_xls service, write to file
try:
f = open(outfile, 'wb')
except:
log.error('failed to open xls file for write', exc_info=True)
raise
try:
rcontent = binascii.a2b_hex(response)
f.write(rcontent)
f.close()
except:
log.error('[unit] failed to write xls content to output file (%s)', outfile)
log.debug('\n\n[unit] marine asset tracking report saved to file: %s\n\n', outfile)
# load the outfile just created; verify the same number of resources - check .rev_ on resources
current_file = outfile
response = self.load_marine_assets_from_xlsx(outfile)
if response:
if verbose: log.debug('\n\n[unit] response: %s', response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
if breakpointCleanup:
log.debug('\n\n[unit] Breakpoint - preparing to delete resources which were created')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Cleanup
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if asset_type_ids:
for id in asset_type_ids:
self.OMS.force_delete_asset_type(id)
if asset_ids:
for id in asset_ids:
self.OMS.force_delete_asset(id)
if event_type_ids:
for id in event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if event_ids:
for id in event_ids:
self.OMS.force_delete_event_duration(id)
# set breakpoint for testing...assets and asset_type should be deleted
if breakpointVerifyCleanup:
log.debug('\n\n[unit] Breakpoint - verify all Marine Asset resources have been removed')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
finally:
log.debug('\n\n***** Completed : test_download_xls')
# -----
# ----- unit test: test_upload_all_sheets_twice
# -----
@attr('UNIT', group='sa')
def test_upload_all_sheets_twice(self):
# Step 1. load a single spreadsheet with all sheets (test505.xlsx) when there is no CodeSpace instance available
# Step 2. load the same spreadsheet again
log.debug('\n\n***** Start : test_upload_all_sheets_twice')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
try:
code_space_ids = []
pass_count = 1
for fid in input_files:
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s\n\n', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
if response['res_modified']['codespaces']:
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
if response['res_removed']['codespaces']:
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two 'add' again (causing update) of all resources - full load; asserts specifically for this unit test
if pass_count == 2:
# Check uniqueness of alt_ids
unique = self.unique_altids(RT.Asset)
if not unique:
if verbose: log.debug('\n\n[unit] duplicate Asset altids found')
raise BadRequest('duplicate Asset altids found!')
else:
if verbose: log.debug('\n\n[unit] all Asset altids unique')
picklist = self.OMS.get_assets_picklist(id_only='False')
if verbose: log.debug('\n\n[unit] Assets picklist(%d): %s', len(picklist), picklist)
altids = self.OMS.get_altids(RT.Asset)
if verbose: log.debug('\n\n[unit] Asset altids: %s', altids)
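# 'squish' uniqueness check: take the first alt id of each picklist entry, dedupe, and compare the count against the total number of alt ids; a mismatch means duplicates exist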
len_altids = len(altids)
squish_list = []
for item in picklist:
squish_list.append(item[2][0])
if verbose: log.debug('\n\n[unit] Asset squish_list: %s', squish_list)
len_squish = len(list(set(squish_list)))
if verbose: log.debug('\n\n[unit] Asset len squish_list: %d', len_squish)
if len_squish != len_altids:
raise BadRequest('failed uniqueness test (squish)')
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
pass_count += 1
# summary and cleanup
total_resources_to_delete = 0
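# resources touched in both passes appear twice in the running sums; dedupe with set() so each id is deleted exactly once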
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids),msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids),msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_all_sheets_twice')
# -----
# ----- unit test: test_upload_xls_twice
# -----
@attr('UNIT', group='sa')
def test_upload_xls_twice(self):
# test service declare_asset_tracking_resources by calling twice to exercise create and update
# functionality in service.
#
# Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc.
#
# Pass 1. Load all resources, including code related.
# Pass 2. Only load some event and event types of xlsx on second load (per scenario outlined above)
log.debug('\n\n***** Start : test_upload_xls_twice')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-a.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
if response['res_removed']['codespaces']:
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two 'add' again (causing update) of 1 event resource and two (2) event types
# asserts specifically for this unit test
if pass_count == 2:
# response results...
self.assertEqual(1, len(code_space_ids), msg='pass 2: res_modified code_space_ids')
self.assertEqual(1, len(event_ids), msg='pass 2: res_modified event_ids')
self.assertEqual(2, len(event_type_ids), msg='pass 2: res_modified event_type_ids')
# totals summary (duplicates simply indicate 'touched' more than once)
self.assertEqual(2, len(sum_code_space_ids), msg='pass 2: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 2: sum_asset_type_ids')
self.assertEqual(9, len(sum_event_ids), msg='pass 2: sum_event_ids')
self.assertEqual(11,len(sum_event_type_ids), msg='pass 2: sum_event_type_ids')
# resources removed...
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint1B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_twice')
# -----
# ----- unit test: test_upload_new_attribute_specification
# -----
@attr('UNIT', group='sa')
def test_upload_new_attribute_specification(self):
# test service declare_asset_tracking_resources by calling twice to exercise create and update
# functionality in service.
#
# Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc.
#
# Pass 1. Load all resources, including code related.
# Pass 2. Load platform type, asset and attr spec with NEW (additional) attribute specification
#
log.debug('\n\n***** Start : test_upload_new_attribute_specification')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
summary = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
summary = True
breakpoint1A = True
breakpoint1B = True
# Input and folder(s) and files for driving test
input_files = ['test500.xlsx', 'test500-new-attribute-specification.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
if response['res_removed']['codespaces']:
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# verify unique AttributeSpecification names in attribute_specifications {}
for id in asset_type_ids:
names = []
unique_names = []
asset_type_obj = self.OMS.read_asset_type(id)
if asset_type_obj.attribute_specifications:
if summary: log.debug('\n\n[unit] asset type: %s has %d specs', asset_type_obj.name, len(asset_type_obj.attribute_specifications))
names = asset_type_obj.attribute_specifications.keys()
if names:
unique_names = list(set(names))
if summary:
log.debug('\n\n[unit] len(names): %d len(unique_names): %d', len(names), len(unique_names))
outline = '\n\n ' + asset_type_obj.name + ' unique names...\n'
for uname in unique_names:
outline += uname + '\n'
log.debug('\n\n[unit] %s', outline)
self.assertEqual(len(names), len(unique_names), msg='duplicate names in attribute specification')
# pass two 'add' new attribute specification to platform type resource; asserts specifically for this unit test
if pass_count == 2:
# response results...
self.assertEqual(1, len(code_space_ids), msg='pass 2: res_modified code_space_ids')
self.assertEqual(0, len(event_ids), msg='pass 2: res_modified event_ids')
self.assertEqual(0, len(event_type_ids), msg='pass 2: res_modified event_type_ids')
# totals summary (duplicates simply indicate 'touched' more than once)
self.assertEqual(2, len(sum_code_space_ids), msg='pass 2: sum_code_space_ids')
self.assertEqual(5, len(sum_asset_ids), msg='pass 2: sum_asset_ids')
self.assertEqual(7, len(sum_asset_type_ids), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 2: sum_event_type_ids')
# resources removed...
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 2: del_sum_event_type_ids')
# verify unique AttributeSpecification names in attribute_specifications {}
for id in asset_type_ids:
names = []
unique_names = []
asset_type_obj = self.OMS.read_asset_type(id)
if asset_type_obj.attribute_specifications:
if summary: log.debug('\n\n[unit] asset type: %s has %d specs', asset_type_obj.name, len(asset_type_obj.attribute_specifications))
names = asset_type_obj.attribute_specifications.keys()
if names:
unique_names = list(set(names))
if summary:
log.debug('\n\n[unit] len(names): %d len(unique_names): %d', len(names), len(unique_names))
outline = '\n\n ' + asset_type_obj.name + ' unique names...\n'
for uname in unique_names:
outline += uname + '\n'
log.debug('\n\n[unit] %s', outline)
self.assertEqual(len(names), len(unique_names), msg='duplicate names in attribute specification')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint1B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] bad request exception')
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] not found exception')
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except Inconsistent, Arguments:
log.debug('\n\n[unit] inconsistent exception')
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.debug('\n\n[unit] general exception')
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_new_attribute_specification')
# -----
# ----- unit test: test_upload_xls_triple_codes
# -----
@attr('UNIT', group='sa')
def test_upload_xls_triple_codes(self):
# test service declare_asset_tracking_resources by calling three times to exercise create and update
# functionality in service - specifically for CodeSpaces, Codes, CodeSets, Events, EventTypes,
# Event attribute specs and event attributes. 'remove' functionality tested for Codes.
#
# Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc. Also consider deletion: an AssetType
# available to all Orgs and used by at least one Org will, when deleted, affect the Orgs which use it.
# Think about how to handle deletion of types and instances.
#
# This unit test loads three different xlsx spreadsheets to accomplish the following:
# load 1 - load all sheets (test505.xlsx), including
# CodeSpaces, Codes, CodeSets, Assets, Events, AssetTypes EventTypes, Attribute Specs and Attributes
#
# load 2 'add'
# 'add' new codes and codeset for colors (test505-a.xlsx);
# modify the ReturnToManufacturer EventDuration instance attributes:
# 'event description' == 'device damaged by trawler'
# 'recording operator name' == 'Nina Recorder'
# 'RTM return authorization number' == 'RTM-RAN-43'
# (loads sheets CodeSpaces, Codes, CodeSets, Events, EventTypes, Event Attribute Specs and Event Attributes)
#
# load 3 'remove' Code 'pink' from CodeSpace; verify CodeSet 'colors' is updated. (test505-b.xlsx; sheets CodeSpaces and Codes)
#
log.debug('\n\n***** Start : test_upload_xls_triple_codes')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-a.xlsx', 'test505-b.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets'][:]
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
#---------------------------------------------------------------------------------
# Pass 1.
# pass one 'add' all resources - full load
# asserts specifically for pass 1 of this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 2.
# pass two 'add' again (causing update) of 1 event resource and two (2) event types
# asserts specifically for pass 2 of this unit test
if pass_count == 2:
# What changed.........
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 2: res_modified code_space_ids')
self.assertEqual(1, len(event_ids), msg='pass 2: res_modified event_ids')
self.assertEqual(2, len(event_type_ids), msg='pass 2: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 2: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 2: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 2: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 2: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 2: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 2: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 2: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
# Verify EventDuration instance has expected modifications:
# Verify modifications to EventDuration instance ReturnToManufacturer instance attributes:
# 'event description' == 'device damaged by trawler'
# 'recording operator name' == 'Nina Recorder'
# 'RTM return authorization number' == 'RTM-RAN-43'
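# Note on structure (mirrors the reads below): event_duration_attrs maps an
# attribute name to a dict whose 'value' key holds a list of value dicts, so
# the scalar lives two levels down, e.g.:
#   attr = event_obj2.event_duration_attrs['event description']['value']
#   attr_value = attr[0]['value']   # -> 'device damaged by trawler'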
event_obj2 = self.RR2.read(event_ids[0],specific_type=RT.EventDuration)
#log.debug('\n\n[unit] event_obj2: %s', event_obj2)
if event_obj2:
if event_obj2.event_duration_attrs:
if 'event description' in event_obj2.event_duration_attrs:
attr = event_obj2.event_duration_attrs['event description']['value']
#log.debug('\n\n[unit] event description attr: %s', attr)
attr_value = attr[0]['value']
#log.debug('\n\n[unit] event description attr_value: %s', attr_value)
self.assertEqual('device damaged by trawler',
attr_value,
msg='failed to update "event description" ')
if 'recording operator name' in event_obj2.event_duration_attrs:
attr = event_obj2.event_duration_attrs['recording operator name']['value']
attr_value = attr[0]['value']
self.assertEqual('Nina Recorder',
attr_value,
msg='failed to update "recording operator name" ')
if 'RTM return authorization number' in event_obj2.event_duration_attrs:
attr = event_obj2.event_duration_attrs['RTM return authorization number']['value']
attr_value = attr[0]['value']
self.assertEqual('RTM-RAN-43',
attr_value,
msg='failed to update "RTM return authorization number" ')
else:
raise BadRequest('pass count 2: event_duration_attrs empty; should be populated')
else:
raise BadRequest('pass count 2: event_duration object empty; should be populated')
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
log.debug('\n\n[unit] codeset \'colors\' has been created')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
log.debug('\n\n[unit] codespace.codeset[colors] enumeration: %s',
codeset_colors['enumeration'])
else:
raise BadRequest('pass count 2: failed to create codeset \'colors\'; should be populated')
else:
raise BadRequest('pass count 2: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 2: read_code_space failed to return code_space; should be populated')
# Running totals.....
# totals summary res_modified (duplicates simply indicate 'touched' more than once during multiple passes)
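# Sketch (comments only, not executed) of why duplicates accumulate here:
#   sum_code_space_ids.extend(['cs1'])   # pass 1 touches the CodeSpace
#   sum_code_space_ids.extend(['cs1'])   # pass 2 touches the same CodeSpace
#   len(sum_code_space_ids)              # -> 2 entries for 1 unique resource
# the cleanup step below collapses them with list(set(...)).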
self.assertEqual(2, len(sum_code_space_ids), msg='pass 2: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 2: sum_asset_type_ids')
self.assertEqual(9, len(sum_event_ids), msg='pass 2: sum_event_ids')
self.assertEqual(11,len(sum_event_type_ids), msg='pass 2: sum_event_type_ids')
# totals summary of res_removed - summary of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 3.
# pass three 'remove' Code 'pink' (causing update) of 1 CodeSpace resource
# asserts specifically for pass 3 of this unit test
if pass_count == 3:
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 3: res_modified code_space_ids')
self.assertEqual(0, len(event_ids), msg='pass 3: res_modified event_ids')
self.assertEqual(0, len(event_type_ids), msg='pass 3: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 3: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 3: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 3: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 3: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 3: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 3: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 3: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
# Verify codeset 'colors' had 'pink' removed from codeset:
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
log.debug('\n\n[unit] codeset \'colors\' present')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
log.debug('\n\n[unit] codespace.codeset[\'colors\'] enumeration: %s',
codeset_colors['enumeration'])
if 'pink' not in codeset_colors['enumeration']:
log.debug('\n\n[unit] \'pink\' successfully removed from codeset \'colors\' enumeration')
else:
raise BadRequest('pass count 3: remove failed - \'pink\' still in codeset \'colors\'')
else:
raise BadRequest('pass count 3: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 3: read_code_space failed to return code_space; should be populated')
# totals summary of resources added ('add' action) during multiple passes
self.assertEqual(3, len(sum_code_space_ids), msg='pass 3: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 3: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 3: sum_asset_type_ids')
self.assertEqual(9, len(sum_event_ids), msg='pass 3: sum_event_ids')
self.assertEqual(11,len(sum_event_type_ids), msg='pass 3: sum_event_type_ids')
# totals of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 3: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 3: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 3: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 3: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 3: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
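# Expected arithmetic for this test after list(set(...)) dedup:
#   1 CodeSpace + 4 Assets + 4 AssetTypes + 8 EventDurations + 9 EventDurationTypes = 26
# which the asserts below verify resource-by-resource and in total.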
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids),msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids),msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint1B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_triple_codes')
# -----
# ----- unit test: test_upload_xls_triple_codes_only
# -----
@attr('UNIT', group='sa')
def test_upload_xls_triple_codes_only(self):
# test service declare_asset_tracking_resources by calling three times to exercise create and update
# functionality in service - specifically for CodeSpaces, Codes, CodeSets, Events, EventTypes,
# Event attribute specs and event attributes. 'remove' functionality is tested for Codes.
#
# Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc. Consider delete also, since deleting an
# AssetType that is available to all Orgs and in use by one or more Orgs will affect those Orgs.
# Think about how to handle deletion of types and instances.
#
# This unit test uses three different xlsx spreadsheets to accomplish the following:
# load 1 - load all sheets (test505.xlsx), including
# CodeSpaces, Codes, CodeSets, Assets, Events, AssetTypes, EventTypes, Attribute Specs and Attributes
#
# load 2 'add'
# new codes and codeset for colors (test505-a.xlsx);
# modify EventDuration instance ReturnToManufacturer instance attributes:
# 'event description' == 'device damaged by trawler'
# 'recording operator name' == 'Nina Recorder'
# 'RTM return authorization number' == 'RTM-RAN-43'
# (loads sheets CodeSpaces, Codes, CodeSets, Events, EventTypes, Event Attribute Specs and Event Attributes)
#
# load 3 'remove' Code 'pink' from CodeSpace; verify CodeSet colors is updated. (test505-c.xlsx; sheets Codes (only))
# Test the removal of pink from CodeSet colors by removal of code 'pink' from codes - resulting in
# CodeSet reflecting the removal of enumeration value 'pink'
#
#
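# For reference, the response shape consumed below (keys taken from the parsing
# code in this test; id lists shown empty, actual values vary per load):
#   {'status': 'ok', 'err_msg': '',
#    'res_modified': {'codespaces': [], 'asset_types': [], 'assets': [],
#                     'event_types': [], 'events': []},
#    'res_removed':  {'codespaces': [], 'asset_types': [], 'assets': [],
#                     'event_types': [], 'events': []}}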
log.debug('\n\n***** Start : test_upload_xls_triple_codes_only')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-a.xlsx', 'test505-c.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
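# Possible refactor (sketch only, names hypothetical, not used here): the
# per-category collection above repeats verbatim in each test and could be
# folded into a helper along these lines:
#   def _collect(section, key, running):
#       ids = section.get(key) or []
#       running.extend(ids)
#       return ids
#   code_space_ids = _collect(response['res_modified'], 'codespaces', sum_code_space_ids)
# behavior would be identical while removing the copy/paste blocks.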
#---------------------------------------------------------------------------------
# Pass 1.
# pass one 'add' all resources - full load; asserts specifically for pass 1 of this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 2.
# pass two 'add' again (causing update) of 1 event resource and two (2) event types
# asserts specifically for pass 2 of this unit test
if pass_count == 2:
# What changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 2: res_modified code_space_ids')
self.assertEqual(1, len(event_ids), msg='pass 2: res_modified event_ids')
self.assertEqual(2, len(event_type_ids), msg='pass 2: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 2: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 2: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 2: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 2: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 2: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 2: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 2: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
# Verify EventDuration instance has expected modifications:
# Verify modifications to EventDuration instance ReturnToManufacturer instance attributes:
# 'event description' == 'device damaged by trawler'
# 'recording operator name' == 'Nina Recorder'
# 'RTM return authorization number' == 'RTM-RAN-43'
event_obj2 = self.RR2.read(event_ids[0],specific_type=RT.EventDuration)
#log.debug('\n\n[unit] event_obj2: %s', event_obj2)
if event_obj2:
if event_obj2.event_duration_attrs:
if 'event description' in event_obj2.event_duration_attrs:
attr = event_obj2.event_duration_attrs['event description']['value']
attr_value = attr[0]['value']
self.assertEqual('device damaged by trawler',
attr_value,
msg='failed to update "event description" ')
if 'recording operator name' in event_obj2.event_duration_attrs:
attr = event_obj2.event_duration_attrs['recording operator name']['value']
attr_value = attr[0]['value']
self.assertEqual('Nina Recorder',
attr_value,
msg='failed to update "recording operator name" ')
if 'RTM return authorization number' in event_obj2.event_duration_attrs:
attr = event_obj2.event_duration_attrs['RTM return authorization number']['value']
attr_value = attr[0]['value']
self.assertEqual('RTM-RAN-43',
attr_value,
msg='failed to update "RTM return authorization number" ')
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
log.debug('\n\n[unit] codeset \'colors\' has been created')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
log.debug('\n\n[unit] codespace.codeset[\'colors\'] enumeration: %s',
codeset_colors['enumeration'])
else:
raise BadRequest('pass count 2: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 2: read_code_space failed to return code_space; should be populated')
# Running totals.....
# totals summary res_modified (duplicates simply indicate 'touched' more than once during multiple passes)
self.assertEqual(2, len(sum_code_space_ids), msg='pass 2: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 2: sum_asset_type_ids')
self.assertEqual(9, len(sum_event_ids), msg='pass 2: sum_event_ids')
self.assertEqual(11,len(sum_event_type_ids), msg='pass 2: sum_event_type_ids')
# totals summary of res_removed - summary of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 3.
# pass three 'remove' Code 'pink' (causing update) of 1 CodeSpace resource
# asserts specifically for pass 3 of this unit test
if pass_count == 3:
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 3: res_modified code_space_ids')
self.assertEqual(0, len(event_ids), msg='pass 3: res_modified event_ids')
self.assertEqual(0, len(event_type_ids), msg='pass 3: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 3: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 3: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 3: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 3: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 3: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 3: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 3: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
# Verify codeset 'colors' had 'pink' removed from codeset:
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
log.debug('\n\n[unit] codeset \'colors\' present')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
log.debug('\n\n[unit] codespace.codeset[\'colors\'] enumeration: %s',
codeset_colors['enumeration'])
if 'pink' not in codeset_colors['enumeration']:
log.debug('\n\n[unit] \'pink\' successfully removed from codeset \'colors\' enumeration')
else:
raise BadRequest('pass count 3: remove failed - \'pink\' still in codeset \'colors\'')
else:
raise BadRequest('pass count 3: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 3: read_code_space failed to return code_space; should be populated')
# totals summary of resources added ('add' action) during multiple passes
self.assertEqual(3, len(sum_code_space_ids), msg='pass 3: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 3: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 3: sum_asset_type_ids')
self.assertEqual(9, len(sum_event_ids), msg='pass 3: sum_event_ids')
self.assertEqual(11,len(sum_event_type_ids), msg='pass 3: sum_event_type_ids')
# totals of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 3: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 3: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 3: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 3: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 3: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
if verbose: log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint1B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_triple_codes_only')
# -----
# ----- unit test: test_upload_remove_codeset
# -----
@attr('UNIT', group='sa')
def test_upload_remove_codeset(self):
# test service declare_asset_tracking_resources by calling four times to exercise create and update
# functionality in service. Also exercise 'remove' action to perform delete. (load 4)
#
# Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc.
#
# This unit test uses four different xlsx spreadsheets to accomplish the following:
# load 1 - load all sheets (test505.xlsx), including
# CodeSpaces, Codes, CodeSets, Assets, Events, AssetTypes, EventTypes, Attribute Specs and Attributes
#
# load 2 'add' new codes and codeset for colors (test505-add-codeset.xlsx); also modify Event RTM attributes
# (loads sheets CodeSpaces, Codes, CodeSets, Events, EventTypes, Event Attribute Specs and Event Attributes)
#
# load 3 - change CodeSet 'colors': enumeration no longer includes 'yellow' and 'green' (test505-change-codeset.xlsx)
#
# load 4 'remove' CodeSet 'colors'. (test505-rm-codeset.xlsx; sheet CodeSet)
#
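# Pass-to-file mapping for this test (see input_files below):
#   pass 1: test505.xlsx                 -> full load (creates all resources)
#   pass 2: test505-add-codeset.xlsx     -> adds codes/codeset 'colors' (CodeSpace update)
#   pass 3: test505-change-codeset.xlsx  -> 'colors' enumeration loses 'yellow' and 'green'
#   pass 4: test505-rm-codeset.xlsx      -> removes CodeSet 'colors'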
log.debug('\n\n***** Start : test_upload_remove_codeset')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Input: folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-add-codeset.xlsx', 'test505-change-codeset.xlsx', 'test505-rm-codeset.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
#---------------------------------------------------------------------------------
# Pass 1. 'add' all resources - full load; asserts specifically for pass 1 of this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 2. 'add' new codes and codeset 'colors' (causing update of 1 CodeSpace resource)
# asserts specifically for pass 2 of this unit test
if pass_count == 2:
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 2: res_modified code_space_ids')
self.assertEqual(0, len(event_ids), msg='pass 2: res_modified event_ids')
self.assertEqual(0, len(event_type_ids), msg='pass 2: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 2: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 2: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 2: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 2: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 2: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 2: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 2: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
if verbose: log.debug('\n\n[unit] codeset \'colors\' has been created')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
if verbose: log.debug('\n\n[unit] codespace.codeset[colors] enumeration: %s',
codeset_colors['enumeration'])
else:
raise BadRequest('pass count 2: codesets does not contain codeset \'colors\'; should be populated')
else:
raise BadRequest('pass count 2: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 2: read_code_space failed to return code_space; should be populated')
# Running totals.....
# totals summary res_modified (duplicates simply indicate 'touched' more than once during multiple passes)
self.assertEqual(2, len(sum_code_space_ids), msg='pass 2: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 2: sum_event_ids')
self.assertEqual(9,len(sum_event_type_ids), msg='pass 2: sum_event_type_ids')
# totals summary of res_removed - summary of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 3. asserts specifically for pass 3 of this unit test
if pass_count == 3:
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 3: res_modified code_space_ids')
self.assertEqual(0, len(event_ids), msg='pass 3: res_modified event_ids')
self.assertEqual(0, len(event_type_ids), msg='pass 3: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 3: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 3: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 3: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 3: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 3: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 3: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 3: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
# Verify codeset 'colors' had 'yellow' and 'green' removed from enumeration:
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
if verbose: log.debug('\n\n[unit] codeset \'colors\' present')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
if verbose: log.debug('\n\n[unit] codespace.codeset[\'colors\'] enumeration: %s',
codeset_colors['enumeration'])
if 'yellow' not in codeset_colors['enumeration']:
if verbose: log.debug('\n\n[unit] \'yellow\' successfully removed from codeset \'colors\' enumeration')
if 'green' not in codeset_colors['enumeration']:
if verbose: log.debug('\n\n[unit] \'green\' successfully removed from codeset \'colors\' enumeration')
else:
raise BadRequest('pass count 3: codesets does not contain codeset \'colors\'; should be populated')
else:
raise BadRequest('pass count 3: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 3: read_code_space failed to return code_space; should be populated')
# totals summary of resources added ('add' action) during multiple passes
self.assertEqual(3, len(sum_code_space_ids), msg='pass 3: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 3: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 3: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 3: sum_event_ids')
self.assertEqual(9,len(sum_event_type_ids), msg='pass 3: sum_event_type_ids')
# totals of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 3: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 3: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 3: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 3: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 3: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
if verbose: log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint1B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_remove_codeset')
# -----
# ----- unit test: test_upload_without_codespace_instance
# -----
@attr('UNIT', group='sa')
def test_upload_without_codespace_instance(self):
# Step 1. load a single spreadsheet (test505-no-codespace.xlsx) when there is no CodeSpace instance available
# (and no CodeSpace sheet in upload xlsx). Expect to receive this err_msg in response:
#
# 'err_msg': "Unable to locate CodeSpace instance named 'Marine Asset Management'"
#
# Sample response:
# {'res_removed': {'asset_types': [], 'assets': [], 'events': [], 'event_types': []},
# 'status': 'error', 'res_modified': {'assets': [], 'asset_types': [], 'codespaces': [], 'events': [],
# 'event_types': []}, 'err_msg': "Unable to locate CodeSpace instance named 'Marine Asset Management'"}
#
# Step 2. Then load CodeSpaces (only sheet in xlsx; filename: test505-codespace.xlsx).
# Step 3. Load the xlsx which previously failed in step 1 (filename: test505-no-codespace.xlsx).
#
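# Note: pass 1 is expected to fail, which is why the status check in the loop
# below raises BadRequest only when pass_count != 1.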
log.debug('\n\n***** Start : test_upload_without_codespace_instance')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint1B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint1B = True
# Folder(s) and files for driving test
input_files = ['test505-no-codespace.xlsx', 'test505-codespace.xlsx', 'test505-no-codespace.xlsx']
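# Note: test505-no-codespace.xlsx appears twice by design: pass 1 exercises the
# failure path (no CodeSpace yet); pass 3 repeats the same load after pass 2
# has created the CodeSpace.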
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
if pass_count != 1:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
#---------------------------------------------------------------------------------
# Pass 1. 'add' all resources - full load; asserts specifically for pass 1 of this unit test
if pass_count == 1:
self.assertEqual(0, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(0, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(0, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(0, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(0, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 1: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 2. load CodeSpaces sheet only (creates 1 CodeSpace resource)
# asserts specifically for pass 2 of this unit test
if pass_count == 2:
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 2: res_modified code_space_ids')
self.assertEqual(0, len(event_ids), msg='pass 2: res_modified event_ids')
self.assertEqual(0, len(event_type_ids), msg='pass 2: res_modified event_type_ids')
self.assertEqual(0, len(asset_ids), msg='pass 2: res_modified asset_ids')
self.assertEqual(0, len(asset_type_ids), msg='pass 2: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 2: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 2: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 2: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 2: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 2: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if verbose: log.debug('\n\n[unit] CodeSpace loaded')
# Running totals.....
# totals summary res_modified (duplicates simply indicate 'touched' more than once during multiple passes)
self.assertEqual(1, len(sum_code_space_ids), msg='pass 2: sum_code_space_ids')
self.assertEqual(0, len(sum_asset_ids), msg='pass 2: sum_asset_ids')
self.assertEqual(0, len(sum_asset_type_ids), msg='pass 2: sum_asset_type_ids')
self.assertEqual(0, len(sum_event_ids), msg='pass 2: sum_event_ids')
self.assertEqual(0, len(sum_event_type_ids), msg='pass 2: sum_event_type_ids')
# totals summary of res_removed - summary of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 3. reload the xlsx which failed in pass 1; with the CodeSpace present the full load succeeds; asserts for pass 3
if pass_count == 3:
# what changed through action=='add' this pass...
self.assertEqual(1, len(code_space_ids), msg='pass 3: res_modified code_space_ids')
self.assertEqual(8, len(event_ids), msg='pass 3: res_modified event_ids')
self.assertEqual(9, len(event_type_ids), msg='pass 3: res_modified event_type_ids')
self.assertEqual(4, len(asset_ids), msg='pass 3: res_modified asset_ids')
self.assertEqual(4, len(asset_type_ids), msg='pass 3: res_modified asset_type_ids')
# what changed through action=='remove' this pass...
self.assertEqual(0, len(rem_code_space_ids), msg='pass 3: res_removed code_space_ids')
self.assertEqual(0, len(rem_event_ids), msg='pass 3: res_removed event_ids')
self.assertEqual(0, len(rem_event_type_ids), msg='pass 3: res_removed event_type_ids')
self.assertEqual(0, len(rem_asset_ids), msg='pass 3: res_removed asset_ids')
self.assertEqual(0, len(rem_asset_type_ids), msg='pass 3: res_removed asset_type_ids')
#--------------------------------------------------------------
# Verify detailed field changes/updated and removals
#--------------------------------------------------------------
# Check codeset 'colors' enumeration (logs if code 'pink' is absent):
cs = self.OMS.read_code_space(code_space_ids[0])
if cs:
if cs.codesets:
if 'colors' in cs.codesets:
if verbose: log.debug('\n\n[unit] codeset \'colors\' present')
codeset_colors = cs.codesets['colors']
if codeset_colors:
if codeset_colors['enumeration']:
if verbose: log.debug('\n\n[unit] codespace.codeset[\'colors\'] enumeration: %s',
codeset_colors['enumeration'])
if 'pink' not in codeset_colors['enumeration']:
if verbose: log.debug('\n\n[unit] \'pink\' successfully removed from codeset \'colors\' enumeration')
else:
raise BadRequest('pass count 3: cs.codesets empty; should be populated')
else:
raise BadRequest('pass count 3: read_code_space failed to return code_space; should be populated')
# totals summary of resources added ('add' action) during multiple passes
self.assertEqual(2, len(sum_code_space_ids), msg='pass 3: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 3: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 3: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 3: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 3: sum_event_type_ids')
# totals of resources removed during multiple passes
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 3: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 3: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 3: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 3: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 3: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
if verbose: log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint1B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_without_codespace_instance')
# -----
# ----- unit test: test_upload_xls_triple_assets
# -----
@attr('UNIT', group='sa')
def test_upload_xls_triple_assets(self):
# test service declare_asset_tracking_resources by calling multiple (3) times to exercise create, update
# and remove functionality in service - specifically for CodeSpaces, Codes, CodeSets, Events, EventTypes,
# Event attribute specs and event attributes. 'remove' functionality tested for Assets.
# This unit test uses three different xlsx spreadsheets to accomplish the following:
# load 1 - load all sheets (test505.xlsx), including
# CodeSpaces, Codes, CodeSets, Assets, Events, AssetTypes, EventTypes, Attribute Specs and Attributes
#
# load 2 'add' assets, asset types etc (test505-assets.xlsx); also modify xxxx
# (loads sheets CodeSpaces, Codes, CodeSets, AssetTypes, Assets, AssetAttributeSpecs, AssetAttributes)
#
# load 3 test 'remove' and modify ('add')
# 'remove' AssetType 'Platform' and Asset 'Pioneer 1 Platform'; (test505-rm-assets.xlsx)
# modify 'Iridium SIM card' attributes:
# s_name (to) 'Updated SIM card' StringValue
# op_stat (to) 'not functioning' StringValue
# exp_date (to) '05/25/2016' DateValue
# weight (to) '121.03' RealValue
#
# (test505-rm-assets.xlsx contains sheets: AssetTypes, Assets, AssetAttributeSpecs, AssetAttributes)
#
#
# Review: Removing TypeResources - (rule) a request to remove a TypeResource will only be honored if the
# TypeResource is not the object of an 'extends' association with another TypeResource. (review and discuss)
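# Hypothetical sketch of that rule (names illustrative, not the service API):
#   assocs = find_associations(predicate='extends', obj=type_resource_id)
#   if assocs:
#       deny_removal()  # another TypeResource still extends this one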
#
# Review: Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc. Consider delete also, since an AssetType
# available to all Orgs and used by more than zero Orgs when deleted will affect Orgs which use it.
# Review: Think about how to handle deletion of types and instances.
#
# sample response - third pass:
# {
# 'status': 'ok',
# 'err_msg': '',
# 'res_modified': {
# 'assets': ['f217b77115194a88bb73b13128def639', '8c770046dd1046478b8630875d63be2a', '203a82050943455b96feca6c96c14239'],
# 'asset_types': ['8018654cecf54dadbc786d5b77974088', 'b845ed83a5bd42ad977ac10d849f2d7e', '276830c9267b4ec0a4a14121845cd746'],
# 'codespaces': [],
# 'events': [],
# 'event_types': []
# },
# 'res_removed': {
# 'asset_types': ['4d82d3ccf0c84c11bad9c6d2c028b378'],
# 'assets': ['2b0b8781e45c4d8ea449de790c6290d0'],
# 'events': [],
# 'event_types': []
# }
# }
#
log.debug('\n\n***** Start : test_upload_xls_triple_assets')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint3B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint3B = True
# Input folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-assets.xlsx', 'test505-rm-assets.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] (pass: %d) response: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
#---------------------------------------------------------------------------------
# Pass 1. 'add' all resources - full load; asserts for pass 1 of this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 1: del_sum_event_type_ids')
if pass_count == 2:
self.assertEqual(1, len(list(set(sum_code_space_ids))), msg='pass 2: sum_code_space_ids')
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))), msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# check _rev for any asset, should be == 1
if asset_ids:
asset_id = asset_ids[0][:]
asset_obj = self.OMS.read_asset(asset_id)
if asset_obj:
if verbose: log.debug('\n\n[unit] pass count (%d) asset_obj._rev: %s', pass_count, asset_obj._rev)
if asset_obj._rev != '1':
raise BadRequest('_rev (%s) for asset object named \'%s\' should be 1' % (asset_obj._rev, asset_obj.name))
if pass_count == 3:
self.assertEqual(1, len(list(set(sum_code_space_ids))), msg='pass 3: sum_code_space_ids')
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 3: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 3: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 3: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))), msg='pass 3: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 3: del_sum_code_space_ids')
self.assertEqual(1, len(del_sum_asset_ids), msg='pass 3: del_sum_asset_ids')
self.assertEqual(1, len(del_sum_asset_type_ids), msg='pass 3: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 3: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 3: del_sum_event_type_ids')
for id in asset_ids:
asset_obj = self.OMS.read_asset(id)
if asset_obj:
if verbose: log.debug('\n\n[unit] (pass %d) name: %s asset_obj._rev: %s', pass_count, asset_obj.name, asset_obj._rev)
# 'Iridium SIM card' should be _rev == '2'; other assets were not updated
if asset_obj.name == 'Iridium SIM card':
if asset_obj._rev != '2':
raise BadRequest('revision for asset object named \'Iridium SIM card\' should be 2')
else:
if asset_obj._rev != '1':
raise BadRequest('_rev (%s) for asset object named \'%s\' should be 1' % (asset_obj._rev, asset_obj.name))
# set breakpoint for testing...
if breakpoint1A:
if verbose: log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
    total_resources_deleted += len(rm_code_space_ids)
    for code_space_id in rm_code_space_ids:
        self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint3B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_triple_assets')
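# note (sketch): the per-category cleanup above is repeated in each of these
# tests; the same bookkeeping could be a single loop over (delete_method, ids)
# pairs. Hypothetical helper, shown for illustration only and not called
# anywhere:
def _force_delete_all_sketch(self, categories):
    # categories: list of (delete_method, ids) tuples, e.g.
    #   [(self.OMS.force_delete_asset_type, rm_asset_type_ids), ...]
    deleted = 0
    for delete_method, ids in categories:
        for resource_id in ids:
            delete_method(resource_id)
        deleted += len(ids)
    return deleted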
# -----
# ----- unit test: test_upload_xls_triple_events
# -----
@attr('UNIT', group='sa')
def test_upload_xls_triple_events(self):
# test service declare_asset_tracking_resources by calling three times to exercise create and update
# functionality in service - specifically for CodeSpaces, Codes, CodeSets, Events, EventTypes,
# Event attribute specs and event attributes. 'remove' functionality tested for Events and EventTypes.
#
# Scenario: OOI loaded with AssetTypes available to all Orgs, then Org A
# loads xlsx for their Assets using AssetTypes available in system; Org B uses xlsx to load their
# AssetTypes and Assets, maybe reusing existing AssetTypes etc. Consider delete also, since an AssetType
# available to all Orgs and used by more than zero Orgs when deleted will affect Orgs which use it.
# Think about how to handle deletion of types and instances.
#
# This unit test uses three different xlsx spreadsheets to accomplish the following:
# load 1 - load all sheets, including (test505.xlsx)
# CodeSpaces, Codes, CodeSets, Assets, Events, AssetTypes, EventTypes, Attribute Specs and Attributes
#
# load 2 - 'add' events, event types etc (test505-events.xlsx); also modify Base and RTM type description values;
# also ReturnToManufacturer event description.
# (loads sheets CodeSpaces, Codes, CodeSets, EventTypes, Events, EventAttributeSpecs, EventAttributes)
#
# load 3 - 'remove' EventDurationType 'Calibration', EventDuration 'Calibration';
# modify 'ReturnToManufacturer' attributes 'event description', 'recording operator name' for update
# (test505-rm-events.xlsx; sheets EventTypes, Events, EventAttributeSpecs, EventAttributes)
#
# Removing TypeResources - (rule) a request to remove a TypeResource is only honored if the TypeResource
# is not the object of an 'extends' association with another TypeResource. (OBE)
#
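# note: with CouchDB-style revisions, _rev is '1' on create and increments on
# each update; 'ReturnToManufacturer' is created in pass 1 (_rev '1'),
# modified in pass 2 (_rev '2') and modified again in pass 3 (_rev '3'),
# which is exactly what the per-pass checks below assert.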
log.debug('\n\n***** Start : test_upload_xls_triple_events')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint3A = False
breakpoint3B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint3A = True
breakpoint3B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-events.xlsx', 'test505-rm-events.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('[unit] Error: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
#---------------------------------------------------------------------------------
# Pass 1. 'add' all resources - full load; asserts specifically for pass 1 of this unit test
if pass_count == 1:
log.debug('\n\n[unit] sum_code_space_ids: %d', len(sum_code_space_ids))
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
#---------------------------------------------------------------------------------
# Pass 2. 'add' events and event types; asserts specifically for pass 2 of this unit test
# set breakpoint for testing...
if pass_count == 2:
self.assertEqual(1, len(list(set(sum_code_space_ids))), msg='pass 2: sum_code_space_ids')
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))), msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# check _rev for any event duration
if event_ids:
id = event_ids[0][:]
obj = self.OMS.read_event_duration(id)
if obj:
if verbose: log.debug('\n\n[unit] pass count (%d) obj._rev: %s', pass_count, obj._rev)
if obj.name == 'ReturnToManufacturer':
if obj._rev != '2':
raise BadRequest('_rev (%s) for event duration object named \'%s\' should be 2' % (obj._rev, obj.name))
elif obj._rev != '1':
raise BadRequest('_rev (%s) for event duration object named \'%s\' should be 1' % (obj._rev, obj.name))
#---------------------------------------------------------------------------------
# Pass 3. 'remove' event resources and modify 'ReturnToManufacturer'; asserts specifically for pass 3 of this unit test
if pass_count == 3:
self.assertEqual(1, len(list(set(sum_code_space_ids))), msg='pass 3: sum_code_space_ids')
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 3: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 3: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 3: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))), msg='pass 3: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 3: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 3: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 3: del_sum_asset_type_ids')
self.assertEqual(1, len(del_sum_event_ids), msg='pass 3: del_sum_event_ids')
self.assertEqual(1, len(del_sum_event_type_ids), msg='pass 3: del_sum_event_type_ids')
for id in event_ids:
obj = self.OMS.read_event_duration(id)
if obj:
if verbose: log.debug('\n\n[unit] (pass %d) name: %s obj._rev: %s', pass_count, obj.name, obj._rev)
# 'ReturnToManufacturer' should be _rev == '3' (just updated); other resources were not updated
if obj.name == 'ReturnToManufacturer':
if obj._rev != '3':
raise BadRequest('_rev (%s) for event duration object named \'%s\' should be 3' % (obj._rev, obj.name))
else:
if obj._rev != '1':
raise BadRequest('_rev (%s) for event duration object named \'%s\' should be 1' % (obj._rev, obj.name))
if breakpoint1A:
log.debug('\n\n[unit] verify asset tracking instances in system...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# cleanup
total_resources_to_delete = 0
sum_code_space_ids = list(set(sum_code_space_ids))
sum_asset_ids = list(set(sum_asset_ids))
sum_asset_type_ids = list(set(sum_asset_type_ids))
sum_event_ids = list(set(sum_event_ids))
sum_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(sum_code_space_ids) + len(sum_asset_ids) + len(sum_asset_type_ids) + \
len(sum_event_ids) + len(sum_event_type_ids)
del_sum_code_space_ids = list(set(del_sum_code_space_ids))
del_sum_asset_ids = list(set(del_sum_asset_ids))
del_sum_asset_type_ids = list(set(del_sum_asset_type_ids))
del_sum_event_ids = list(set(del_sum_event_ids))
del_sum_event_type_ids = list(set(del_sum_event_type_ids))
rm_code_space_ids = list(set(sum_code_space_ids) - set(del_sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids) - set(del_sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids) - set(del_sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids) - set(del_sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids) - set(del_sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total resources to delete: %d', total_resources_to_delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
    total_resources_deleted += len(rm_code_space_ids)
    for code_space_id in rm_code_space_ids:
        self.OMS.force_delete_code_space(code_space_id)
if rm_event_type_ids:
    if verbose: log.debug('\n\n[unit] cleanup...event_duration_types...')
    total_resources_deleted += len(rm_event_type_ids)
    for id in rm_event_type_ids:
        self.OMS.force_delete_event_duration_type(id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint3B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_xls_triple_events')
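# note (sketch): the cleanup above computes the ids still to be deleted as
# the set difference created-minus-removed; a minimal worked example
# (hypothetical helper, not called anywhere):
def _remaining_ids_sketch(self, created_ids, removed_ids):
    # e.g. created_ids=['a', 'b', 'c'], removed_ids=['b'] -> ['a', 'c']
    # (set difference does not preserve list order)
    return list(set(created_ids) - set(removed_ids))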
# -----
# ----- unit test: test_upload_twice
# -----
@attr('UNIT', group='sa')
def test_upload_twice(self):
# Step 1. load a single spreadsheet with all sheets (test505.xlsx) when there is no CodeSpace instance available
# Step 2. load (again) the same spreadsheet
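# note: the second load is expected to update resources in place rather than
# create duplicates, so the pass 2 asserts below dedup the accumulated ids
# with set() and expect the same unique counts as pass 1.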
log.debug('\n\n***** Start : test_upload_twice')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
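# 26 == 1 code space + 4 assets + 4 asset types + 8 events + 9 event types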
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
    total_resources_deleted += len(rm_code_space_ids)
    for code_space_id in rm_code_space_ids:
        self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_twice')
# -----
# ----- unit test: test_add_new_asset_type
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_add_new_asset_type(self):
# Create a new asset type instance by providing two (2) sheets: AssetTypes and AssetAttributeSpecs
# Step 1. load a single spreadsheet with all sheets (test505.xlsx)
# Step 2. load spreadsheet with single asset type and corresponding attribute specification - with base type
# defined in the spreadsheet (test505-add-new-asset-type-1.xlsx)
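# note: pass 2 is expected to add exactly one new AssetType (4 -> 5 unique
# asset type ids) and to leave all other resource counts unchanged, per the
# pass 2 asserts below.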
log.debug('\n\n***** Start : test_add_new_asset_type')
verbose = False
breakpoint1A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-add-new-asset-type-1.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(5, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(5, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
    total_resources_deleted += len(rm_code_space_ids)
    for code_space_id in rm_code_space_ids:
        self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_add_new_asset_type')
# -----
# ----- unit test: test_add_new_asset_type_wo_base
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_add_new_asset_type_wo_base(self):
# Create a new asset type instance by providing two (2) sheets: AssetTypes and AssetAttributeSpecs
# Step 1. load a single spreadsheet with all sheets (test505.xlsx)
# Step 2. load spreadsheet with single asset type and corresponding attribute specification - without base type
# being defined in the spreadsheet (which the new asset type extends) (test505-add-new-asset-type-2.xlsx)
# (create new asset type through extend of root type (Base); concrete == False for 'Base')
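# note: 'Base' is the root, non-concrete type (concrete == False), so the new
# asset type may extend it even though the workbook does not redefine it; the
# next two tests extend concrete types ('Device', 'Platform') instead.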
log.debug('\n\n***** Start : test_add_new_asset_type_wo_base')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-add-new-asset-type-2.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(5, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(5, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
    total_resources_deleted += len(rm_code_space_ids)
    for code_space_id in rm_code_space_ids:
        self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_add_new_asset_type_wo_base')
# -----
# ----- unit test: test_add_new_asset_type_extend_from_device
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_add_new_asset_type_extend_from_device(self):
# Create a new asset type instance by providing two (2) sheets: AssetTypes and AssetAttributeSpecs
# Step 1. load a single spreadsheet with all sheets (test505.xlsx)
# Step 2. load spreadsheet with single asset type and corresponding attribute specification - without base type
# being defined in the spreadsheet (which the new asset type extends) (test505-add-new-asset-type-3.xlsx)
# (create new asset type through extend of a type resource which is not the root type resource; concrete == True (Device))
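# note: this variant starts from test505-asset-only.xlsx, so no events or
# event types are loaded; pass 2 should only add the one new AssetType
# (4 -> 5 unique ids) extending the concrete 'Device' type.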
log.debug('\n\n***** Start : test_add_new_asset_type_extend_from_device')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505-asset-only.xlsx', 'test505-add-new-asset-type-3.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(0, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(0, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(5, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(0, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(0, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(5, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(0, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(0, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
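# 10 == 1 code space + 4 assets + 5 asset types (no events or event types)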
self.assertEqual(10, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
    total_resources_deleted += len(rm_code_space_ids)
    for code_space_id in rm_code_space_ids:
        self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_add_new_asset_type_extend_from_device')
# -----
# ----- unit test: test_add_new_asset_type_extend_from_platform
# -----
@attr('UNIT', group='sa')
def test_add_new_asset_type_extend_from_platform(self):
# Create a new asset type instance by providing two (2) sheets: AssetTypes and AssetAttributeSpecs
# Step 1. load a single spreadsheet with all sheets (test505.xlsx)
# Step 2. load spreadsheet with single asset type and corresponding attribute specification - without base type
# being defined in the spreadsheet (which the new asset type extends) (test505-add-new-asset-type-4.xlsx)
# (create new asset type through extend of a leaf type resource, not the root type resource; concrete == True (Platform))
log.debug('\n\n***** Start : test_add_new_asset_type_extend_from_platform')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505-asset-only.xlsx', 'test505-add-new-asset-type-4.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
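# response summarizes the load: 'status', 'err_msg', plus 'res_modified'/'res_removed' dicts
# keyed by resource kind ('codespaces', 'asset_types', 'assets', 'event_types', 'events')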
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(0, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(0, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
#self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
#self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(5, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(0, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(0, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(5, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(0, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(0, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(10, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_add_new_asset_type_extend_from_platform')
# -----
# ----- unit test: test_add_new_event_type
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_add_new_event_type(self):
# Create a new event duration type instance by providing two (2) sheets: EventTypes and EventAttributeSpecs
# Step 1. load a single spreadsheet with all sheets (test505.xlsx)
# Step 2. load spreadsheet with a single event type and the base type it extends, plus the
# corresponding attribute specification (extends Base) (test505-add-new-event-type-1.xlsx)
log.debug('\n\n***** Start : test_add_new_event_type')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-add-new-event-type-1.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
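# accumulate ids from response['res_modified'] and response['res_removed'] into the per-test summary lists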
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(10, len(list(set(sum_event_type_ids))), msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(10, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_add_new_event_type')
# -----
# ----- unit test: test_add_new_event_type_wo_base
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_add_new_event_type_wo_base(self):
# Create a new event duration type instance by providing two (2) sheets: EventTypes and EventAttributeSpecs
# Step 1. load a single spreadsheet with all sheets (test505.xlsx)
# Step 2. load spreadsheet with a single event type and its corresponding attribute specification,
# without the base type it extends being defined in the spreadsheet (test505-add-new-event-type-2.xlsx)
log.debug('\n\n***** Start : test_add_new_event_type_wo_base')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files for driving test
input_files = ['test505.xlsx', 'test505-add-new-event-type-2.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
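# same bookkeeping as the previous tests: fold modified and removed ids into the summary lists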
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(10, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(10, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_add_new_event_type_wo_base')
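# -----
# ----- NOTE: illustrative sketch only - not used by the tests. The per-pass
# ----- accumulation of response ids repeated in each test above could be
# ----- factored into a helper along these lines; the name and signature are
# ----- hypothetical, not part of the original suite.
# -----
def _extend_id_summaries(self, section, summaries):
# section:   response['res_modified'] or response['res_removed']
# summaries: dict mapping a resource key (e.g. 'assets') to its summary list
for res_key, sum_list in summaries.items():
if section.get(res_key):
sum_list.extend(section[res_key][:])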
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Asset, AssetType and Attribute Tests (START)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# -----
# ----- unit test: test_new_asset_base
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base(self):
# Create a new asset ('NewAsset') - no attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet adding 'NewAsset', whose asset type extends Base; no attribute
# values are provided, so defaults are expected (test500-add-new-asset-base.xlsx)
log.debug('\n\n***** Start : test_new_asset_base')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files for driving test
input_files = ['test500.xlsx', 'test500-add-new-asset-base.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
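# response carries 'res_modified'/'res_removed' id lists per resource kind; fold them into the summaries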
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
if verbose: log.debug('\n\n[unit] Pass %d - pass_one_asset_ids: %s', pass_count, pass_one_asset_ids)
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
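# set difference against pass one isolates the single asset added in pass two ('NewAsset')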
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
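# the new asset should implement exactly one asset type; follow its implementsAssetType
# association and compare attribute names against that type's attribute_specifications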
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id, predicate=predicate, id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
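# same deletion order as the other tests: type resources first, then instances, code space last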
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base')
# -----
# ----- unit test: test_new_asset_base_attributes
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base_attributes(self):
# Create a new asset ('NewAsset') - provide attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet with two assets, providing attribute values for 'NewAsset'
# whose corresponding attribute specification extends Base (test500-add-new-asset-base-attributes.xlsx)
log.debug('\n\n***** Start : test_new_asset_base_attributes')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files for driving test
input_files = ['test500.xlsx', 'test500-add-new-asset-base-attributes.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
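# fold response id lists into the summary lists for the pass 1/pass 2 asserts below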
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
if verbose: log.debug('\n\n[unit] Pass %d - pass_one_asset_ids: %s', pass_count, pass_one_asset_ids)
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
if not pass_two_asset_ids:
raise BadRequest('failed to return res_modified with assets')
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
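# as in test_new_asset_base: the new asset should implement exactly one asset type,
# verified via the implementsAssetType association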
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id, predicate=predicate, id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base_attributes')
# -----
# ----- unit test: test_new_asset_base_attributes_short
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base_attributes_short(self):
# Create a new asset ('NewAsset') - provide attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet with two assets - provide attributes for 'NewAsset' except the 'description' value;
# expect the default. Corresponding attribute specification extends Base (test500-add-new-asset-base-attributes-short.xlsx)
log.debug('\n\n***** Start : test_new_asset_base_attributes_short')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files for driving test
input_files = ['test500.xlsx', 'test500-add-new-asset-base-attributes-short.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
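# fold response id lists into the summary lists; the pass 2 asserts rely on the unique counts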
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
if verbose: log.debug('\n\n[unit] Pass %d - pass_one_asset_ids: %s', pass_count, pass_one_asset_ids)
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
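# same verification as the previous tests: one implementsAssetType association, and
# attribute names must match the type's attribute_specifications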
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id, predicate=predicate, id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base_attributes_short')
# -----
# ----- unit test: test_new_asset_base_attributes_short_update
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base_attributes_short_update(self):
# Create a new asset ('NewAsset') - provide attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet with two assets - provide attributes for 'NewAsset' including a 'descr' value
# Step 3. load spreadsheet with one asset - provide attributes for 'NewAsset' except the 'descr' value;
# verify the default value is NOT used and the current descr value (provided in Step 2) is retained.
#
log.debug('\n\n***** Start : test_new_asset_base_attributes_short_update')
verbose = False
breakpoint1A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2B = True
# Input folder(s) and files for driving test
input_files = ['test500.xlsx', 'test500-add-new-asset-base-attributes.xlsx', 'test500-add-new-asset-base-attributes-short.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
save_asset_id = ''
try:
pass_one_asset_ids = ''
pass_two_asset_ids = ''
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
pass_one_asset_ids = ''
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
save_asset_id = asset_id
if verbose: log.debug('\n\n[unit] Pass %d: asset_id: %s', pass_count, asset_id)
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
if 'descr' in attributes:
value = attributes['descr']
if verbose: log.debug('\n\n[unit] Pass %d: value of \'descr\' attribute: %s', pass_count, value)
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id,predicate=predicate,id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# pass three - asserts specifically for this unit test
if pass_count == 3:
if verbose: log.debug('\n\n[unit] Pass %d Description: Review contents of asset \'NewAsset\' attribute \'descr\' ' +
'and determine it matches value from previous pass', pass_count)
if verbose: log.debug('\n\n[unit] Pass %d: asset_id: %s', pass_count, save_asset_id)
asset_obj2 = self.OMS.read_asset(save_asset_id)
attributes2 = asset_obj2.asset_attrs
self.assertEqual(len(base_names), len(attributes2), msg='number of attributes should equal len base attributes')
if 'descr' in attributes2:
value = attributes2['descr']
if verbose: log.debug('\n\n[unit] Pass %d: value of \'descr\' attribute: %s', pass_count, value)
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base_attributes_short_update')
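# NOTE (editorial sketch): the update semantics verified above reduce to a dict
# merge in which an omitted attribute keeps its current value instead of
# reverting to the spec default. A minimal sketch; merge_attributes is a
# hypothetical helper, not part of the service under test.
def merge_attributes(current, provided):
    # keep every current value; overwrite only keys explicitly provided
    merged = dict(current)
    merged.update(provided)
    return merged
# e.g. merge_attributes({'descr': 'set in pass 2'}, {}) leaves 'descr'
# untouched, mirroring the pass-3 assertions above.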
# ----- unit test: test_add_new_asset_base_one_attribute_update
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base_one_attribute_update(self):
# Create a new asset ('NewAsset') - provide attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet with two assets - provide attributes for 'NewAsset', including a 'descr' value
# Step 3. load spreadsheet with one asset - provide one attribute for 'NewAsset', the 'descr' value;
# Verify the values provided in Step 2 are retained for everything except 'descr', which
# receives the new value provided in Step 3.
log.debug('\n\n***** Start : test_new_asset_base_one_attribute_update')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files driving the test
input_files = ['test500.xlsx', 'test500-add-new-asset-base-attributes.xlsx', 'test500-add-new-asset-base-one_attribute.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
save_asset_id = ''
try:
pass_one_asset_ids = ''
pass_two_asset_ids = ''
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
# difference between passes should provide single asset id for 'NewAsset'
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
log.debug('\n\n[unit] list_new_asset_ids: %s', list_new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
save_asset_id = asset_id
if verbose: log.debug('\n\n[unit] Pass %d: asset_id: %s', pass_count, asset_id)
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
if 'descr' in attributes:
value = attributes['descr']
if verbose: log.debug('\n\n[unit] Pass %d: value of \'descr\' attribute: %s', pass_count, value)
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id,predicate=predicate,id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# pass three
# asserts specifically for this unit test
if pass_count == 3:
if verbose: log.debug('\n\n[unit] Pass %d Description: Review contents of asset \'NewAsset\' (other than attribute \'descr\') ' +
'and determine each matches value from previous pass', pass_count)
if verbose: log.debug('\n\n[unit] Pass %d: asset_id: %s', pass_count, save_asset_id)
asset_obj2 = self.OMS.read_asset(save_asset_id)
attributes2 = asset_obj2.asset_attrs
self.assertEqual(len(attribute_keys), len(attributes2), msg='number of attributes should equal len base attributes')
if 'descr' in attributes2:
value1 = attributes['descr']
value2 = attributes2['descr']
self.assertNotEqual(value1, value2, msg='descr changed, values should differ')
if verbose: log.debug('\n\n[unit] Pass %d: value of \'descr\' attribute: %s', (pass_count-1), value1)
if verbose: log.debug('\n\n[unit] Pass %d: value of \'descr\' attribute: %s', pass_count, value2)
for name in attributes:
value1 = attributes[name]
value2 = attributes2[name]
if name != 'descr':
self.assertEqual(value1, value2, msg='update should not change existing values unless value explicitly provided')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base_one_attribute_update')
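# NOTE (editorial sketch): the retire/force-delete cleanup block is repeated
# verbatim in each test; a helper like the following could centralize it. The
# method name is hypothetical; it assumes only the OMS force_delete_* calls
# already used in these tests.
def _cleanup_marine_resources(self, rm_asset_type_ids, rm_event_type_ids,
                              rm_asset_ids, rm_event_ids, rm_code_space_ids):
    # delete each resource kind with its matching deleter; return count deleted
    deleted = 0
    for ids, deleter in ((rm_asset_type_ids, self.OMS.force_delete_asset_type),
                         (rm_event_type_ids, self.OMS.force_delete_event_duration_type),
                         (rm_asset_ids, self.OMS.force_delete_asset),
                         (rm_event_ids, self.OMS.force_delete_event_duration),
                         (rm_code_space_ids, self.OMS.force_delete_code_space)):
        deleted += len(ids)
        for resource_id in ids:
            deleter(resource_id)
    return deleted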
# ----- unit test: test_new_asset_base_one_attribute_no_types
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base_one_attribute_no_types(self):
# Create a new asset ('NewAsset') - provide attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet with one asset - provide a single 'descr' attribute value for 'NewAsset';
# expect defaults for all attributes other than 'descr'
# NOTE: the AttributeSpecifications sheet is still present!!
log.debug('\n\n***** Start : test_new_asset_base_one_attribute_no_types')
verbose = False
breakpoint1A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2B = True
# Input folder(s) and files driving the test
input_files = ['test500.xlsx', 'test500-new-asset-base-one-attribute-no-types.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
pass_one_asset_ids = ''
pass_two_asset_ids = ''
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load
# asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id,predicate=predicate,id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base_one_attribute_no_types')
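# NOTE (editorial sketch): the 'expect defaults' behavior verified above
# amounts to filling any attribute the sheet omits from its
# AttributeSpecification default. Illustrative only; fill_with_defaults and the
# 'default_value' key are assumptions, not the service implementation.
def fill_with_defaults(attribute_specifications, provided):
    # provided values win; anything else falls back to the spec default
    attrs = {}
    for name, spec in attribute_specifications.items():
        attrs[name] = provided.get(name, spec['default_value'])
    return attrs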
# ----- unit test: test_new_asset_base_one_attribute_only
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_new_asset_base_one_attribute_only(self):
# Create a new asset ('NewAsset') - provide attributes and extends base
# Step 1. load a single spreadsheet with all sheets (test500.xlsx)
# Step 2. load spreadsheet with two sheets (Assets, AssetAttributes) providing
# one asset ('NewAsset') and a single 'descr' attribute value for 'NewAsset';
# expect defaults for all attributes other than 'descr'
# NOTE: No AttributeSpecifications sheet!!
log.debug('\n\n***** Start : test_new_asset_base_one_attribute_only')
verbose = False
breakpoint1A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2B = True
# Input folder(s) and files driving the test
input_files = ['test500.xlsx', 'test500-new-asset-base-one-attribute-only.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
pass_one_asset_ids = ''
pass_two_asset_ids = ''
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_one_asset_ids = response['res_modified']['assets'][:]
# pass two - asserts specifically for this unit test
if pass_count == 2:
#log.debug('\n\n[service] number of unique asset type ids: %d', len(list(set(sum_asset_type_ids))))
self.assertEqual(5, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
if response['res_modified']:
if 'assets' in response['res_modified']:
pass_two_asset_ids = response['res_modified']['assets'][:]
new_asset_ids = set(pass_two_asset_ids) - set(pass_one_asset_ids)
list_new_asset_ids = list(new_asset_ids)
self.assertEqual(1, len(list_new_asset_ids), msg='one new asset added in pass two (NewAsset)')
asset_id = list_new_asset_ids[0]
asset_obj = self.OMS.read_asset(asset_id)
attributes = asset_obj.asset_attrs
predicate = PRED.implementsAssetType
associations = self.container.resource_registry.find_associations(subject=asset_id,predicate=predicate,id_only=False)
self.assertEqual(1, len(associations), msg='one and only one associated type resource')
asset_type_id = associations[0].o
asset_type_obj = self.OMS.read_asset_type(asset_type_id)
base_names = asset_type_obj.attribute_specifications.keys()
attribute_keys = attributes.keys()
self.assertEqual(len(base_names), len(attributes), msg='number of attributes should equal len base attributes')
# verify base attribute specification names are each in newly created NewAsset attributes
for name in base_names:
if name not in attribute_keys:
raise BadRequest('all attribute names in NewAsset must match Base type resource names')
# set breakpoint for testing...
if breakpoint1A:
if verbose: log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(5, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(27, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_new_asset_base_one_attribute_only')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Asset, AssetType and Attribute tests (END)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# -----
# ----- unit test: test_xls_mod_attribute
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_xls_mod_attribute(self):
# Load spreadsheet to populate instances, then perform a second load to modify attribute
# Step 1. load a single spreadsheet with all sheets (test505.xlsx) when there is no CodeSpace instance available
# Step 2. load a different spreadsheet to modify an attribute (test505-mod-attribute.xlsx)
# Modify the description attribute of the SIM card to be 'Hot pink SIM card!'
# Review: add detailed asserts to check the description value after modification
# (a sketch of such an assert follows this test)
log.debug('\n\n***** Start : test_xls_mod_attribute')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files driving the test
input_files = ['test505.xlsx', 'test505-mod-attribute.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
raise BadRequest('Error in response: %s' % response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] BadRequest (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] NotFound (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_xls_mod_attribute')
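# NOTE (editorial sketch): the Review comment in test_xls_mod_attribute asks
# for a detailed assert on the modified description. Under the conventions of
# the other tests in this file it could look like this helper; the direct
# asset_attrs['descr'] access mirrors the reads above but is an assumption
# about the attribute layout.
def _assert_descr_value(self, asset_id, expected):
    # read the asset back and compare its 'descr' attribute to the expectation
    asset_obj = self.OMS.read_asset(asset_id)
    self.assertEqual(expected, asset_obj.asset_attrs['descr'],
                     msg='description attribute does not match expected value')
# e.g. after pass 2: self._assert_descr_value(asset_id, 'Hot pink SIM card!')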
# test for development understanding only
#@unittest.skip('targeting - attribute value encoding')
@attr('UNIT', group='sa')
def test_attribute_value_encoding(self):
log.debug('\n\n***** Start : test_attribute_value_encoding')
attr_data = [('muss', True), ('foo', 5), ('foo', 6), ('blah', 1.2), ('shu', 'Hello World')]
asset = IonObject(RT.Asset, name="Test Asset")
for k, v in attr_data:
value = self.create_value(v)
if k in asset.asset_attrs:
att = asset.asset_attrs[k]
att.value.append(value)
else:
att = IonObject(OT.Attribute, name=k)
att.value.append(value)  # value list defined in the Attribute object
asset.asset_attrs[k] = att
import json
# used by json.dumps as the default hook for objects it cannot serialize natively
def ion_object_encoder(obj):
return obj.__dict__
encoding = json.dumps(asset, default=ion_object_encoder, indent=2)
log.debug('\n\n[unit] json encoding of asset:\n%s', encoding)
log.debug('\n\n***** Completed : test_attribute_value_encoding')
"""
This is example interface discovery
def create_value(value_type=None,value_str='',value_constraints=[]) :
parsed_value = parse_value(value_str,value_type)
validated_value = constrain_value(parsed_value, value_constraints)
return IonObject(value_type,value=validated_value)
value = create_value(attr_spec.value_type,value_str=input_value_str,attr_spec.value_constraints)
assert(type(value).__name__ == attr_spec.value_type)
def get_attr_value(attr_dict,attr_name,index)
assert(attr_name in)
assert(index within len)
dynamic_attrs[attr_name].value[index].value
"""
# -----
# ----- unit test: test_upload_event_asset_remove
# -----
@attr('UNIT', group='sa')
def test_upload_event_asset_remove(self):
# Test sheet EventAssetMap - 'add' association (Step 1); 'remove' association (Step 2); more in Steps 3 and 4
# Step 1. load a single spreadsheet with all sheets (test500.xlsx) including sheet EventAssetMap
# Step 2. load (again) but 'remove' instead of 'add' in EventAssetMap (test500-rm-association.xlsx)
# Step 3. load (again) and 'add' association
# Step 4. repeat Step 3, load again and expect failure to add association since it already exists:
# err_msg expected:
# 'Association between 184eaac551524446a1c79eb67ff6e1cc and 42b28e2f1edf426e81eb6b22c7d9af29 with predicate hasVerificationEvent already exists'
log.debug('\n\n***** Start : test_upload_event_asset_remove')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input folder(s) and files driving the test
input_files = ['test500.xlsx', 'test500-rm-association.xlsx', 'test500.xlsx', 'test500.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
if pass_count != 4:
raise BadRequest('Error in response: %s' % response['err_msg'])
else:
log.debug('\n\n[unit] received expected error_msg:\n%s', response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids),msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids),msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids),msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_event_asset_remove')
# -----
# ----- unit test: test_upload_multiple_event_asset
# -----
@attr('UNIT', group='sa')
def test_upload_multiple_event_asset(self):
# Test sheet EventAssetMap - 'add' association (Step 1); 'remove' association (Step 2); more in sheets 3 and 4
# Step 1. load a single spreadsheet with all sheets (test500.xlsx) includes sheet EventAssetMap
# Step 2. load and add 2 events (remove Test event for Platform, a Verification Category and Repair,
# a Location category) from Instrument 5010 using EventAssetMap (test510.xlsx)
# Step 3. load and add 2 Deployment events (one each to Platform and Instrument 5010) in EventAssetMap (test500-multiple-location-events.xlsx)
# Step 4. load same sheet as in Step 1 and expect error since only one Location event can be assigned at a time and
# err_msg expected:
# 'an association (hasLocationEvent) already exists; cannot assign more than one association of the same type'
log.debug('\n\n***** Start : test_upload_multiple_event_asset')
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test500.xlsx', 'test510.xlsx', 'test500-multiple-location-events.xlsx', 'test500.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
if pass_count != 4:
raise BadRequest('Error in response: %s' % response['err_msg'])
else:
log.debug('\n\n[unit] received expected error_msg:\n%s', response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))), msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))), msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_upload_multiple_event_asset')
# -----
# ----- unit test: test_deployment_to_multiple_assets
# -----
#@unittest.skip('targeting')
@attr('UNIT', group='sa')
def test_deployment_to_multiple_assets(self):
# Test sheet EventAssetMap - assign a single (deployment) event to multiple assets
# (the original step comments referenced test400/test410 files not used by this test;
# the actual inputs are listed below)
# Step 1. load a single spreadsheet with all sheets (test505.xlsx), includes sheet EventAssetMap
# Step 2. load test500-event-to-multiple-assets.xlsx, whose EventAssetMap sheet assigns
# one event to multiple assets
log.debug('\n\n***** Start : test_deployment_to_multiple_assets')
#self._preload_scenario("BETA") # for testing Orgs
verbose = False
breakpoint1A = False
breakpoint2A = False
breakpoint2B = False
interactive = False
if interactive:
verbose = True
breakpoint1A = True
breakpoint2A = True
breakpoint2B = True
# Input and folder(s) and files for driving test
input_files = ['test505.xlsx', 'test500-event-to-multiple-assets.xlsx']
current_file = ''
del_sum_code_space_ids, del_sum_asset_type_ids, del_sum_asset_ids, del_sum_event_ids, del_sum_event_type_ids = [],[],[],[],[]
sum_code_space_ids, sum_asset_type_ids, sum_asset_ids, sum_event_ids, sum_event_type_ids = [],[],[],[],[]
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
pass_count = 0
try:
for fid in input_files:
pass_count += 1
if verbose:
log.debug('\n- - - - - - - - - - - -- - - - - - - - - - -- - - - - - - -' + \
'\n- - - - - - - - - - - - Pass %d - - - - - - - - - - - - - -' + \
'\n- - - - - - - - - - - -- - - - - - - - - - - - - - - - - - ', pass_count)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load marine assets into system from xlsx file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
current_file = TEST_XLS_FOLDER + fid
response = self.load_marine_assets_from_xlsx(current_file)
if response:
if verbose: log.debug('\n\n[unit] response - pass %d: %s', pass_count, response)
if response['status'] != 'ok' or response['err_msg']:
if pass_count != 4:
raise BadRequest('Error in response: %s' % response['err_msg'])
else:
log.debug('\n\n[unit] received expected error_msg:\n%s', response['err_msg'])
if response['res_modified']:
code_space_ids, asset_type_ids, asset_ids, event_type_ids, event_ids = [],[],[],[],[]
if response['res_modified']['codespaces']:
code_space_ids = response['res_modified']['codespaces']
sum_code_space_ids.extend(response['res_modified']['codespaces'][:])
if response['res_modified']['asset_types']:
asset_type_ids = response['res_modified']['asset_types']
sum_asset_type_ids.extend(response['res_modified']['asset_types'][:])
if response['res_modified']['assets']:
asset_ids = response['res_modified']['assets']
sum_asset_ids.extend(response['res_modified']['assets'][:])
if response['res_modified']['event_types']:
event_type_ids = response['res_modified']['event_types']
sum_event_type_ids.extend(response['res_modified']['event_types'][:])
if response['res_modified']['events']:
event_ids = response['res_modified']['events']
sum_event_ids.extend(response['res_modified']['events'][:])
if response['res_removed']:
rem_code_space_ids, rem_asset_type_ids, rem_asset_ids, rem_event_type_ids, rem_event_ids = [],[],[],[],[]
if response['res_removed']['codespaces']:
rem_code_space_ids = response['res_removed']['codespaces'][:]
del_sum_code_space_ids.extend(response['res_removed']['codespaces'][:])
if response['res_removed']['asset_types']:
rem_asset_type_ids = response['res_removed']['asset_types'][:]
del_sum_asset_type_ids.extend(response['res_removed']['asset_types'][:])
if response['res_removed']['assets']:
rem_asset_ids = response['res_removed']['assets']
del_sum_asset_ids.extend(response['res_removed']['assets'][:])
if response['res_removed']['event_types']:
rem_event_type_ids = response['res_removed']['event_types'][:]
del_sum_event_type_ids.extend(response['res_removed']['event_types'][:])
if response['res_removed']['events']:
rem_event_ids = response['res_removed']['events'][:]
del_sum_event_ids.extend(response['res_removed']['events'][:])
# pass one 'add' all resources - full load; asserts specifically for this unit test
if pass_count == 1:
self.assertEqual(1, len(sum_code_space_ids), msg='pass 1: sum_code_space_ids')
self.assertEqual(4, len(sum_asset_ids), msg='pass 1: sum_asset_ids')
self.assertEqual(4, len(sum_asset_type_ids), msg='pass 1: sum_asset_type_ids')
self.assertEqual(8, len(sum_event_ids), msg='pass 1: sum_event_ids')
self.assertEqual(9, len(sum_event_type_ids), msg='pass 1: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 1: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 1: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 1: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 1: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 1: del_sum_event_type_ids')
# pass two - asserts specifically for this unit test
if pass_count == 2:
self.assertEqual(4, len(list(set(sum_asset_ids))), msg='pass 2: sum_asset_ids')
self.assertEqual(4, len(list(set(sum_asset_type_ids))),msg='pass 2: sum_asset_type_ids')
self.assertEqual(8, len(list(set(sum_event_ids))), msg='pass 2: sum_event_ids')
self.assertEqual(9, len(list(set(sum_event_type_ids))),msg='pass 2: sum_event_type_ids')
self.assertEqual(0, len(del_sum_code_space_ids), msg='pass 2: del_sum_code_space_ids')
self.assertEqual(0, len(del_sum_asset_ids), msg='pass 2: del_sum_asset_ids')
self.assertEqual(0, len(del_sum_asset_type_ids), msg='pass 2: del_sum_asset_type_ids')
self.assertEqual(0, len(del_sum_event_ids), msg='pass 2: del_sum_event_ids')
self.assertEqual(0, len(del_sum_event_type_ids), msg='pass 2: del_sum_event_type_ids')
# set breakpoint for testing...
if breakpoint1A:
log.debug('\n\n[unit] verify result of pass %d...', pass_count)
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
# summary and cleanup
total_resources_to_delete = 0
rm_code_space_ids = list(set(sum_code_space_ids))
rm_asset_ids = list(set(sum_asset_ids))
rm_asset_type_ids = list(set(sum_asset_type_ids))
rm_event_ids = list(set(sum_event_ids))
rm_event_type_ids = list(set(sum_event_type_ids))
total_resources_to_delete = len(rm_code_space_ids) + len(rm_asset_ids) + len(rm_asset_type_ids) + \
len(rm_event_ids) + len(rm_event_type_ids)
if verbose: log.debug('\n\n[unit] total number of resources to delete: %d', total_resources_to_delete)
# asserts specifically for this unit test
self.assertEqual(1, len(rm_code_space_ids), msg='cleanup rm_code_space_ids')
self.assertEqual(4, len(rm_asset_ids), msg='cleanup rm_asset_ids')
self.assertEqual(4, len(rm_asset_type_ids), msg='cleanup rm_asset_type_ids')
self.assertEqual(8, len(rm_event_ids), msg='cleanup rm_event_ids')
self.assertEqual(9, len(rm_event_type_ids), msg='cleanup rm_event_type_ids')
self.assertEqual(26, total_resources_to_delete, msg='summary of resources to delete')
# Cleanup all resources (retire/force delete)
total_resources_deleted = 0
if rm_asset_type_ids:
total_resources_deleted += len(rm_asset_type_ids)
for id in rm_asset_type_ids:
self.OMS.force_delete_asset_type(id)
if rm_event_type_ids:
total_resources_deleted += len(rm_event_type_ids)
for id in rm_event_type_ids:
self.OMS.force_delete_event_duration_type(id)
if rm_asset_ids:
total_resources_deleted += len(rm_asset_ids)
for id in rm_asset_ids:
self.OMS.force_delete_asset(id)
if rm_event_ids:
total_resources_deleted += len(rm_event_ids)
for id in rm_event_ids:
self.OMS.force_delete_event_duration(id)
if rm_code_space_ids:
total_resources_deleted += len(rm_code_space_ids)
for code_space_id in rm_code_space_ids:
self.OMS.force_delete_code_space(code_space_id)
if verbose: log.debug('\n\n[unit] total number of resources deleted: %d', total_resources_deleted)
self.assertEqual(total_resources_to_delete, total_resources_deleted, msg='number of resources deleted different from number of resources created')
if breakpoint2B:
if verbose: log.debug('\n\n[unit] verify all resources have been deleted...')
from pyon.util.breakpoint import breakpoint
breakpoint(locals(), globals())
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', current_file, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s)', current_file, exc_info=True)
raise # raise here to fail test case
log.debug('\n\n***** Completed : test_deployment_to_multiple_assets')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# helper functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _get_type_resource_by_name(self, res_name, res_type):
if not res_name:
raise BadRequest('res_name parameter is empty')
if not res_type:
raise BadRequest('res_type parameter is empty')
if res_type != RT.AssetType and res_type != RT.EventDurationType:
raise BadRequest('invalid res_type value (%s)' % res_type)
res_objs, res_keys = self.container.resource_registry.find_resources_ext(alt_id_ns=res_type, alt_id=res_name, id_only=False)
type_resource = ''
if res_keys:
if len(res_keys) == 1:
type_resource = res_objs[0]
return type_resource
def _create_attribute_specification(self, value_type, name, source, constraints, pattern, codeset_name):
if not value_type:
raise BadRequest('value_type is empty')
if not name:
raise BadRequest('name is empty')
if not source:
raise BadRequest('source is empty')
if value_type == 'CodeValue':
if not codeset_name:
raise BadRequest('_create_attribute_specification: if value_type is CodeValue then a codeset_name must be provided')
simple_types = ['BooleanValue', 'IntegerValue', 'RealValue', 'StringValue', 'DateValue', 'TimeValue', 'DateTimeValue']
pattern_string = r'[\w -_]{1,64}'
pattern_real = r'\d*\.?\d*'
pattern_date = r'\d{1,2}/\d{1,2}/(\d{4}|\d{2})'
pattern_time = r'\d{2}:\d{2}'
pattern_datetime = r'\d{1,2}/\d{1,2}/(\d{4}|\d{2}) \d{2}:\d{2}'
pattern_integer = r'\d*'
pattern_codevalue = r'[\w -_]{1,64}'
constraint_string = ''
constraint_real = 'min=0.00, max=10923.00'
constraint_date = ''
constraint_time = ''
constraint_datetime = ''
constraint_integer = ''
constraint_codevalue = ''
if codeset_name:
constraint_codevalue = 'set=MAM:' + codeset_name
attribute_specification = IonObject(OT.AttributeSpecification)
attribute_specification['id'] = name
attribute_specification['description'] = 'some description of ' + name
attribute_specification['value_type'] = value_type
attribute_specification['group_label'] = 'Group Label'
attribute_specification['attr_label'] = 'Some Attribute Label'
attribute_specification['rank'] = '1.1'
attribute_specification['visibility'] = 'True'
attribute_specification['editable'] = 'False'
attribute_specification['journal'] = 'False'
attribute_specification['default_value'] = 'NONE'
attribute_specification['uom'] = ''
attribute_specification['cardinality'] = '1..1'
attribute_specification['_source_id'] = source
if not pattern:
if value_type == 'StringValue':
pattern = pattern_string
elif value_type == 'RealValue':
pattern = pattern_real
elif value_type == 'DateValue':
pattern = pattern_date
elif value_type == 'TimeValue':
pattern = pattern_time
elif value_type == 'DateTimeValue':
pattern = pattern_datetime
elif value_type == 'IntegerValue':
pattern = pattern_integer
elif value_type == 'CodeValue':
pattern = pattern_codevalue
else:
raise BadRequest('_create_attribute_specification unknown value_type to process pattern: %s' % value_type)
if not constraints:
if value_type == 'StringValue':
constraints = constraint_string
elif value_type == 'RealValue':
constraints = constraint_real
elif value_type == 'DateValue':
constraints = constraint_date
elif value_type == 'TimeValue':
constraints = constraint_time
elif value_type == 'DateTimeValue':
constraints = constraint_datetime
elif value_type == 'IntegerValue':
constraints = constraint_integer
elif value_type == 'CodeValue':
constraints = constraint_codevalue
else:
raise BadRequest('_create_attribute_specification unknown value_type to process constraints: %s' % value_type)
attribute_specification['value_constraints'] = constraints
attribute_specification['value_pattern'] = pattern
return attribute_specification
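# Usage sketch (hypothetical argument values): empty constraints/pattern fall
# back to the per-type defaults defined above.
#   spec = self._create_attribute_specification('RealValue', 'depth', 'some_source_id', '', '', '')
#   spec = self._create_attribute_specification('CodeValue', 'asset type', 'some_source_id', '', '', 'asset type')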
def _create_attribute(self, value_type, name, value):
if not value_type:
raise BadRequest('value_type is empty')
if not name:
raise BadRequest('name is empty')
simple_types = ['BooleanValue', 'IntegerValue', 'RealValue', 'StringValue', 'DateValue', 'TimeValue', 'DateTimeValue']
value_string = 'hello world'
value_real = '1.45'
value_date = '12/25/2014'
value_time = '23:17'
value_datetime = '12/25/2014 23:17'
value_integer = '5'
if not value:
if value_type == 'StringValue':
value = value_string
elif value_type == 'RealValue':
value = value_real
elif value_type == 'DateValue':
value = value_date
elif value_type == 'TimeValue':
value = value_time
elif value_type == 'DateTimeValue':
value = value_datetime
elif value_type == 'IntegerValue':
value = value_integer
elif value_type == 'CodeValue':
raise BadRequest('_create_attribute requires a value, when value_type CodeValue')
# Create Attribute
attribute = IonObject(OT.Attribute)
attribute['name'] = name
#log.debug('\n\n[unit] _create_attribute - attribute name: %s', name)
if value_type in simple_types:
log.debug('\n\n[unit] _create_attribute - %s', value_type)
return_value = self.create_value(value)
else:
if value_type == 'CodeValue':
log.debug('\n\n[unit] _create_attribute - CodeValue')
# get valid enumeration value from codespace
code_space_id = self._get_code_space_id('MAM')
if not code_space_id:
raise BadRequest('attribute (\'%s\') processing: unable to determine code space id for code_space' % name)
codespace = self.OMS.read_code_space(code_space_id)
log.debug('\n\n[unit] _create_attribute - step 1')
if not codespace:
raise BadRequest('failed to read codespace (id=\'%s\')' % code_space_id)
codesets = {}
if not codespace.codesets:
raise BadRequest('codesets empty for codespace (id=\'%s\')' % code_space_id)
codesets = codespace.codesets
if name not in codesets:
log.debug('\n\n[unit] _create_attribute - step 2')
if value not in codesets:
tmp = 'asset ' + value
if tmp not in codesets: # look for codeset 'asset type'
event_tmp = 'event ' + value
if event_tmp not in codesets: # look for codeset 'event type'
raise BadRequest('unknown codeset \'%s\'' % value)
else:
value = event_tmp
else:
value = tmp
codeset = codesets[value]
codeset_enum = codeset.enumeration
if not codeset_enum:
raise BadRequest('codeset \'%s\' has empty enumeration' % value)
#get first entry in enumeration for value
if len(codeset_enum) > 0:
value = codeset_enum[0]
else:
raise BadRequest('_create_attribute - codeset enumeration empty')
else:
log.debug('\n\n[unit] _create_attribute - step 3')
codeset = codesets[name]
codeset_enum = codeset.enumeration
if value not in codeset_enum:
raise BadRequest('provide a value which is in codeset \'%s\' enumeration' % name)
return_value = self.create_complex_value(value_type, name, value)
else:
raise BadRequest('\n\n[unit] _create_attribute - unknown value_type: %s' % value_type)
log.debug('\n\n[unit] _create_attribute - step 4')
attribute['value'] = [return_value]
log.debug('\n\n[unit] _create_attribute - step 5')
return attribute
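# Usage sketch (hypothetical argument values): an empty value falls back to the
# per-type sample defaults above; CodeValue needs a value resolvable against the
# 'MAM' codespace enumerations.
#   attr = self._create_attribute('StringValue', 'description', '')
#   attr = self._create_attribute('CodeValue', 'asset type', 'Platform')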
def load_marine_assets_from_xlsx(self, fid):
# unit test helper function
if not fid:
raise BadRequest('fid parameter empty.')
try:
try:
f = open(fid, 'rb')  # xlsx content is binary; read raw bytes before hex-encoding
except:
log.error('failed to open xlsx file for read (file: %s)', fid, exc_info=True)
raise
content = f.read()
response = self.OMS.declare_asset_tracking_resources(binascii.b2a_hex(content),
content_type='file_descriptor.mimetype',
content_encoding='b2a_hex')
f.close()
except BadRequest, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise # raise here to fail test case
except NotFound, Arguments:
log.debug('\n\n[unit] Exception (file: %s): %s', fid, Arguments.get_error_message())
raise
except:
log.error('\n\n[unit] Exception (file: %s):', fid, exc_info=True)
raise # raise here to fail test case
return response
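# Note on the encoding used above: the spreadsheet bytes are hex-encoded with
# binascii.b2a_hex and tagged content_encoding='b2a_hex'; the service side is
# presumably expected to reverse this with binascii.a2b_hex (an assumption --
# only the encoding half is visible here). Round trip in miniature:
#   hex_payload = binascii.b2a_hex(content)      # bytes -> ascii hex
#   original    = binascii.a2b_hex(hex_payload)  # ascii hex -> original bytes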
def unique_altids(self, res_type):
# helper
if not res_type:
raise BadRequest('res_type param is empty')
unique = True
picklist = []
altids = []
if res_type == RT.Asset:
picklist = self.OMS.get_assets_picklist(id_only='False')
else:
picklist = self.OMS.get_events_picklist(id_only='False')
altids = self.OMS.get_altids(res_type)
# test - force error
# picklist[0][2].append('asset:junk')
# verify one and only one altid per resource instance
for id_list in picklist:
if len(id_list[2]) != 1:
unique = False
break
# test - force error
#altids[0].append('asset:junk')
for id_list in altids:
if len(id_list) != 1:
unique = False
break
# compare list of altids for resource instances created to number of resource instances created
if len(picklist) != len(altids):
unique = False
# compare the list of altids for tracking resource instances created to a set() of the same;
# if unique the assert will pass; if non-unique res.alt_ids have been created the assert will fail
if altids:
len_altids = len(altids[0]) # len of all altids
list_altids = []
list_altids = list(set(altids[0])) # len of unique altids
if len(list_altids) != len_altids:
unique = False
return unique
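# Usage sketch: assert every tracked resource instance carries exactly one
# alt_id, e.g.
#   self.assertTrue(self.unique_altids(RT.Asset))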
def create_value(self, value=None) :
# helper
constructor_map = {bool.__name__ : OT.BooleanValue, int.__name__ : OT.IntegerValue, float.__name__ : OT.RealValue, str.__name__ : OT.StringValue}
if not value :
raise BadRequest('value parameter is empty')
if type(value).__name__ not in constructor_map :
raise BadRequest('type of value provided not supported')
return IonObject(constructor_map[type(value).__name__],value=value)
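# Example: create_value(1.45) returns IonObject(OT.RealValue, value=1.45); the
# constructor is selected from the Python type of the value. Note that falsy
# values (False, 0, '') are rejected by the emptiness check above.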
def create_complex_value(self, type=None, name=None, value=None) :
# helper
constructor_map = {'CodeValue' : OT.CodeValue }
#, int.__name__ : OT.IntegerValue, float.__name__ : OT.RealValue, str.__name__ : OT.StringValue}
if not value :
raise BadRequest('value parameter is empty')
if not type :
raise BadRequest('type parameter is empty')
if not name :
raise BadRequest('name parameter is empty')
if type not in constructor_map :
raise BadRequest('type provided is not supported')
return IonObject(constructor_map[type], value=value)
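# Example (hypothetical codeset value):
#   cv = self.create_complex_value('CodeValue', 'asset type', 'Platform')
# returns IonObject(OT.CodeValue, value='Platform'); only CodeValue is mapped here.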
def _get_code_space_id(self, code_space_name):
"""If CodeSpace exists, return id.
@param code_space_name str # marine asset management code space name ('MAM')
@retval code_space_id str # unique sys uuid4 of code space, else ''
@throws BadRequest 'code_space_name parameter is empty'
@throws BadRequest 'unable to locate CodeSpace instance (named \'%s\')'
"""
if not code_space_name:
raise BadRequest('code_space_name parameter is empty')
code_space_id = ''
try:
res_objs, res_keys = self.container.resource_registry.find_resources_ext(alt_id_ns=RT.CodeSpace,
alt_id=code_space_name, id_only=False)
if res_keys:
if len(res_keys) == 1:
code_space_id = res_keys[0]['id']
key = res_keys[0]
except BadRequest, Arguments:
raise BadRequest(Arguments.get_error_message())
except:
raise BadRequest('unable to locate CodeSpace instance (named \'%s\')' % code_space_name)
return code_space_id
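# Usage sketch: code_space_id = self._get_code_space_id('MAM'); returns '' if
# the 'MAM' CodeSpace has not been loaded into the resource registry yet.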
| avg_line_length: 58.171989 | max_line_length: 193 | alphanum_fraction: 0.557378 |
| [qsc_* quality-signal columns for the file above -- headline values: num_words 61,917; num_chars/size_file_byte 542,861; mean_word_length 4.590419; num_lines 9,331; num_chars_line_max 194; frac_chars_whitespace 0.322191; frac_chars_alphabet 0.763173; frac_chars_comments 0.146122; remaining per-column numeric values garbled in extraction and condensed here] |
| hexsha: 7012a77ae461c5f10715170fa92024a704c29870 | size: 118,240 | ext: py | lang: Python |
| max_stars_repo_path: main.py | max_stars_repo_name: BabyWrassler/science-sub | max_stars_repo_head_hexsha: eff438acea51e8a1c86292dc9d2322ad8625c60e | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min/max_datetime: null |
| max_issues_repo_path: main.py | max_issues_repo_name: BabyWrassler/science-sub | max_issues_repo_head_hexsha: eff438acea51e8a1c86292dc9d2322ad8625c60e | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2020-07-22T00:07:29.000Z | max_issues_repo_issues_event_max_datetime: 2020-07-29T01:59:26.000Z |
| max_forks_repo_path: main.py | max_forks_repo_name: BabyWrassler/science-sub | max_forks_repo_head_hexsha: eff438acea51e8a1c86292dc9d2322ad8625c60e | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min/max_datetime: null |
| content: |
@namespace
class SpriteKind:
Immunity = SpriteKind.create()
Badge = SpriteKind.create()
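# useImmunity: restore the sub sprite's base image and, if the badge for the
# active immunity has not been awarded yet, spawn its badge sprite in the
# top-left badge row, add 300 points, clear the active immunity, and nudge the
# level and music tempo up. (Summary inferred from the code below.)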
def useImmunity():
global new_badge, current_immunity, level
sub.set_image(img("""
. . . . . . . . . . . f f f f f f . . . . . . . . . . . . . . .
. . . . . . . . . . f d e d e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d d e e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d e d e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d d e e e e f . . . . . . . . . . . . . .
. . f f f f f f f f d d e d e e e e f f f f f f f f f f f f . .
. f f c f d e d e e e e e e e e e e e e e e e e d e d f c f f .
f f c b f e d e e d d e e e e e e e e e e d d e e d e f b c f f
f c b b f d e e d c b d e e e e e e e e d c b d e e d f b b c f
f b b b f e e d c b b b d e e e e e e d c b b b d e e f b b b f
f f f f f e e d b b b b d e e e e e e d b b b b d e e f f f f f
f b b b f e e e d b b d e e e e e e e e d b b d e e e f b b b f
f b b b f e d e d d d e d e d e d e d e d d d e d e d f b b b f
f f b b f d e d e d e d e d e d e d e d e d e d e d e f b b f f
. f f b f d d d d d d d d d d d d d d d d d d d d d d f b f f .
. . f f f f f f f f f f f f f f f f f f f f f f f f f f f f . .
"""))
if immunity_badge_awarded[current_immunity] == 0:
new_badge = sprites.create(immunity_badge_list[current_immunity], SpriteKind.Badge)
new_badge.set_position(12 * current_immunity + 4, 4)
info.change_score_by(300)
current_immunity = -1
level += 0.1
music.change_tempo_by(20)
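# displayDialog: install the custom dialog frame image below, then show the
# given text in a centered long-text dialog.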
def displayDialog(text: str):
game.set_dialog_frame(img("""
. . e e e e e e e e e e e e e e e e e e e e . .
. e 4 4 e e 4 4 e e 4 4 e e 4 4 e e 4 4 e e e .
e e e 4 4 e e 4 4 e e 4 4 e e 4 4 e e 4 4 e 4 e
e e 4 e e e e e e e e e e e e e e e e e e 4 4 e
e 4 4 e e b b b b b b b b b b b b b b e e 4 e e
e 4 e e b b b b b b b b b b b b b b b b e e e e
e e e e b b b b b b b b b b b b b b b b e e 4 e
e e 4 e b b b b b b b b b b b b b b b b e 4 4 e
e 4 4 e b b b b b b b b b b b b b b b b e 4 e e
e 4 e e b b b b b b b b b b b b b b b b e e e e
e e e e b b b b b b b b b b b b b b b b e e 4 e
e e 4 e b b b b b b b b b b b b b b b b e 4 4 e
e 4 4 e b b b b b b b b b b b b b b b b e 4 e e
e 4 e e b b b b b b b b b b b b b b b b e e e e
e e e e b b b b b b b b b b b b b b b b e e 4 e
e e 4 e b b b b b b b b b b b b b b b b e 4 4 e
e 4 4 e b b b b b b b b b b b b b b b b e 4 e e
e 4 e e b b b b b b b b b b b b b b b b e e e e
e e e e b b b b b b b b b b b b b b b b e e 4 e
e e 4 e e b b b b b b b b b b b b b b e e 4 4 e
e 4 4 e e e e e e e e e e e e e e e e e e 4 e e
e 4 e 4 4 e e 4 4 e e 4 4 e e 4 4 e e 4 4 e e e
. e e e 4 4 e e 4 4 e e 4 4 e e 4 4 e e 4 4 e .
. . e e e e e e e e e e e e e e e e e e e e . .
"""))
game.show_long_text(text, DialogLayout.CENTER)
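# subImmuneByStudyingAnimal: if another immunity was already active, mark its
# badge slot with an 'X'-pattern sprite, then switch the sub sprite to the
# image for the newly studied animal and record it as the active immunity.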
def subImmuneByStudyingAnimal(num: number):
global new_badge, current_immunity
if current_immunity > -1:
new_badge = sprites.create(img("""
4 . . . . . . 4
. 4 . . . . 4 .
. . 4 . . 4 . .
. . . 4 . . . .
. . . . 4 . . .
. . 4 . . 4 . .
. 4 . . . . 4 .
4 . . . . . . 4
"""),
SpriteKind.Badge)
new_badge.set_position(12 * current_immunity + 4, 4)
sub.set_image(immunity_sub_image_list[num])
current_immunity = num
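# displayStartScreen: set the backdrop color and draw the title-screen
# background image (pixel-art lettering) shown below.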
def displayStartScreen():
scene.set_background_color(4)
scene.set_background_image(img("""
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d d d d d d d d d d d d d e e e e e e e e e e . . . . . . . . . d d d d d d d d e e e e e e e e e e d d d d d d d d d d . . . e e e e e e e e e d d d d d d d d d d d d d e e e e e e e e e e . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d d d d d d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d d d d d d d d d d d d d e e e e e e e e e e . . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e d d d d d d . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e . . . . e e e e e e e e e d . . . . . . . . . . . . e e e e e d d d d d d . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . d d d e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . e e e e e e e e e e e e e e e e e e e e e e e d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . d d d d d d d d d d d d d d d d d d d d d d d d d d d . . . . . . . . . . . . . d d d d d d d d d d d d d d d d d d d d d d d d d d d d . . . . . . . . d d d d d d d d d d d d d d d d d d d d d d d . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e . . . . . . . e e e e e e . . . . e e e e e e e e e e e e e e e e . . e e e e e . . . . . . . e e e e e e . . . . e e e e e e e e e e e e e e e e . . . . . . . e e e e e e e e e e . . . . . e e e e e e e e e e e e e e e . . . . . . . e e e e e e . . . . e e e e e e . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . . . . . . e e e e e e e e e e d . . . . e e e e e e e e e e e e e e e d . . . . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . . . . . . e e e e e e e e e e d . . . . e e e e e e e e e e e e e e e d . . . . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e . . e e e e e e e e d . . . . d d d d e e e e e e d d d d d d . e e e e e e e e . . e e e e e e e e d . . . . d d d d e e e e e e d d d d d d . . . e e e e e e d d d d e e e e e e . . e e e e e d d d d d d d e e e e e e . . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e d . e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e e e e d . e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . . . e e e e e e d . . . e e e e e e d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e d . e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e e e e d . e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . . . e e e d d d d . . . e e e e e e d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . d d d d d d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . e e e e e e d . . . e e e e e e d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . . . . . . . . e e e e e d . . . . e e e e e e e e d . . . . d d e e e e e e e e e e d d d d . . . . . . . . . . .
. . . . . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e e e e e e e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . . . . . . . . e e e e e d . . . . e e e e e e e e d . . . . . . e e e e e e e e e e d . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d d e e e d d e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d d e e e d d e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . . . . . . . . e e e e e d . . . . e e e d d d d d d . . . . . . e e e e e e e e d d d . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . e e e d . e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . e e e d . e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . . . . . . . . e e e e e e e e e e e e e d . . . . . . . . . . . . d e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . e e e d . e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . e e e d . e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . . . . . . . . . e e e e e e e e e e e e e d . . . . . . . . . . . . . e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . d d d . e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . d d d . e e e e e e d . . . . . . . . e e e e e e d . . . . . . . d e e e e e e . . . . e e e e e e . . e e e e e d d e e e e e e e e . . . . . . . . . . . . e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . . . . . . e e e e e e d . . . . . . e e e e e d . . . . . . e e e e e e d . . . . . . . . e e e e e e d . . . . . . . . e e e e e e d . . . e e e e e e d . e e e e e d . e e e e e e e e d . . . . . . . . . . . e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . . . . . . e e e e e e e e e e e . . e e e e e d . . . . . . e e e e e e d . . . . . . . . e e e e e e e e e e e . . . . e e e e e e d . . . e e e d d d d . e e e e e d . e e e e e e e e e e e . . . . . . . . . e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . . . . d d e e e e e e e e e e d . . . . e e e e e d . . d d e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . e e e e e d . . . . . . e e e e e e d . . . e e e e e e e e e e e e e e e e d . . . . . . e e e e e e e e e e d . . . . e e e e e d . . . . e e e e e e e e d . . . . . . . . e e e e e e d . . . . . . . . . . . . . . . .
. . . . . . . . . . . d d d d d . . . . . . . d d d d d d . . . . d d d d d d d d d d d d d d d d . . d d d d d . . . . . . . d d d d d d . . . . d d d d d d d d d d d d d d d d . . . . . . . d d d d d d d d d d . . . . . d d d d d . . . . . d d d d d d d d . . . . . . . . . d d d d d d . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
"""))
game.set_dialog_frame(img("""
. . . . . . . . . . . . . . .
. . . . . . . . . . . . . . .
. e e e e e e e e e e e e e .
e e 4 4 4 4 4 4 4 4 4 4 4 e e
e 4 4 4 4 4 4 4 4 4 4 4 4 4 e
e 4 4 e e e e e e e e e 4 4 e
e 4 4 e e e e e e e e e 4 4 e
e 4 4 e e e e e e e e e 4 4 e
e 4 4 e e e e e e e e e 4 4 e
e 4 4 e e e e e e e e e 4 4 e
e 4 4 4 4 4 4 4 4 4 4 4 4 4 e
e e 4 4 4 4 4 4 4 4 4 4 4 e e
. e e e e e e e e e e e e e .
. . . . . . . . . . . . . . .
. . . . . . . . . . . . . . .
"""))
game.show_long_text("Venture into the sea, study the animals. Press \"A\" to start.",
DialogLayout.BOTTOM)
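# Called when the sub overlaps a shark. With an immunity active the shark is
# caught for bonus points; without one, the game ends.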
def sharkEncountered(mySprite: Sprite):
if current_immunity > -1:
music.magic_wand.play()
displayDialog("" + immunity_text_list[current_immunity] + " You caught this shark!")
num_caught_list[current_immunity] = 0
num_caught_list[9] = num_caught_list[9] + 1
info.change_score_by(50)
useImmunity()
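        # Win condition: index 9 tracks sharks; the ninth catch ends the game in victory.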
if num_caught_list[9] == 9:
music.stop_all_sounds()
game.over(True, effects.bubbles)
mySprite.destroy()
else:
sub.say("Ack, teeth!", 2000)
game.over(False)
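# Route every player/projectile overlap to the shark or non-shark handler,
# based on the sprite's "species" data string.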
def on_on_overlap(sprite, otherSprite):
if sprites.read_data_string(otherSprite, "species") == "Shark":
sharkEncountered(otherSprite)
else:
nonSharkEncountered(otherSprite)
sprites.on_overlap(SpriteKind.player, SpriteKind.projectile, on_on_overlap)
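# Called for every non-shark animal: count the catch, and once enough of one
# species has been studied, grant the matching immunity.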
def nonSharkEncountered(mySprite: Sprite):
global animal_caught_species_id_number, num_animals_caught
music.ba_ding.play()
sub.start_effect(effects.trail, 500)
animal_caught_species_id_number = sprites.read_data_number(mySprite, "animal_index")
num_animals_caught = num_caught_list[animal_caught_species_id_number]
if num_animals_caught < animals_needed_to_learn_immunity - 1:
num_caught_list[animal_caught_species_id_number] = num_animals_caught + 1
sub.say("" + sprites.read_data_string(mySprite, "species") + " #" + str((num_animals_caught + 1)),
500)
elif num_animals_caught == animals_needed_to_learn_immunity - 1:
num_caught_list[animal_caught_species_id_number] = animals_needed_to_learn_immunity
sub.say("" + sprites.read_data_string(mySprite, "species") + " #" + str((num_animals_caught + 1)),
500)
subImmuneByStudyingAnimal(animal_caught_species_id_number)
else:
sub.say("" + sprites.read_data_string(mySprite, "species") + str(num_animals_caught) + "spent",
500)
mySprite.destroy()
# Faster animals are worth more points.
info.change_score_by(animal_caught_species_id_number)
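# Populate the parallel per-species arrays (immunity text, badge and sub
# overlay images, animal sprites, names, speeds); index 9 is reserved for the shark.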
def fillAnimalArrays():
global immunity_text_list, immunity_badge_list, immunity_sub_image_list, animal_image_list, animal_names, animal_speed_list, num_caught_list, immunity_badge_awarded
immunity_text_list = ["Studying the turtle, you learned to harden your shell.",
"Studying the crab, you learned to use pinchers.",
"Studying the green fish, you learned to blend into the grass.",
"Studying the octopus, you learn to use tentacles.",
"Studying the pink fish, you learn to blend into the coral.",
"Studying the narwhal, you learn to use a horn defensively.",
"Studying the ray, you learn to use a stinger, and blend into the bottom.",
"Studying the whale, you learn to use your size to your advantage.",
"Studying the pufferfish, you learn how spines deter predators from eating you."]
immunity_badge_list = [img("""
. . . 9 9 . . .
. . 9 7 8 9 . .
. 9 7 8 7 8 9 .
. 9 8 7 8 7 9 .
. 9 7 8 7 8 9 .
. 9 8 7 8 7 9 .
. . 9 8 7 9 . .
. . . 9 9 . . .
"""),
img("""
. . 4 4 . 4 . .
. 4 4 . . . 4 .
. 4 . . . 4 4 .
. 4 4 . . . 4 .
. 4 . . . 4 4 .
. 4 4 . . . 4 .
. 4 4 4 4 4 4 .
. . 4 4 4 4 . .
"""),
img("""
. . 9 . . 9 . .
. . 9 . . 9 . .
. 9 . . . . 9 .
. 9 . 9 . . 9 .
. 9 . . 9 . 9 .
. . 9 . 9 . 9 9
. . 9 . 9 . . 9
. . 9 . 9 . . 9
"""),
img("""
. . . f f f . .
. . f f f f f f
. . f f f f f f
. f f f f f f f
f f f f f f f .
f f f f f f f .
. f f f f f . .
. . . . f f . .
"""),
img("""
. . . . . . . .
. 3 3 . 3 3 . 2
3 3 2 2 2 3 2 .
3 6 b 3 2 3 2 3
3 6 3 2 2 3 6 3
3 6 3 3 6 3 3 6
. 3 2 3 2 6 3 6
. 3 6 . 2 2 3 .
"""),
img("""
. . . . 6 . . .
. . . 6 6 . . .
. . . 6 5 . . .
. . . 5 6 . . .
. . . 6 6 . . .
. . . 6 5 . . .
. . . 5 6 . . .
. . . 6 6 . . .
"""),
img("""
. . . . . . . b
. . . . . . b .
. . . . . b . .
. . . . b . . .
. . . b . . . .
. . b . . . . .
. b . . . . . .
b . . . . . . .
"""),
img("""
2 2 2 2 2 2 2 2
2 2 6 2 6 2 6 2
2 6 b b b b 2 2
2 2 b a a b 2 2
2 2 b a a b 2 2
2 2 b b b b 2 2
2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2
"""),
img("""
e . . e . . . e
. e . e . . e .
. . e e e e . .
e e e d d e e e
. . e d d e . .
. . e e e e . .
. e . . e . e .
e . . . e . . e
""")]
immunity_sub_image_list = [img("""
. . . . . . . . . . . 9 9 9 9 9 9 . . . . . . . . . . . . . . .
. . . . . . . . . . 9 7 8 7 8 9 8 9 . . . . . . . . . . . . . .
. . . . . . . . . . 9 7 7 8 9 8 9 9 . . . . . . . . . . . . . .
. . . . . . . . . . 9 7 8 9 8 7 8 9 . . . . . . . . . . . . . .
. . . . . . . . . . 9 7 9 8 7 8 8 9 . . . . . . . . . . . . . .
. . 9 9 9 9 9 9 9 9 7 9 8 7 8 8 8 8 9 9 9 9 9 9 9 9 9 9 9 9 . .
. 9 9 c 9 7 8 7 9 8 9 8 7 8 8 8 8 8 8 8 9 8 9 8 7 8 7 9 c 9 9 .
9 9 c b 9 8 7 9 8 7 7 9 8 8 8 8 8 8 8 9 8 7 7 9 8 7 8 9 b c 9 9
9 c b b 9 7 9 8 7 c b 7 9 8 8 8 8 8 9 8 7 c b 7 9 8 7 9 b b c 9
9 b b b 9 9 8 7 c b b b 7 9 8 8 8 9 8 7 c b b b 7 9 8 9 b b b 9
9 9 9 9 9 9 8 7 b b b b 7 8 9 8 9 8 8 7 b b b b 7 8 9 9 9 9 9 9
9 b b b 9 8 9 8 7 b b 7 8 8 8 9 8 8 8 8 7 b b 7 8 9 8 9 b b b 9
9 b b b 9 7 8 9 7 7 7 8 7 8 9 8 9 8 7 8 7 7 7 8 9 8 7 9 b b b 9
9 9 b b 9 8 8 8 9 7 8 7 8 9 8 8 8 9 8 7 8 7 8 9 8 8 8 9 b b 9 9
. 9 9 b 9 7 8 7 8 9 7 7 9 7 8 7 8 7 9 7 7 7 9 8 7 8 7 9 b 9 9 .
. . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . .
"""),
img("""
. . . . . . . . . . . f f f f f f . . . . . . . . . . . . . . .
. . . . . . . . . . f d e d e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d d e e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d e d e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d d e e e e f . . . . . . . . . . . . . .
. . f f f f f f f f d d e d e e e e f f f f f f f f f f f f . .
. f f 4 4 4 4 d e e e e e e e e e e e e e e e e d 4 4 4 4 f f .
f f 4 4 4 4 4 4 e d d e e e e e e e e e e d d e 4 4 4 4 4 4 f f
f 4 4 f f 4 4 4 d c b d e e e e e e e e d c b d 4 4 4 f f 4 4 f
f 4 f . . f 4 4 c b b b d e e e e e e d c b b b 4 4 f . . f 4 f
. . . . . f 4 4 b b b b d e e e e e e d b b b b 4 4 f . . . . .
f 4 f . . f 4 4 d b b d e e e e e e e e d b b d 4 4 f . . f 4 f
f 4 4 f f 4 4 4 d d d e d e d e d e d e d d d e 4 4 4 f f 4 4 f
f f 4 4 4 4 4 4 e d e d e d e d e d e d e d e d 4 4 4 4 4 4 f f
. f f 4 4 4 4 d d d d d d d d d d d d d d d d d d 4 4 4 4 f f .
. . f f f f f f f f f f f f f f f f f f f f f f f f f f f f . .
"""),
img("""
. . . . . . . . . . . 7 8 7 8 7 8 . . . . . . . . . . . . . . .
. . . . . . . . . . 7 7 7 7 7 7 7 7 . . . . . . . . . . . . . .
. . . . . . . . . . 7 8 8 8 8 8 8 7 . . . . . . . . . . . . . .
. . . . . . . . . . 7 7 7 7 7 7 7 7 . . . . . . . . . . . . . .
. . . . . . . . . . 7 8 8 8 8 8 8 7 . . . . . . . . . . . . . .
. . 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 . .
. 7 7 c 7 7 8 7 7 7 7 7 7 8 7 8 7 7 8 7 7 7 7 7 7 8 8 7 c 7 7 .
7 7 c b 7 8 8 7 7 8 8 7 7 8 7 7 8 7 8 7 7 8 8 7 7 8 7 7 b c 7 7
7 c b b 7 7 8 7 8 c b 8 7 8 7 8 7 7 8 7 8 c b 8 7 8 8 7 b b c 7
7 b b b 7 8 8 7 c b b b 7 8 7 7 8 7 8 7 c b b b 7 8 7 7 b b b 7
7 7 7 7 7 7 8 7 b b b b 7 8 7 8 7 7 8 7 b b b b 7 8 8 7 7 7 7 7
7 b b b 7 8 8 7 8 b b 8 7 8 7 7 8 7 8 7 8 b b 8 7 8 7 7 b b b 7
7 b b b 7 7 8 7 7 8 8 7 7 8 7 8 7 7 8 7 7 8 8 7 7 8 8 7 b b b 7
7 7 b b 7 8 8 7 7 7 7 7 7 8 7 7 8 7 8 7 7 7 7 7 7 8 7 7 b b 7 7
. 7 7 b 7 7 7 8 7 8 7 8 7 8 7 8 7 8 7 7 8 7 8 7 8 7 8 7 b 7 7 .
. . 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 . .
"""),
img("""
. . . f f f f f f f f f f f f f f f f f f f . . . . . . f f f .
. . f f d e d e e e e e d d d e e d e e e f f f f f f f f e e f
. f f e d d e e e e e d e d e e e e e d e d e e e d e e e d e f
f f e d c b d e e d e e e e d f f f f f f f e d e e e d e f f .
f e d c b b b d e e e e e e e f . . . . . f f f f f f f f f . .
f e d b b b b d e e d d e e e f . . . . . . . . . . . . . . . .
f d e d b b d e e e e e e d e f . . f f f f f f f f f f f f f .
f e e d d d e d e e d d e d e f f f f e d e e e d e e e d e e f
f e e e d d e e e e d d e e e e e e d e e e d e e e d e e e d f
f e e d c b d e e e e e e d e e d e f f f f f f f f f f f f f .
f e d c b b b d e e d d e e e f f f f . . . . . . . . . . . . .
f e d b b b b d e e e e e e e f . . . . f f f f f f f f . . . .
f f e d b b d e e e e e e e d f f f f f f e d e e e d f f f f .
. f f d d d e d d d e d e d e e d e e e d e e e d e e e e d e f
. . f f d d d d d d d d d d d e e e d e e f f f f f f d e e e f
. . . f f f f f f f f f f f f f f f f f f f . . . . f f f f f .
"""),
img("""
. . . . . . . . . . . 5 5 5 5 5 5 . . . . . . . . . . . . . . .
. . . . . . . . . . 5 6 3 6 3 3 3 5 . . . . . . . . . . . . . .
. . . . . . . . . . 5 6 6 3 3 3 3 5 . . . . . . . . . . . . . .
. . . . . . . . . . 5 6 3 6 3 3 3 5 . . . . . . . . . . . . . .
. . . . . . . . . . 5 6 6 3 3 3 3 5 . . . . . . . . . . . . . .
. . 5 5 5 5 5 5 5 5 6 6 3 6 3 3 3 3 5 5 5 5 5 5 5 5 5 5 5 5 . .
. 5 5 c 5 6 3 6 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 6 3 6 5 c 5 5 .
5 5 c b 5 3 6 3 3 6 6 3 3 3 3 3 3 3 3 3 3 6 6 3 3 6 3 5 b c 5 5
5 c b b 5 6 3 3 6 c b 6 3 3 3 3 3 3 3 3 6 c b 6 3 3 6 5 b b c 5
5 b b b 5 3 3 6 c b b b 6 3 3 3 3 3 3 6 c b b b 6 3 3 5 b b b 5
5 5 5 5 5 3 3 6 b b b b 6 3 3 3 3 3 3 6 b b b b 6 3 3 5 5 5 5 5
5 b b b 5 3 3 3 6 b b 6 3 3 3 3 3 3 3 3 6 b b 6 3 3 3 5 b b b 5
5 b b b 5 3 6 3 6 6 6 3 6 3 6 3 6 3 6 3 6 6 6 3 6 3 6 5 b b b 5
5 5 b b 5 6 3 6 3 6 3 6 3 6 3 6 3 6 3 6 3 6 3 6 3 6 3 5 b b 5 5
. 5 5 b 5 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 5 b 5 5 .
. . 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 . .
"""),
img("""
. . . . . . . . . . . . . f f f f f f . . . . . . . . . . . . .
. . . . . . . . . . . . f d e d e e e f . . . . . . . . . . . .
. . . . . . . . . . . . f d d e e e e f . . . . . . . . . . . .
. . . . . . . . . . . . f d e d e e e f . . . . . . . . . . . .
. . . . . . . . . . . . f d d e e e e f . . . . . . . . . . . .
. . . . . . f f f f f f e d e d e e e d f f f f f f . . . . . .
. . . . . f f c f d e d e e e e e e e e e d d f c f f . . . . .
. . . . f f c b f e e e d d e e e e d d e e e f b c f f . . . .
. . . . f c b b f d e d c b d e e d c b d e d f b b c f . . . .
. . . . f b b b f e d c b b b d d c b b b d e f b b b f . . . .
f f f f f f f f f e d b b b b d d b b b b d e f f f f f f f f f
. . . . f b b b f e e d b b d e e d b b d e e f b b b f . . . .
. . . . f b b b f e e d d d e d e d d d e d d f b b b f . . . .
. . . . f f b b f d e d e d e d e d e d e d e f b b f f . . . .
. . . . . f f b f d d d d d d d d d d d d d d f b f f . . . . .
. . . . . . f f f f f f f f f f f f f f f f f f f f . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . . . . . f f f f f f . . . . . . .
. . . . . . . . . . . . . . . . . . f c b b b b c f . . . . . .
. . . . . . . . . . . . . . . . . f c b c b b b b c f . . . . .
f f f . . . . . . . . . . . . . f f f f f f f f f f f f . . . .
. . . f f f f . . . . . . . . f e d e e e e e e e e e d f . . .
. . . . . . . f f f f . . . f e d e d e d e d e d e d e d f . .
. . . . . . . . . . . f f f d d d d d d d d d d d d d d d d f .
. . . . . . . . . . . . . f f f f f f f f f f f f f f f f f f f
"""),
img("""
. . f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f . . .
. f f c f d e d e e e e e e d d d e e e e e e d d d d e e e e e e d e d f c f f . .
f f c c f e d e e e e e e e e d d d e e e e d d d d e e e e e e e e d e f c c f f .
f c c b f e e e e e e e e e e e d d d d d d d d d e e e e e e e e e e e f b c c f .
f c b b f e e e e d d d d e e e e d d d d d d d e e e e d d d d e e e e f b b c f .
f b b b f d e e d c b c b d e e e e e e e e e e e e e d c b c b d e e d f b b b f .
f b b b f e e d c b c b b b d e e e e d d d e e e e d c b c b b b d e e f b b b f .
f b b b f e e d c b b b b b d e e e e e e e e e e e d c b b b b b d e e f b b b f .
f b b b f e e d b b b b b b d e d d d d d d d d d e d b b b b b b d e e f b b b f .
f b b b f e e d b b b b b b d e e e e e e e e e e e d b b b b b b d e e f b b b f .
f b b b f e e d b b b b b b d e e e e d d d e e e e d b b b b b b d e e f b b b f .
f b b b f e e e d b b b b d e e e e e e e e e e e e e d b b b b d e e e f b b b f .
f b b b f e d e d d d e d e d e e d e d e d e d e d e d e d d d e d e d f b b b f .
f f b b f d e d e d e d e d e d d . d . d . d . d e d e d e d e d e d e f b b f f .
. f f b f d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d f b f f . .
. . f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f f . . .
"""),
img("""
. . . . . f . . . . . . . . . . . . . . . . f . . . . . . . . .
. . . f . . f . . . . f f f f f f . . . . f . . . f . . . . . .
. f . . f . . f . . f d e d e e e f . . f . . . f . . f . . . .
. f . . f . . f . . f d d e e e e f . . f . . . f . . f . . f .
. . f . . f . f . . f d e d e e e f . . f . . f . . f . . f . .
. . f . . f . . f . f d d e e e e f . . f . . f . . f . f . . .
f . f f f f f f f f d d e e e e e e f f f f f f f f f f f f . .
. f f c f d e d e e e e e e e e e e e e e e e e d e d f c f f .
f f c b f e d e e d d e e e e e e e e e e d d e e d e f b c f f
f c b b f d e e d c b d e e e e e e e e d c b d e e d f b b c f
f b b b f e e d c b b b d e e e e e e d c b b b d e e f b b b f
f f f f f e e d b b b b d e e e e e e d b b b b d e e f f f f f
f b b b f e e e d b b d e e e e e e e e d b b d e e e f b b b f
f b b b f e d e d d d e e e d e d e e e d d d e d e d f b b b f
f f b b f d e d e d e d e d e d e d e d e d e d e d e f b b f f
. f f b f d d d d d d d d d d d d d d d d d d d d d d f b f f .
. . f f f f f f f f f f f f f f f f f f f f f f f f f f f f . .
. . . . f . . . f . . . f . . f . . . f . . . f . . . f . . . .
. . . f . . . f . . . f f . . f . . . f . . . . f . . . f . . .
. . f . . . f . . . . f . . . f . . . . f . . . . f . . . f . .
. f . . . f . . . . f . . . . f . . . . . f . . . . f . . . f .
. . . . f . . . . f . . . . . f . . . . . f . . . . . f . . . .
. . . f . . . . . f . . . . . f . . . . . . f . . . . . f . . .
. . . . . . . . f . . . . . . f . . . . . . . f . . . . . . . .
""")]
animal_image_list = [img("""
. . . . . 7 7 7 . . . . . . . .
. . . . 7 7 7 . . . . . . . . .
. . . . 7 7 . . . . . 7 7 7 7 .
. . . . 7 8 . . . . . 7 7 7 7 7
. . . . 7 8 8 9 9 8 8 7 7 8 . .
. . . . 8 9 9 8 8 7 7 9 8 . . .
. f 7 7 8 9 9 8 8 7 7 9 9 . . .
7 7 7 7 7 8 7 7 9 9 8 8 7 7 . .
7 7 7 7 7 8 7 7 9 9 8 8 7 7 . .
. f 7 7 8 9 9 8 8 7 7 9 9 . . .
. . . . 8 9 9 8 8 7 7 9 8 . . .
. . . . 7 8 8 9 9 8 8 7 7 8 . .
. . . . 7 8 . . . . . 7 7 7 7 7
. . . . 7 7 . . . . . . 7 7 7 .
. . . . 7 7 7 . . . . . . . . .
. . . . . . 7 7 . . . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . .
. . . 4 4 . . . . . . . . . . .
. . 4 4 . . . . . . . 4 4 . . .
. . 4 4 . . 4 . . . . . 4 4 . .
. . 4 4 4 4 4 . . 4 . . 4 4 . .
. 4 4 4 4 4 . . . 4 4 4 4 4 . .
. 4 4 . . . . . . . 4 4 4 4 4 .
. 4 . . . . . . . . . . . . 4 .
. 4 4 4 4 4 4 4 4 4 4 4 4 4 4 .
. . 4 4 4 4 f 4 f 4 4 4 4 4 . .
. . . 4 4 4 4 4 4 4 4 4 4 . . .
. . 4 4 4 4 4 4 4 4 4 4 4 4 . .
. 4 4 . 4 . . . . . . 4 . 4 4 .
. 4 . . 4 . . . . . . 4 . . 4 .
. . 4 . . 4 . . . . 4 . . 4 . .
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . .
. . . . . . . . . . . 8 . . . .
. . . . . . . . . 9 8 8 . . . .
. . . . . . . 8 8 9 8 8 . . . 9
. . . . . 8 9 8 8 9 8 8 . . 9 9
. . . 8 8 8 9 8 8 9 8 8 . 9 9 9
. 8 8 f 8 f 9 8 8 9 8 8 9 9 9 9
9 8 8 8 8 8 9 8 8 9 8 8 9 9 9 9
. 8 8 8 8 8 9 8 8 9 8 8 9 9 9 9
. . . 8 8 8 9 8 8 9 8 8 . 9 9 9
. . . . . 8 9 8 8 9 8 8 . . 9 9
. . . . . . . 8 8 9 8 8 . . . 9
. . . . . . . . . 9 8 8 . . . .
. . . . . . . . . . . 8 . . . .
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . 5 5 5 5 . . . . . .
. . . . . 5 4 4 4 4 5 . . . . .
. 4 . . 5 4 4 4 4 4 4 5 . . . .
. 4 . 5 4 4 f 4 f 4 4 4 5 . . 4
4 . . 5 4 4 4 4 4 4 4 4 5 . 4 .
4 . 4 5 4 4 4 4 4 4 4 4 5 . . 4
. 4 . 5 4 4 4 4 4 4 4 4 5 4 . 4
. . . 4 5 4 4 4 4 4 4 5 . 4 4 .
. . 4 4 . 5 4 4 4 4 5 4 4 . . .
. . 4 . . 4 5 5 5 5 4 . . 4 . .
. 4 . . 4 . . 4 . 4 . 4 . . 4 .
4 . . 4 . . 4 . . 4 . 4 . . 4 .
4 . 4 4 . 4 4 . 4 . . . 4 . . 4
4 . 4 . . 4 . . 4 . . . 4 . . 4
. . . 4 . . 4 . . 4 . . . 4 . .
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . .
. . . . . . . 2 2 2 2 . . . . .
. . . . . . 2 2 2 2 . . . . . .
. . . . . 1 1 2 2 . . . . . . .
. . . . 1 1 1 1 1 . . . . . . .
. . . 1 1 1 1 2 1 1 1 . . . 2 2
. . 1 1 1 1 2 1 1 1 2 1 . 2 2 2
. . 1 f 1 f 1 2 1 2 1 1 2 2 2 .
. 1 1 1 1 1 2 1 1 1 2 1 2 2 2 .
1 1 1 1 1 1 1 2 1 2 1 1 2 2 2 .
. 1 1 1 1 1 2 1 1 1 2 1 . 2 2 2
. . 1 1 1 1 1 2 1 1 1 . . . 2 2
. . . 1 1 1 1 1 1 2 . . . . . .
. . . . 1 1 1 2 2 2 2 . . . . .
. . . . . . . . . 2 2 2 . . . .
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . 6 . . . . . . . . .
. . . . . . 6 . . . . . . . . .
. . . . . . 6 . . . . . . . . .
. . . . . . 5 5 . . . . . . . .
. . . . . 5 5 5 5 . . . . . . .
. . . . 5 f 5 f 5 5 . . 5 . . .
. . . . 5 5 5 5 5 5 5 5 5 . . .
. . . 5 5 5 5 5 5 5 5 5 5 . . .
. . 5 5 5 5 6 6 5 5 . . . . . .
. . 5 5 5 6 6 6 6 5 . . . . . .
. . 5 . 5 6 6 6 6 5 . 5 . . . .
. . . . 5 6 6 6 6 5 . 5 5 . . .
. . . . 5 6 6 6 6 5 . 5 5 5 . .
. . . . . 5 6 6 5 5 . 5 5 5 5 .
. . . . . 5 5 5 5 5 5 5 . . . .
. . . . . . . 5 5 5 5 . . . . .
"""),
img("""
b b b b b b b b b . . . . . . .
b b b f b c b b b . . . . . . .
b b b b b b c b b . . . . . . .
b f b b b b b c b . . . . . . .
b b b b b b b c b . . . . . . .
b c b b b b b b b b . . . . . .
b b c b b b b b b b b . . . . .
b b b c c b b b b . . . . . . .
b b b b b b b b b b . . . . . .
. . . . . b b . . b . . . . . .
. . . . . . b . . b b . . . . .
. . . . . . . . . . b b . . . .
. . . . . . . . . . . b b b b .
. . . . . . . . . . . . . . b b
. . . . . . . . . . . . . . . b
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . 2 .
. . . . . . . . . . . . 2 2 2 .
. . . . . . . . . . . 2 2 2 . .
. . . . . . . . . . . 2 2 2 2 .
. . . . . . . . . . 2 2 . 2 2 2
. . . . . . . . . . . . . 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 f 2 f 2 2 2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3
2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3
2 2 3 3 3 3 3 3 3 3 3 2 2 3 3 .
3 3 3 3 3 3 3 3 3 3 3 3 3 3 . .
. . . 2 2 . 2 . . . . . . . . .
. . . 2 . . 2 . . . . . . . . .
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . .
. . . . . . . . . . . . . . . .
. e . e . e . . e . . . . . . .
. e d d e d d e d . e . . . . .
. d d d d d d d d e d . e . . .
d d d d d d d d d d d e . . . d
d d d d d d d d d d d d . d d d
d f d f d d d d d d d d d d d .
d d d d d d d d d d d d d d d d
d d d d d d d d d d d d d d d .
d d d e e e e e e d d d . d d d
. d e e e e e e e e d . . . . d
. . e e e e e e e e . . . . . .
. . . d e e e d e d . . . . . .
. . d . . d . d . . d . . . . .
. . . . . . . . . . . . . . . .
"""),
img("""
. . . . . . . . . . . . . . . .
. . . . . . d d d d . . . . . .
. . . . . d d d d . . . . . . .
d d d d d d d d d d d d d . . .
d d f d f d d d d d d d d d d .
. e e e e e e e d d d d d d d d
. . e c c c e e e e e e e e d d
. . . e e e e e e e e e e e e d
. . . . . d d . d d . . . . e d
. . . . . d . . . d . . . . d d
. . . . . . . . . . . d d d d d
. . . . . . . . . . d d d d d .
. . . . . . . . . . . . . d d .
. . . . . . . . . . . . . d d .
. . . . . . . . . . . . . d . .
. . . . . . . . . . . . . . . .
""")]
animal_names = ["Turtle",
"Crab",
"Green Fish",
"Octopus",
"Pink fish",
"Narwhal",
"Ray",
"Whale",
"Pufferfish",
"Shark"]
animal_speed_list = [-15, -25, -35, -45, -55, -65, -75, -85, -95, -110]
num_caught_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
immunity_badge_awarded = [0, 0, 0, 0, 0, 0, 0, 0, 0]
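# MakeCode-style forward declarations: module-level defaults that the
# functions above reassign via `global` before they are used.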
animal_sprite: Sprite = None
animal_speed = 0
animal_choice = 0
shark: Sprite = None
animal_speed_list: List[number] = []
animal_names: List[str] = []
animal_image_list: List[Image] = []
num_animals_caught = 0
animal_caught_species_id_number = 0
num_caught_list: List[number] = []
immunity_text_list: List[str] = []
immunity_sub_image_list: List[Image] = []
immunity_badge_list: List[Image] = []
new_badge: Sprite = None
immunity_badge_awarded: List[number] = []
current_immunity = 0
animals_needed_to_learn_immunity = 0
sub: Sprite = None
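# Program start-up: show the splash screen, then draw the ocean background.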
displayStartScreen()
scene.set_background_image(img("""
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d a a
d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d
d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d
d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d a a 7 7 7 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d a 7 7 7 7 7 7 7 a a a a a a a a a a a a a a a a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d a a a a a a a a a a a 7 a a a a a a a a a a a a d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 a a a a a a a a a a a a a a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d a a a a a 7 7 7 7 7 7 7 7 a a a a a a a a a a a d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a a a d d a a a a a a a a a d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 a a a a a a a a a d d d a a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d a a 7 7 7 7 7 7 7 7 7 7 7 a a a a a a a a a a a d d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a a d d d d d d d a a a a d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 a a a a a a d d d a a a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 a a a a a a a a a a d d d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d a a d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 a a d d d d a a a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 a a a a a a a a a a d d d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d a a a a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 a a a a a a a a a d d d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d a a a a a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 a a a a a a a a a d d d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d a a 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 a a a a a a d d d d d d d d d d d d d d d d d d d a a a a a a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d a a d d d a a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d a a d d d d a a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d
d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d a a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d
d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d a a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d
d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d
d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d 7 7 7 7 7 7 7 d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d
d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d 7 7 7 7 7 7 d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d
d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d 7 7 7 7 7 d d d d d d d d d d d d d d d d a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d 7 d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d 7 7 7 7 7 7 7 d d d d d d d d d d d d d d a a a a a a a a a a a d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d a a a a a a a a a a d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d a d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7
7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d 7 7 7 7 7 7 7 7
"""))
sub = sprites.create(img("""
. . . . . . . . . . . f f f f f f . . . . . . . . . . . . . . .
. . . . . . . . . . f d e d e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d d e e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d e d e e e f . . . . . . . . . . . . . .
. . . . . . . . . . f d d e e e e f . . . . . . . . . . . . . .
. . f f f f f f f f d d e d e e e e f f f f f f f f f f f f . .
. f f c f d e d e e e e e e e e e e e e e e e e d e d f c f f .
f f c b f e d e e d d e e e e e e e e e e d d e e d e f b c f f
f c b b f d e e d c b d e e e e e e e e d c b d e e d f b b c f
f b b b f e e d c b b b d e e e e e e d c b b b d e e f b b b f
f f f f f e e d b b b b d e e e e e e d b b b b d e e f f f f f
f b b b f e e e d b b d e e e e e e e e d b b d e e e f b b b f
f b b b f e d e d d d e d e d e d e d e d d d e d e d f b b b f
f f b b f d e d e d e d e d e d e d e d e d e d e d e f b b f f
. f f b f d d d d d d d d d d d d d d d d d d d d d d f b f f .
. . f f f f f f f f f f f f f f f f f f f f f f f f f f f f . .
"""),
SpriteKind.player)
controller.move_sprite(sub)
sub.set_flag(SpriteFlag.STAY_IN_SCREEN, True)
left_shark_image = img("""
. . . . . . . . . . . . . . . .
. . . . . . d d d d . . . . . .
. . . . . d d d d . . . . . . .
d d d d d d d d d d d d d . . .
d d f d f d d d d d d d d d d .
. e e e e e e e d d d d d d d d
. . e c c c e e e e e e e e d d
. . . e e e e e e e e e e e e d
. . . . . d d . d d . . . . e d
. . . . . d . . . d . . . . d d
. . . . . . . . . . . d d d d d
. . . . . . . . . . d d d d d .
. . . . . . . . . . . . . d d .
. . . . . . . . . . . . . d d .
. . . . . . . . . . . . . d . .
. . . . . . . . . . . . . . . .
""").clone()
left_shark_image.flip_x()
fillAnimalArrays()
animals_needed_to_learn_immunity = 5
current_immunity = -1
level = 1
def on_update_interval():
    global shark
    # Every 5 seconds, spawn a shark; its speed scales with the current level.
    shark = sprites.create_projectile_from_side(left_shark_image, level * 90, 0)
    shark.y = randint(10, scene.screen_height() - 10)
    sprites.set_data_string(shark, "species", "Shark")
game.on_update_interval(5000, on_update_interval)
def on_update_interval2():
    global shark
    # Once the level passes 1.7, also spawn sharks travelling in the opposite direction.
    if level > 1.7:
        shark = sprites.create_projectile_from_side(img("""
            . . . . . . . . . . . . . . . .
            . . . . . . d d d d . . . . . .
            . . . . . d d d d . . . . . . .
            d d d d d d d d d d d d d . . .
            d d f d f d d d d d d d d d d .
            . e e e e e e e d d d d d d d d
            . . e c c c e e e e e e e e d d
            . . . e e e e e e e e e e e e d
            . . . . . d d . d d . . . . e d
            . . . . . d . . . d . . . . d d
            . . . . . . . . . . . d d d d d
            . . . . . . . . . . d d d d d .
            . . . . . . . . . . . . . d d .
            . . . . . . . . . . . . . d d .
            . . . . . . . . . . . . . d . .
            . . . . . . . . . . . . . . . .
            """),
            level * -90,
            0)
        shark.y = randint(10, scene.screen_height() - 10)
        sprites.set_data_string(shark, "species", "Shark")
game.on_update_interval(9000, on_update_interval2)
def on_update_interval3():
    global animal_choice, animal_speed, animal_sprite
    # Every second, launch a random animal at a speed scaled by the level.
    animal_choice = randint(0, 8)
    animal_speed = animal_speed_list[animal_choice]
    animal_sprite = sprites.create_projectile_from_side(animal_image_list[animal_choice], animal_speed * level, 0)
    # Choose a random height for the animal.
    animal_sprite.y = randint(10, scene.screen_height() - 10)
    sprites.set_data_string(animal_sprite, "species", animal_names[animal_choice])
    sprites.set_data_number(animal_sprite, "animal_index", animal_choice)
game.on_update_interval(1000, on_update_interval3)
def on_forever():
    # Loop the background melody.
    music.play_tone(262, music.beat(BeatFraction.WHOLE))
    music.play_tone(523, music.beat(BeatFraction.WHOLE))
    music.play_tone(440, music.beat(BeatFraction.WHOLE))
    music.play_tone(392, music.beat(BeatFraction.WHOLE))
    music.play_tone(349, music.beat(BeatFraction.HALF))
    music.rest(music.beat(BeatFraction.HALF))
    music.play_tone(392, music.beat(BeatFraction.HALF))
    music.play_tone(349, music.beat(BeatFraction.HALF))
    music.play_tone(330, music.beat(BeatFraction.WHOLE))
    music.play_tone(294, music.beat(BeatFraction.WHOLE))
forever(on_forever)
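The three interval handlers above share one spawn recipe: horizontal speed proportional to the current level, and a random vertical position with a 10-pixel margin from the screen edges. A minimal plain-Python sketch of that recipe, with hypothetical names and independent of the MakeCode runtime (the 120-pixel height matches the MakeCode Arcade screen):

import random

def spawn_params(level, base_speed=90, screen_height=120):
    # Speed grows linearly with the level; height is uniform with a
    # 10-pixel margin from the top and bottom of the screen.
    vx = base_speed * level
    y = random.randint(10, screen_height - 10)
    return vx, y

if __name__ == "__main__":
    for lvl in (1, 2, 3):
        print(lvl, spawn_params(lvl))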
| 122.78297
| 336
| 0.294587
| 30,587
| 118,240
| 1.129238
| 0.006996
| 0.774059
| 1.152924
| 1.527504
| 0.907035
| 0.892588
| 0.88674
| 0.876867
| 0.867516
| 0.857614
| 0
| 0.093896
| 0.534599
| 118,240
| 962
| 337
| 122.910603
| 0.533773
| 0.000592
| 0
| 0.497904
| 0
| 0.461216
| 0.940889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012579
| false
| 0
| 0
| 0
| 0.015723
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
70518403a9dba990b016f6baa45f271f14c58925
| 66,707
|
py
|
Python
|
custos-client-sdks/custos-python-sdk/custos/server/core/UserProfileService_pb2.py
|
hasithajayasundara/airavata-custos
|
2d341849dd8ea8a7c2efec6cc73b01dfd495352e
|
[
"Apache-2.0"
] | null | null | null |
custos-client-sdks/custos-python-sdk/custos/server/core/UserProfileService_pb2.py
|
hasithajayasundara/airavata-custos
|
2d341849dd8ea8a7c2efec6cc73b01dfd495352e
|
[
"Apache-2.0"
] | null | null | null |
custos-client-sdks/custos-python-sdk/custos/server/core/UserProfileService_pb2.py
|
hasithajayasundara/airavata-custos
|
2d341849dd8ea8a7c2efec6cc73b01dfd495352e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: UserProfileService.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='UserProfileService.proto',
package='org.apache.custos.user.profile.service',
syntax='proto3',
serialized_options=b'P\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x18UserProfileService.proto\x12&org.apache.custos.user.profile.service\"\xdc\x03\n\x0bUserProfile\x12\x10\n\x08username\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x12\n\nfirst_name\x18\x03 \x01(\t\x12\x11\n\tlast_name\x18\x04 \x01(\t\x12\x12\n\ncreated_at\x18\x05 \x01(\x03\x12\x42\n\x06status\x18\x06 \x01(\x0e\x32\x32.org.apache.custos.user.profile.service.UserStatus\x12I\n\nattributes\x18\x07 \x03(\x0b\x32\x35.org.apache.custos.user.profile.service.UserAttribute\x12\x14\n\x0c\x63lient_roles\x18\x08 \x03(\t\x12\x13\n\x0brealm_roles\x18\t \x03(\t\x12\x18\n\x10last_modified_at\x18\n \x01(\x03\x12?\n\x04type\x18\x0b \x01(\x0e\x32\x31.org.apache.custos.user.profile.service.UserTypes\x12\\\n\x0fmembership_type\x18\x0c \x01(\x0e\x32\x43.org.apache.custos.user.profile.service.DefaultGroupMembershipTypes\"\x93\x01\n\x12UserProfileRequest\x12\x10\n\x08tenantId\x18\x01 \x01(\x03\x12\x44\n\x07profile\x18\x02 \x01(\x0b\x32\x33.org.apache.custos.user.profile.service.UserProfile\x12\x13\n\x0bperformedBy\x18\x03 \x01(\t\x12\x10\n\x08\x63lientId\x18\x04 \x01(\t\"7\n\rUserAttribute\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x03(\t\"c\n\x1aGetAllUserProfilesResponse\x12\x45\n\x08profiles\x18\x01 \x03(\x0b\x32\x33.org.apache.custos.user.profile.service.UserProfile\"@\n\x1aGetUpdateAuditTrailRequest\x12\x10\n\x08tenantId\x18\x01 \x01(\x03\x12\x10\n\x08username\x18\x02 \x01(\t\"\x83\x01\n\"UserProfileAttributeUpdateMetadata\x12\x18\n\x10updatedAttribute\x18\x01 \x01(\t\x12\x1d\n\x15updatedAttributeValue\x18\x02 \x01(\t\x12\x11\n\tupdatedBy\x18\x03 \x01(\t\x12\x11\n\tupdatedAt\x18\x04 \x01(\t\"\x92\x01\n\x1fUserProfileStatusUpdateMetadata\x12I\n\rupdatedStatus\x18\x01 \x01(\x0e\x32\x32.org.apache.custos.user.profile.service.UserStatus\x12\x11\n\tupdatedBy\x18\x02 \x01(\t\x12\x11\n\tupdatedAt\x18\x03 \x01(\t\"\xdf\x01\n\x1bGetUpdateAuditTrailResponse\x12\x62\n\x0e\x61ttributeAudit\x18\x01 \x03(\x0b\x32J.org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata\x12\\\n\x0bstatusAudit\x18\x02 \x03(\x0b\x32G.org.apache.custos.user.profile.service.UserProfileStatusUpdateMetadata\"\xe3\x01\n\x0cGroupRequest\x12\x10\n\x08tenantId\x18\x01 \x01(\x03\x12<\n\x05group\x18\x02 \x01(\x0b\x32-.org.apache.custos.user.profile.service.Group\x12\x13\n\x0bperformedBy\x18\x03 \x01(\t\x12\x10\n\x08\x63lientId\x18\x04 \x01(\t\x12\\\n\x0fmembership_type\x18\x05 \x01(\x0e\x32\x43.org.apache.custos.user.profile.service.DefaultGroupMembershipTypes\"U\n\x14GetAllGroupsResponse\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.org.apache.custos.user.profile.service.Group\"\x84\x02\n\x05Group\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0brealm_roles\x18\x03 \x03(\t\x12\x14\n\x0c\x63lient_roles\x18\x04 \x03(\t\x12\x11\n\tparent_id\x18\x05 \x01(\t\x12\x14\n\x0c\x63reated_time\x18\x06 \x01(\x03\x12\x1a\n\x12last_modified_time\x18\x07 \x01(\x03\x12J\n\nattributes\x18\x08 \x03(\x0b\x32\x36.org.apache.custos.user.profile.service.GroupAttribute\x12\x13\n\x0b\x64\x65scription\x18\t \x01(\t\x12\x10\n\x08owner_id\x18\n \x01(\t\"8\n\x0eGroupAttribute\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x03(\t\"g\n\x0fGroupMembership\x12\x10\n\x08tenantId\x18\x01 \x01(\x03\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x10\n\x08username\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x10\n\x08\x63lientId\x18\x05 \x01(\t\"b\n\x16GroupToGroupMembership\x12\x10\n\x08tenantId\x18\x01 \x01(\x03\x12\x11\n\tparent_id\x18\x02 \x01(\t\x12\x10\n\x08\x63hild_id\x18\x03 \x01(\t\x12\x11\n\tclient_id\x18\x04 \x01(\t\"\x18\n\x06Status\x12\x0e\n\x06status\x18\x01 \x01(\x08\".\n\x1eUserGroupMembershipTypeRequest\x12\x0c\n\x04type\x18\x01 \x01(\t*\xe3\x01\n\nUserStatus\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\r\n\tCONFIRMED\x10\x01\x12\x0c\n\x08\x41PPROVED\x10\x02\x12\x0b\n\x07\x44\x45LETED\x10\x03\x12\r\n\tDUPLICATE\x10\x04\x12\x10\n\x0cGRACE_PERIOD\x10\x05\x12\x0b\n\x07INVITED\x10\x06\x12\n\n\x06\x44\x45NIED\x10\x07\x12\x0b\n\x07PENDING\x10\x08\x12\x14\n\x10PENDING_APPROVAL\x10\t\x12\x18\n\x14PENDING_CONFIRMATION\x10\n\x12\r\n\tSUSPENDED\x10\x0b\x12\x0c\n\x08\x44\x45\x43LINED\x10\x0c\x12\x0b\n\x07\x45XPIRED\x10\r*?\n\x1b\x44\x65\x66\x61ultGroupMembershipTypes\x12\t\n\x05OWNER\x10\x00\x12\t\n\x05\x41\x44MIN\x10\x01\x12\n\n\x06MEMBER\x10\x02*(\n\tUserTypes\x12\x0c\n\x08\x45ND_USER\x10\x00\x12\r\n\tCOMMUNITY\x10\x01\x32\xed\x19\n\x12UserProfileService\x12\x84\x01\n\x11\x63reateUserProfile\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a\x33.org.apache.custos.user.profile.service.UserProfile\x12\x84\x01\n\x11updateUserProfile\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a\x33.org.apache.custos.user.profile.service.UserProfile\x12\x81\x01\n\x0egetUserProfile\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a\x33.org.apache.custos.user.profile.service.UserProfile\x12\x84\x01\n\x11\x64\x65leteUserProfile\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a\x33.org.apache.custos.user.profile.service.UserProfile\x12\x9c\x01\n\x1agetAllUserProfilesInTenant\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a\x42.org.apache.custos.user.profile.service.GetAllUserProfilesResponse\x12\x9e\x01\n\x1c\x66indUserProfilesByAttributes\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a\x42.org.apache.custos.user.profile.service.GetAllUserProfilesResponse\x12r\n\x0b\x63reateGroup\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a-.org.apache.custos.user.profile.service.Group\x12r\n\x0bupdateGroup\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a-.org.apache.custos.user.profile.service.Group\x12r\n\x0b\x64\x65leteGroup\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a-.org.apache.custos.user.profile.service.Group\x12o\n\x08getGroup\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a-.org.apache.custos.user.profile.service.Group\x12\x82\x01\n\x0cgetAllGroups\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a<.org.apache.custos.user.profile.service.GetAllGroupsResponse\x12\xa4\x01\n\x19getUserProfileAuditTrails\x12\x42.org.apache.custos.user.profile.service.GetUpdateAuditTrailRequest\x1a\x43.org.apache.custos.user.profile.service.GetUpdateAuditTrailResponse\x12y\n\x0e\x61\x64\x64UserToGroup\x12\x37.org.apache.custos.user.profile.service.GroupMembership\x1a..org.apache.custos.user.profile.service.Status\x12~\n\x13removeUserFromGroup\x12\x37.org.apache.custos.user.profile.service.GroupMembership\x1a..org.apache.custos.user.profile.service.Status\x12\x8c\x01\n\x1a\x61\x64\x64\x43hildGroupToParentGroup\x12>.org.apache.custos.user.profile.service.GroupToGroupMembership\x1a..org.apache.custos.user.profile.service.Status\x12\x91\x01\n\x1fremoveChildGroupFromParentGroup\x12>.org.apache.custos.user.profile.service.GroupToGroupMembership\x1a..org.apache.custos.user.profile.service.Status\x12\x8e\x01\n\x12getAllGroupsOfUser\x12:.org.apache.custos.user.profile.service.UserProfileRequest\x1a<.org.apache.custos.user.profile.service.GetAllGroupsResponse\x12\x8f\x01\n\x19getAllParentGroupsOfGroup\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a<.org.apache.custos.user.profile.service.GetAllGroupsResponse\x12\x94\x01\n\x1a\x61\x64\x64UserGroupMembershipType\x12\x46.org.apache.custos.user.profile.service.UserGroupMembershipTypeRequest\x1a..org.apache.custos.user.profile.service.Status\x12\x97\x01\n\x1dremoveUserGroupMembershipType\x12\x46.org.apache.custos.user.profile.service.UserGroupMembershipTypeRequest\x1a..org.apache.custos.user.profile.service.Status\x12\x8c\x01\n\x10getAllChildUsers\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a\x42.org.apache.custos.user.profile.service.GetAllUserProfilesResponse\x12\x87\x01\n\x11getAllChildGroups\x12\x34.org.apache.custos.user.profile.service.GroupRequest\x1a<.org.apache.custos.user.profile.service.GetAllGroupsResponse\x12\x83\x01\n\x18\x63hangeUserMembershipType\x12\x37.org.apache.custos.user.profile.service.GroupMembership\x1a..org.apache.custos.user.profile.service.Status\x12t\n\thasAccess\x12\x37.org.apache.custos.user.profile.service.GroupMembership\x1a..org.apache.custos.user.profile.service.StatusB\x02P\x01\x62\x06proto3'
)
_USERSTATUS = _descriptor.EnumDescriptor(
name='UserStatus',
full_name='org.apache.custos.user.profile.service.UserStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ACTIVE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CONFIRMED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='APPROVED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DELETED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DUPLICATE', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GRACE_PERIOD', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVITED', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DENIED', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING_APPROVAL', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING_CONFIRMATION', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SUSPENDED', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DECLINED', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXPIRED', index=13, number=13,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2348,
serialized_end=2575,
)
_sym_db.RegisterEnumDescriptor(_USERSTATUS)
UserStatus = enum_type_wrapper.EnumTypeWrapper(_USERSTATUS)
_DEFAULTGROUPMEMBERSHIPTYPES = _descriptor.EnumDescriptor(
name='DefaultGroupMembershipTypes',
full_name='org.apache.custos.user.profile.service.DefaultGroupMembershipTypes',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OWNER', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ADMIN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MEMBER', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2577,
serialized_end=2640,
)
_sym_db.RegisterEnumDescriptor(_DEFAULTGROUPMEMBERSHIPTYPES)
DefaultGroupMembershipTypes = enum_type_wrapper.EnumTypeWrapper(_DEFAULTGROUPMEMBERSHIPTYPES)
_USERTYPES = _descriptor.EnumDescriptor(
name='UserTypes',
full_name='org.apache.custos.user.profile.service.UserTypes',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='END_USER', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMMUNITY', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2642,
serialized_end=2682,
)
_sym_db.RegisterEnumDescriptor(_USERTYPES)
UserTypes = enum_type_wrapper.EnumTypeWrapper(_USERTYPES)
ACTIVE = 0
CONFIRMED = 1
APPROVED = 2
DELETED = 3
DUPLICATE = 4
GRACE_PERIOD = 5
INVITED = 6
DENIED = 7
PENDING = 8
PENDING_APPROVAL = 9
PENDING_CONFIRMATION = 10
SUSPENDED = 11
DECLINED = 12
EXPIRED = 13
OWNER = 0
ADMIN = 1
MEMBER = 2
END_USER = 0
COMMUNITY = 1
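# Hedged usage note (added commentary, not compiler output): the
# EnumTypeWrapper objects registered above provide two-way lookups between
# enum names and numbers, and the module-level constants mirror the numbers.
# A few self-checking examples using only names already defined at this point:
assert UserStatus.Name(PENDING) == "PENDING"
assert UserStatus.Value("SUSPENDED") == SUSPENDED
assert DefaultGroupMembershipTypes.Name(ADMIN) == "ADMIN"
assert UserTypes.Value("COMMUNITY") == COMMUNITY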
_USERPROFILE = _descriptor.Descriptor(
name='UserProfile',
full_name='org.apache.custos.user.profile.service.UserProfile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='org.apache.custos.user.profile.service.UserProfile.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='org.apache.custos.user.profile.service.UserProfile.email', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='first_name', full_name='org.apache.custos.user.profile.service.UserProfile.first_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_name', full_name='org.apache.custos.user.profile.service.UserProfile.last_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='org.apache.custos.user.profile.service.UserProfile.created_at', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='org.apache.custos.user.profile.service.UserProfile.status', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='attributes', full_name='org.apache.custos.user.profile.service.UserProfile.attributes', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_roles', full_name='org.apache.custos.user.profile.service.UserProfile.client_roles', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='realm_roles', full_name='org.apache.custos.user.profile.service.UserProfile.realm_roles', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_modified_at', full_name='org.apache.custos.user.profile.service.UserProfile.last_modified_at', index=9,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='org.apache.custos.user.profile.service.UserProfile.type', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='membership_type', full_name='org.apache.custos.user.profile.service.UserProfile.membership_type', index=11,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=545,
)
_USERPROFILEREQUEST = _descriptor.Descriptor(
name='UserProfileRequest',
full_name='org.apache.custos.user.profile.service.UserProfileRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenantId', full_name='org.apache.custos.user.profile.service.UserProfileRequest.tenantId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='profile', full_name='org.apache.custos.user.profile.service.UserProfileRequest.profile', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='performedBy', full_name='org.apache.custos.user.profile.service.UserProfileRequest.performedBy', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clientId', full_name='org.apache.custos.user.profile.service.UserProfileRequest.clientId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=548,
serialized_end=695,
)
_USERATTRIBUTE = _descriptor.Descriptor(
name='UserAttribute',
full_name='org.apache.custos.user.profile.service.UserAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.apache.custos.user.profile.service.UserAttribute.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.custos.user.profile.service.UserAttribute.key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.custos.user.profile.service.UserAttribute.value', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=697,
serialized_end=752,
)
_GETALLUSERPROFILESRESPONSE = _descriptor.Descriptor(
name='GetAllUserProfilesResponse',
full_name='org.apache.custos.user.profile.service.GetAllUserProfilesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='profiles', full_name='org.apache.custos.user.profile.service.GetAllUserProfilesResponse.profiles', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=754,
serialized_end=853,
)
_GETUPDATEAUDITTRAILREQUEST = _descriptor.Descriptor(
name='GetUpdateAuditTrailRequest',
full_name='org.apache.custos.user.profile.service.GetUpdateAuditTrailRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenantId', full_name='org.apache.custos.user.profile.service.GetUpdateAuditTrailRequest.tenantId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='username', full_name='org.apache.custos.user.profile.service.GetUpdateAuditTrailRequest.username', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=855,
serialized_end=919,
)
_USERPROFILEATTRIBUTEUPDATEMETADATA = _descriptor.Descriptor(
name='UserProfileAttributeUpdateMetadata',
full_name='org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='updatedAttribute', full_name='org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata.updatedAttribute', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updatedAttributeValue', full_name='org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata.updatedAttributeValue', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updatedBy', full_name='org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata.updatedBy', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updatedAt', full_name='org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata.updatedAt', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1053,
)
_USERPROFILESTATUSUPDATEMETADATA = _descriptor.Descriptor(
name='UserProfileStatusUpdateMetadata',
full_name='org.apache.custos.user.profile.service.UserProfileStatusUpdateMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='updatedStatus', full_name='org.apache.custos.user.profile.service.UserProfileStatusUpdateMetadata.updatedStatus', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updatedBy', full_name='org.apache.custos.user.profile.service.UserProfileStatusUpdateMetadata.updatedBy', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updatedAt', full_name='org.apache.custos.user.profile.service.UserProfileStatusUpdateMetadata.updatedAt', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1056,
serialized_end=1202,
)
_GETUPDATEAUDITTRAILRESPONSE = _descriptor.Descriptor(
name='GetUpdateAuditTrailResponse',
full_name='org.apache.custos.user.profile.service.GetUpdateAuditTrailResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='attributeAudit', full_name='org.apache.custos.user.profile.service.GetUpdateAuditTrailResponse.attributeAudit', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='statusAudit', full_name='org.apache.custos.user.profile.service.GetUpdateAuditTrailResponse.statusAudit', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1205,
serialized_end=1428,
)
_GROUPREQUEST = _descriptor.Descriptor(
name='GroupRequest',
full_name='org.apache.custos.user.profile.service.GroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenantId', full_name='org.apache.custos.user.profile.service.GroupRequest.tenantId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='group', full_name='org.apache.custos.user.profile.service.GroupRequest.group', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='performedBy', full_name='org.apache.custos.user.profile.service.GroupRequest.performedBy', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clientId', full_name='org.apache.custos.user.profile.service.GroupRequest.clientId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='membership_type', full_name='org.apache.custos.user.profile.service.GroupRequest.membership_type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1431,
serialized_end=1658,
)
_GETALLGROUPSRESPONSE = _descriptor.Descriptor(
name='GetAllGroupsResponse',
full_name='org.apache.custos.user.profile.service.GetAllGroupsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='groups', full_name='org.apache.custos.user.profile.service.GetAllGroupsResponse.groups', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1660,
serialized_end=1745,
)
_GROUP = _descriptor.Descriptor(
name='Group',
full_name='org.apache.custos.user.profile.service.Group',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.apache.custos.user.profile.service.Group.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='org.apache.custos.user.profile.service.Group.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='realm_roles', full_name='org.apache.custos.user.profile.service.Group.realm_roles', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_roles', full_name='org.apache.custos.user.profile.service.Group.client_roles', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parent_id', full_name='org.apache.custos.user.profile.service.Group.parent_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_time', full_name='org.apache.custos.user.profile.service.Group.created_time', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_modified_time', full_name='org.apache.custos.user.profile.service.Group.last_modified_time', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='attributes', full_name='org.apache.custos.user.profile.service.Group.attributes', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='org.apache.custos.user.profile.service.Group.description', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='owner_id', full_name='org.apache.custos.user.profile.service.Group.owner_id', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1748,
serialized_end=2008,
)
_GROUPATTRIBUTE = _descriptor.Descriptor(
name='GroupAttribute',
full_name='org.apache.custos.user.profile.service.GroupAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.apache.custos.user.profile.service.GroupAttribute.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.custos.user.profile.service.GroupAttribute.key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.custos.user.profile.service.GroupAttribute.value', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2010,
serialized_end=2066,
)
_GROUPMEMBERSHIP = _descriptor.Descriptor(
name='GroupMembership',
full_name='org.apache.custos.user.profile.service.GroupMembership',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenantId', full_name='org.apache.custos.user.profile.service.GroupMembership.tenantId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='group_id', full_name='org.apache.custos.user.profile.service.GroupMembership.group_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='username', full_name='org.apache.custos.user.profile.service.GroupMembership.username', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='org.apache.custos.user.profile.service.GroupMembership.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clientId', full_name='org.apache.custos.user.profile.service.GroupMembership.clientId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2068,
serialized_end=2171,
)
_GROUPTOGROUPMEMBERSHIP = _descriptor.Descriptor(
name='GroupToGroupMembership',
full_name='org.apache.custos.user.profile.service.GroupToGroupMembership',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tenantId', full_name='org.apache.custos.user.profile.service.GroupToGroupMembership.tenantId', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parent_id', full_name='org.apache.custos.user.profile.service.GroupToGroupMembership.parent_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='child_id', full_name='org.apache.custos.user.profile.service.GroupToGroupMembership.child_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_id', full_name='org.apache.custos.user.profile.service.GroupToGroupMembership.client_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2173,
serialized_end=2271,
)
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='org.apache.custos.user.profile.service.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='org.apache.custos.user.profile.service.Status.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2273,
serialized_end=2297,
)
_USERGROUPMEMBERSHIPTYPEREQUEST = _descriptor.Descriptor(
name='UserGroupMembershipTypeRequest',
full_name='org.apache.custos.user.profile.service.UserGroupMembershipTypeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='org.apache.custos.user.profile.service.UserGroupMembershipTypeRequest.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2299,
serialized_end=2345,
)
_USERPROFILE.fields_by_name['status'].enum_type = _USERSTATUS
_USERPROFILE.fields_by_name['attributes'].message_type = _USERATTRIBUTE
_USERPROFILE.fields_by_name['type'].enum_type = _USERTYPES
_USERPROFILE.fields_by_name['membership_type'].enum_type = _DEFAULTGROUPMEMBERSHIPTYPES
_USERPROFILEREQUEST.fields_by_name['profile'].message_type = _USERPROFILE
_GETALLUSERPROFILESRESPONSE.fields_by_name['profiles'].message_type = _USERPROFILE
_USERPROFILESTATUSUPDATEMETADATA.fields_by_name['updatedStatus'].enum_type = _USERSTATUS
_GETUPDATEAUDITTRAILRESPONSE.fields_by_name['attributeAudit'].message_type = _USERPROFILEATTRIBUTEUPDATEMETADATA
_GETUPDATEAUDITTRAILRESPONSE.fields_by_name['statusAudit'].message_type = _USERPROFILESTATUSUPDATEMETADATA
_GROUPREQUEST.fields_by_name['group'].message_type = _GROUP
_GROUPREQUEST.fields_by_name['membership_type'].enum_type = _DEFAULTGROUPMEMBERSHIPTYPES
_GETALLGROUPSRESPONSE.fields_by_name['groups'].message_type = _GROUP
_GROUP.fields_by_name['attributes'].message_type = _GROUPATTRIBUTE
DESCRIPTOR.message_types_by_name['UserProfile'] = _USERPROFILE
DESCRIPTOR.message_types_by_name['UserProfileRequest'] = _USERPROFILEREQUEST
DESCRIPTOR.message_types_by_name['UserAttribute'] = _USERATTRIBUTE
DESCRIPTOR.message_types_by_name['GetAllUserProfilesResponse'] = _GETALLUSERPROFILESRESPONSE
DESCRIPTOR.message_types_by_name['GetUpdateAuditTrailRequest'] = _GETUPDATEAUDITTRAILREQUEST
DESCRIPTOR.message_types_by_name['UserProfileAttributeUpdateMetadata'] = _USERPROFILEATTRIBUTEUPDATEMETADATA
DESCRIPTOR.message_types_by_name['UserProfileStatusUpdateMetadata'] = _USERPROFILESTATUSUPDATEMETADATA
DESCRIPTOR.message_types_by_name['GetUpdateAuditTrailResponse'] = _GETUPDATEAUDITTRAILRESPONSE
DESCRIPTOR.message_types_by_name['GroupRequest'] = _GROUPREQUEST
DESCRIPTOR.message_types_by_name['GetAllGroupsResponse'] = _GETALLGROUPSRESPONSE
DESCRIPTOR.message_types_by_name['Group'] = _GROUP
DESCRIPTOR.message_types_by_name['GroupAttribute'] = _GROUPATTRIBUTE
DESCRIPTOR.message_types_by_name['GroupMembership'] = _GROUPMEMBERSHIP
DESCRIPTOR.message_types_by_name['GroupToGroupMembership'] = _GROUPTOGROUPMEMBERSHIP
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
DESCRIPTOR.message_types_by_name['UserGroupMembershipTypeRequest'] = _USERGROUPMEMBERSHIPTYPEREQUEST
DESCRIPTOR.enum_types_by_name['UserStatus'] = _USERSTATUS
DESCRIPTOR.enum_types_by_name['DefaultGroupMembershipTypes'] = _DEFAULTGROUPMEMBERSHIPTYPES
DESCRIPTOR.enum_types_by_name['UserTypes'] = _USERTYPES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UserProfile = _reflection.GeneratedProtocolMessageType('UserProfile', (_message.Message,), {
'DESCRIPTOR' : _USERPROFILE,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.UserProfile)
})
_sym_db.RegisterMessage(UserProfile)
UserProfileRequest = _reflection.GeneratedProtocolMessageType('UserProfileRequest', (_message.Message,), {
'DESCRIPTOR' : _USERPROFILEREQUEST,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.UserProfileRequest)
})
_sym_db.RegisterMessage(UserProfileRequest)
UserAttribute = _reflection.GeneratedProtocolMessageType('UserAttribute', (_message.Message,), {
'DESCRIPTOR' : _USERATTRIBUTE,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.UserAttribute)
})
_sym_db.RegisterMessage(UserAttribute)
GetAllUserProfilesResponse = _reflection.GeneratedProtocolMessageType('GetAllUserProfilesResponse', (_message.Message,), {
'DESCRIPTOR' : _GETALLUSERPROFILESRESPONSE,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GetAllUserProfilesResponse)
})
_sym_db.RegisterMessage(GetAllUserProfilesResponse)
GetUpdateAuditTrailRequest = _reflection.GeneratedProtocolMessageType('GetUpdateAuditTrailRequest', (_message.Message,), {
'DESCRIPTOR' : _GETUPDATEAUDITTRAILREQUEST,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GetUpdateAuditTrailRequest)
})
_sym_db.RegisterMessage(GetUpdateAuditTrailRequest)
UserProfileAttributeUpdateMetadata = _reflection.GeneratedProtocolMessageType('UserProfileAttributeUpdateMetadata', (_message.Message,), {
'DESCRIPTOR' : _USERPROFILEATTRIBUTEUPDATEMETADATA,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.UserProfileAttributeUpdateMetadata)
})
_sym_db.RegisterMessage(UserProfileAttributeUpdateMetadata)
UserProfileStatusUpdateMetadata = _reflection.GeneratedProtocolMessageType('UserProfileStatusUpdateMetadata', (_message.Message,), {
'DESCRIPTOR' : _USERPROFILESTATUSUPDATEMETADATA,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.UserProfileStatusUpdateMetadata)
})
_sym_db.RegisterMessage(UserProfileStatusUpdateMetadata)
GetUpdateAuditTrailResponse = _reflection.GeneratedProtocolMessageType('GetUpdateAuditTrailResponse', (_message.Message,), {
'DESCRIPTOR' : _GETUPDATEAUDITTRAILRESPONSE,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GetUpdateAuditTrailResponse)
})
_sym_db.RegisterMessage(GetUpdateAuditTrailResponse)
GroupRequest = _reflection.GeneratedProtocolMessageType('GroupRequest', (_message.Message,), {
'DESCRIPTOR' : _GROUPREQUEST,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GroupRequest)
})
_sym_db.RegisterMessage(GroupRequest)
GetAllGroupsResponse = _reflection.GeneratedProtocolMessageType('GetAllGroupsResponse', (_message.Message,), {
'DESCRIPTOR' : _GETALLGROUPSRESPONSE,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GetAllGroupsResponse)
})
_sym_db.RegisterMessage(GetAllGroupsResponse)
Group = _reflection.GeneratedProtocolMessageType('Group', (_message.Message,), {
'DESCRIPTOR' : _GROUP,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.Group)
})
_sym_db.RegisterMessage(Group)
GroupAttribute = _reflection.GeneratedProtocolMessageType('GroupAttribute', (_message.Message,), {
'DESCRIPTOR' : _GROUPATTRIBUTE,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GroupAttribute)
})
_sym_db.RegisterMessage(GroupAttribute)
GroupMembership = _reflection.GeneratedProtocolMessageType('GroupMembership', (_message.Message,), {
'DESCRIPTOR' : _GROUPMEMBERSHIP,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GroupMembership)
})
_sym_db.RegisterMessage(GroupMembership)
GroupToGroupMembership = _reflection.GeneratedProtocolMessageType('GroupToGroupMembership', (_message.Message,), {
'DESCRIPTOR' : _GROUPTOGROUPMEMBERSHIP,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.GroupToGroupMembership)
})
_sym_db.RegisterMessage(GroupToGroupMembership)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.Status)
})
_sym_db.RegisterMessage(Status)
UserGroupMembershipTypeRequest = _reflection.GeneratedProtocolMessageType('UserGroupMembershipTypeRequest', (_message.Message,), {
'DESCRIPTOR' : _USERGROUPMEMBERSHIPTYPEREQUEST,
'__module__' : 'UserProfileService_pb2'
# @@protoc_insertion_point(class_scope:org.apache.custos.user.profile.service.UserGroupMembershipTypeRequest)
})
_sym_db.RegisterMessage(UserGroupMembershipTypeRequest)
DESCRIPTOR._options = None
_USERPROFILESERVICE = _descriptor.ServiceDescriptor(
name='UserProfileService',
full_name='org.apache.custos.user.profile.service.UserProfileService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2685,
serialized_end=5994,
methods=[
_descriptor.MethodDescriptor(
name='createUserProfile',
full_name='org.apache.custos.user.profile.service.UserProfileService.createUserProfile',
index=0,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_USERPROFILE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='updateUserProfile',
full_name='org.apache.custos.user.profile.service.UserProfileService.updateUserProfile',
index=1,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_USERPROFILE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getUserProfile',
full_name='org.apache.custos.user.profile.service.UserProfileService.getUserProfile',
index=2,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_USERPROFILE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='deleteUserProfile',
full_name='org.apache.custos.user.profile.service.UserProfileService.deleteUserProfile',
index=3,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_USERPROFILE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllUserProfilesInTenant',
full_name='org.apache.custos.user.profile.service.UserProfileService.getAllUserProfilesInTenant',
index=4,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_GETALLUSERPROFILESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='findUserProfilesByAttributes',
full_name='org.apache.custos.user.profile.service.UserProfileService.findUserProfilesByAttributes',
index=5,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_GETALLUSERPROFILESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='createGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.createGroup',
index=6,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GROUP,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='updateGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.updateGroup',
index=7,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GROUP,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='deleteGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.deleteGroup',
index=8,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GROUP,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.getGroup',
index=9,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GROUP,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllGroups',
full_name='org.apache.custos.user.profile.service.UserProfileService.getAllGroups',
index=10,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GETALLGROUPSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getUserProfileAuditTrails',
full_name='org.apache.custos.user.profile.service.UserProfileService.getUserProfileAuditTrails',
index=11,
containing_service=None,
input_type=_GETUPDATEAUDITTRAILREQUEST,
output_type=_GETUPDATEAUDITTRAILRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='addUserToGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.addUserToGroup',
index=12,
containing_service=None,
input_type=_GROUPMEMBERSHIP,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='removeUserFromGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.removeUserFromGroup',
index=13,
containing_service=None,
input_type=_GROUPMEMBERSHIP,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='addChildGroupToParentGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.addChildGroupToParentGroup',
index=14,
containing_service=None,
input_type=_GROUPTOGROUPMEMBERSHIP,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='removeChildGroupFromParentGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.removeChildGroupFromParentGroup',
index=15,
containing_service=None,
input_type=_GROUPTOGROUPMEMBERSHIP,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllGroupsOfUser',
full_name='org.apache.custos.user.profile.service.UserProfileService.getAllGroupsOfUser',
index=16,
containing_service=None,
input_type=_USERPROFILEREQUEST,
output_type=_GETALLGROUPSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllParentGroupsOfGroup',
full_name='org.apache.custos.user.profile.service.UserProfileService.getAllParentGroupsOfGroup',
index=17,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GETALLGROUPSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='addUserGroupMembershipType',
full_name='org.apache.custos.user.profile.service.UserProfileService.addUserGroupMembershipType',
index=18,
containing_service=None,
input_type=_USERGROUPMEMBERSHIPTYPEREQUEST,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='removeUserGroupMembershipType',
full_name='org.apache.custos.user.profile.service.UserProfileService.removeUserGroupMembershipType',
index=19,
containing_service=None,
input_type=_USERGROUPMEMBERSHIPTYPEREQUEST,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllChildUsers',
full_name='org.apache.custos.user.profile.service.UserProfileService.getAllChildUsers',
index=20,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GETALLUSERPROFILESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getAllChildGroups',
full_name='org.apache.custos.user.profile.service.UserProfileService.getAllChildGroups',
index=21,
containing_service=None,
input_type=_GROUPREQUEST,
output_type=_GETALLGROUPSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='changeUserMembershipType',
full_name='org.apache.custos.user.profile.service.UserProfileService.changeUserMembershipType',
index=22,
containing_service=None,
input_type=_GROUPMEMBERSHIP,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='hasAccess',
full_name='org.apache.custos.user.profile.service.UserProfileService.hasAccess',
index=23,
containing_service=None,
input_type=_GROUPMEMBERSHIP,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_USERPROFILESERVICE)
DESCRIPTOR.services_by_name['UserProfileService'] = _USERPROFILESERVICE
# @@protoc_insertion_point(module_scope)
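The generated module above exposes ordinary protobuf message classes. As a minimal usage sketch (assuming the module is importable as UserProfileService_pb2, matching the __module__ value set on each class; all field values here are illustrative):

import UserProfileService_pb2 as pb

# Build a GroupRequest and populate its nested Group message.
req = pb.GroupRequest(tenantId=42, performedBy="admin", clientId="client-1")
req.group.id = "g-001"
req.group.name = "researchers"

# Round-trip through the protobuf wire format.
data = req.SerializeToString()
parsed = pb.GroupRequest()
parsed.ParseFromString(data)
assert parsed.group.name == "researchers"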
avg_line_length: 46.910689 | max_line_length: 8,352 | alphanum_fraction: 0.771178
[remaining qsc_* quality-signal columns for this row omitted]

hexsha: 7070a6a97bea1b0f7d1d915a1255f96156c72d7b | size: 5,321 | ext: py | lang: Python
max_stars_repo_path: Models/attentionLayer.py | max_stars_repo_name: go2chayan/HateXplain | max_stars_repo_head_hexsha: c5f173d39dca348ec6481fca08a17bc80616651a | max_stars_repo_licenses: ["MIT"] | max_stars_count: 69 | stars events: 2021-03-05T20:50:39.000Z to 2022-03-29T17:45:55.000Z
max_issues_repo_path: Models/attentionLayer.py | max_issues_repo_name: go2chayan/HateXplain | max_issues_repo_head_hexsha: c5f173d39dca348ec6481fca08a17bc80616651a | max_issues_repo_licenses: ["MIT"] | max_issues_count: 10 | issues events: 2021-03-05T07:38:06.000Z to 2022-03-31T22:27:39.000Z
max_forks_repo_path: Models/attentionLayer.py | max_forks_repo_name: go2chayan/HateXplain | max_forks_repo_head_hexsha: c5f173d39dca348ec6481fca08a17bc80616651a | max_forks_repo_licenses: ["MIT"] | max_forks_count: 28 | forks events: 2021-03-22T03:46:43.000Z to 2022-03-08T18:34:38.000Z
content:
import torch
import torch.nn as nn
debug=False
# Custom Layers
class Attention(nn.Module):
def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
super(Attention, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.step_dim = step_dim
self.features_dim = 0
weight = torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
if bias:
self.b = nn.Parameter(torch.zeros(step_dim))
def forward(self, x, mask=None):
feature_dim = self.feature_dim
step_dim = self.step_dim
temp=x.contiguous().view(-1, feature_dim)
if(debug):
print("temp",temp.shape)
print("weight",self.weight.shape)
eij = torch.mm(temp, self.weight)
if(debug):
print("eij step 1",eij.shape)
eij = eij.view(-1, step_dim)
if(debug):
print("eij step 2",eij.shape)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
        # mask out padded steps before the softmax; guard against a None mask
        if mask is not None:
            eij[~mask] = float('-inf')
        a = torch.softmax(eij, dim=1)
# a = torch.exp(eij)
# if(debug==True):
# print("a shape",a.shape)
# print("mask shape",mask.shape)
# if mask is not None:
# a = a * mask
# a = a /(torch.sum(a, 1, keepdim=True) + 1e-10)
if(debug):
print("attention",a.shape)
weighted_input = x * torch.unsqueeze(a, -1)
if(debug):
print("weighted input",weighted_input.shape)
return torch.sum(weighted_input, 1),a
class Attention_LBSA(nn.Module):
def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
super(Attention_LBSA, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.step_dim = step_dim
self.features_dim = 0
weight = torch.zeros(feature_dim, feature_dim)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
context=torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(context)
self.context_vector=nn.Parameter(context)
if bias:
self.b = nn.Parameter(torch.zeros(feature_dim))
def forward(self, x, mask=None):
feature_dim = self.feature_dim
step_dim = self.step_dim
temp=x.contiguous().view(-1, feature_dim)
if(debug):
print("temp",temp.shape)
print("weight",self.weight.shape)
eij = torch.mm(temp, self.weight)
if(debug):
print("eij step 1",eij.shape)
#eij = eij.view(-1, step_dim)
if(debug):
print("eij step 2",eij.shape)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
### changedstep
eij = torch.mm(eij, self.context_vector)
if(debug):
print("eij step 3",eij.shape)
print("context_vector",self.context_vector.shape)
eij = eij.view(-1, step_dim)
# a = torch.exp(eij)
# if(debug==True):
# print("a shape",a.shape)
# print("mask shape",mask.shape)
# if mask is not None:
# a = a * mask
# a = a /(torch.sum(a, 1, keepdim=True) + 1e-10)
        # mask out padded steps before the softmax; guard against a None mask
        if mask is not None:
            eij[~mask] = float('-inf')
        a = torch.softmax(eij, dim=1)
if(debug):
print("attention",a.shape)
weighted_input = x * torch.unsqueeze(a, -1)
if(debug):
print("weighted input",weighted_input.shape)
return torch.sum(weighted_input, 1),a
class Attention_LBSA_sigmoid(Attention_LBSA):
def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
super().__init__(feature_dim, step_dim, bias, **kwargs)
def forward(self, x, mask=None):
feature_dim = self.feature_dim
step_dim = self.step_dim
temp=x.contiguous().view(-1, feature_dim)
if(debug):
print("temp",temp.shape)
print("weight",self.weight.shape)
eij = torch.mm(temp, self.weight)
if(debug):
print("eij step 1",eij.shape)
#eij = eij.view(-1, step_dim)
if(debug):
print("eij step 2",eij.shape)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
### changedstep
eij = torch.mm(eij, self.context_vector)
if(debug):
print("eij step 3",eij.shape)
print("context_vector",self.context_vector.shape)
eij = eij.view(-1, step_dim)
sigmoid = nn.Sigmoid()
a=sigmoid(eij)
if(debug==True):
print("a shape",a.shape)
print("mask shape",mask.shape)
if mask is not None:
a = a * mask
if(debug):
print("attention",a.shape)
weighted_input = x * torch.unsqueeze(a, -1)
if(debug):
print("weighted input",weighted_input.shape)
return torch.sum(weighted_input, 1),a
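A minimal usage sketch for the Attention layer defined above, run in the same module (shapes inferred from forward(): x is (batch, step_dim, feature_dim), mask is a boolean (batch, step_dim) tensor; all sizes here are illustrative):

import torch

attn = Attention(feature_dim=64, step_dim=10)
x = torch.randn(2, 10, 64)                  # (batch, step_dim, feature_dim)
mask = torch.ones(2, 10, dtype=torch.bool)  # True marks real (non-padded) steps
mask[:, 7:] = False                         # pretend the last 3 steps are padding

pooled, weights = attn(x, mask)             # pooled: (2, 64), weights: (2, 10)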
avg_line_length: 29.726257 | max_line_length: 67 | alphanum_fraction: 0.535614
[remaining qsc_* quality-signal columns for this row omitted]

hexsha: 708adadf1231d74102ff04d560d2a5d25f39db97 | size: 48,026 | ext: py | lang: Python
max_stars_repo_path: src/test/core/test_Boolean.py | max_stars_repo_name: leonard112/OctaneScript | max_stars_repo_head_hexsha: f04dedb0cf9b1dcebd3056639ff3e40f52f54424 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo_path: src/test/core/test_Boolean.py | max_issues_repo_name: leonard112/OctaneScript | max_issues_repo_head_hexsha: f04dedb0cf9b1dcebd3056639ff3e40f52f54424 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 45 | issues events: 2020-11-23T01:44:15.000Z to 2021-06-14T02:44:31.000Z
max_forks_repo_path: src/test/core/test_Boolean.py | max_forks_repo_name: leonard112/OctaneScript | max_forks_repo_head_hexsha: f04dedb0cf9b1dcebd3056639ff3e40f52f54424 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks events: null to null
content:
# This file is licensed under the MIT license.
# See license for more details: https://github.com/leonard112/OctaneScript/blob/main/README.md
import pytest
from core.Line import Line
from core.Stack import Stack
from core.Boolean import Boolean
from Interpreter import reserved
line = Line("TEST", 0, "test")
test_stack = Stack()
test_stack.push(line)
# BRACKETS
def test_no_brackets_fails():
assert_error(Boolean('true', test_stack, {}))
def test_no_right_bracket_fails():
assert_error(Boolean('[true', test_stack, {}))
def test_no_left_bracket_fails():
assert_error(Boolean('true]', test_stack, {}))
def test_extra_right_bracket_fails():
assert_error(Boolean('[[true]', test_stack, {}))
def test_extra_left_bracket_fails():
assert_error(Boolean('[true]]', test_stack, {}))
def test_extra_left_right_brackets_equal_success():
assert Boolean('[[true]]', test_stack, {}).evaluate() == True
def test_evaluating_two_separate_conditionals_works():
assert Boolean('[[true] and [true]]', test_stack, {}).evaluate() == True
def test_evaluating_three_separate_conditionals_works():
assert Boolean('[[true] and [true] and [true]]', test_stack, {}).evaluate() == True
def test_evaluating_four_separate_conditionals_works():
assert Boolean('[[true] and [true] and [true] and [true]]', test_stack, {}).evaluate() == True
# SINGLE VALUES
def test_true_is_true():
assert Boolean('[true]', test_stack, {}).evaluate() == True
def test_false_is_false():
assert Boolean('[false]', test_stack, {}).evaluate() == False
def test_integer_is_true():
assert Boolean('[2]', test_stack, {}).evaluate() == True
def test_decimal_is_true():
assert Boolean('[2.2]', test_stack, {}).evaluate() == True
def test_math_is_true():
assert Boolean('[(1+1)]', test_stack, {}).evaluate() == True
def test_string_is_true():
assert Boolean('["hello"]', test_stack, {}).evaluate() == True
def test_array_is_true():
assert Boolean('[<1,2,3>]', test_stack, {}).evaluate() == True
def test_string_type_is_true():
assert Boolean('[@Type:String]', test_stack, {}).evaluate() == True
def test_number_type_is_true():
assert Boolean('[@Type:Number]', test_stack, {}).evaluate() == True
def test_boolean_type_is_true():
assert Boolean('[@Type:Boolean]', test_stack, {}).evaluate() == True
def test_array_type_is_true():
assert Boolean('[@Type:Array]', test_stack, {}).evaluate() == True
def test_function_type_is_true():
    # assumes '@Type:Function' is the function type literal
    assert Boolean('[@Type:Function]', test_stack, {}).evaluate() == True
# OPERATIONS
def test_invalid_operation_fails():
assert_error(Boolean('[true invalid true]', test_stack, {}))
def test_missing_operation_fails():
assert_error(Boolean('[true true]', test_stack, {}))
# EQUALS
def test_equals_for_equal_integers_true():
assert Boolean('[1 equals 1]', test_stack, {}).evaluate() == True
def test_equals_for_equal_decimals_true():
assert Boolean('[1.1 equals 1.1]', test_stack, {}).evaluate() == True
def test_equals_for_equal_math_true():
assert Boolean('[(1 + 1) equals (1 + 1)]', test_stack, {}).evaluate() == True
def test_equals_for_equal_arrays_true():
assert Boolean('[<1, 2, 3> equals <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_equals_for_equal_booleans_true():
assert Boolean('[true equals true]', test_stack, {}).evaluate() == True
def test_equals_for_equal_strings_true():
assert Boolean('["hello" equals "hello"]', test_stack, {}).evaluate() == True
def test_equals_for_equal_variables_true():
assert Boolean('[x equals y]', test_stack, {'x': 1, 'y': 1}).evaluate() == True
def test_equals_for_equal_types_true():
assert Boolean('[@Type:String equals @Type:String]', test_stack, {}).evaluate() == True
def test_equals_for_unequal_integers_false():
assert Boolean('[1 equals 2]', test_stack, {}).evaluate() == False
def test_equals_for_unequal_decimals_false():
assert Boolean('[1.1 equals 2.4]', test_stack, {}).evaluate() == False
def test_equals_for_unequal_math_false():
assert Boolean('[(1 + 1) equals (1 + 2)]', test_stack, {}).evaluate() == False
def test_equals_for_unequal_arrays_false():
assert Boolean('[<1, 2, 3> equals <1, 2, 4>]', test_stack, {}).evaluate() == False
def test_equals_for_unequal_booleans_false():
assert Boolean('[true equals false]', test_stack, {}).evaluate() == False
def test_equals_for_unequal_strings_false():
assert Boolean('["hello" equals "world"]', test_stack, {}).evaluate() == False
def test_equals_for_unequal_variables_false():
assert Boolean('[x equals y]', test_stack, {'x': 1, 'y': 2}).evaluate() == False
def test_equals_for_unequal_types_false():
assert Boolean('[@Type:String equals @Type:Number]', test_stack, {}).evaluate() == False
def test_equals_for_differing_value_types_raises_error():
assert_error(Boolean('[1 equals "hello"]', test_stack, {}))
# NOT EQUALS
def test_not_equals_for_unequal_integers_lower_first_true():
assert Boolean('[1 notEquals 2]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_decimals_lower_first_true():
assert Boolean('[1.1 notEquals 2.4]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_math_lower_first_true():
assert Boolean('[(1 + 1) notEquals (1 + 2)]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_arrays_lower_first_true():
assert Boolean('[<1, 2 ,3> notEquals <1, 2, 4>]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_booleans_lower_first_true():
assert Boolean('[true notEquals false]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_strings_lower_first_true():
assert Boolean('["hello" notEquals "world"]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_variables_lower_first_true():
assert Boolean('[x notEquals y]', test_stack, {'x': 1, 'y': 2}).evaluate() == True
def test_not_equals_for_unequal_integers_greater_first_true():
assert Boolean('[2 notEquals 1]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_decimals_greater_first_true():
assert Boolean('[2.4 notEquals 1.1]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_math_greater_first_true():
assert Boolean('[(1 + 2) notEquals (1 + 1)]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_arrays_greater_first_true():
assert Boolean('[<1, 2, 4> notEquals <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_booleans_greater_first_true():
assert Boolean('[false notEquals true]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_strings_greater_first_true():
assert Boolean('["world" notEquals "hello"]', test_stack, {}).evaluate() == True
def test_not_equals_for_unequal_variables_greater_first_true():
assert Boolean('[x notEquals y]', test_stack, {'x': 2, 'y': 1}).evaluate() == True
def test_not_equals_for_equal_integers_false():
assert Boolean('[1 notEquals 1]', test_stack, {}).evaluate() == False
def test_not_equals_for_equal_decimals_false():
assert Boolean('[1.1 notEquals 1.1]', test_stack, {}).evaluate() == False
def test_not_equals_for_equal_math_false():
assert Boolean('[(1 + 1) notEquals (1 + 1)]', test_stack, {}).evaluate() == False
def test_not_equals_for_equal_arrays_false():
assert Boolean('[<1, 2, 3> notEquals <1, 2, 3>]', test_stack, {}).evaluate() == False
def test_not_equals_for_equal_booleans_false():
assert Boolean('[true notEquals true]', test_stack, {}).evaluate() == False
def test_not_equals_for_equal_strings_false():
assert Boolean('["hello" notEquals "hello"]', test_stack, {}).evaluate() == False
def test_not_equals_for_equal_variables_false():
assert Boolean('[x notEquals y]', test_stack, {'x': 1, 'y': 1}).evaluate() == False
def test_not_equals_for_unequal_types_true():
assert Boolean('[@Type:String notEquals @Type:Integer]', test_stack, {}).evaluate() == True
def test_not_equals_for_equal_types_false():
assert Boolean('[@Type:String notEquals @Type:String]', test_stack, {}).evaluate() == False
def test_not_equals_for_differing_value_types_raises_error():
assert_error(Boolean('[1 notEquals "hello"]', test_stack, {}))
# LESS THAN
def test_less_than_for_first_integer_less_than_following_true():
assert Boolean('[1 lessThan 2]', test_stack, {}).evaluate() == True
def test_less_than_for_first_decimal_less_than_following_true():
assert Boolean('[1.1 lessThan 2.4]', test_stack, {}).evaluate() == True
def test_less_than_for_first_math_expression_less_than_following_true():
assert Boolean('[(1 + 1) lessThan (1 + 2)]', test_stack, {}).evaluate() == True
def test_less_than_for_first_array_less_than_following_true():
assert Boolean('[<1, 2, 3> lessThan <1, 2, 4>]', test_stack, {}).evaluate() == True
def test_less_than_for_first_boolean_less_than_following_true():
assert Boolean('[false lessThan true]', test_stack, {}).evaluate() == True
def test_less_than_for_first_string_less_than_following_true():
assert Boolean('["hello" lessThan "world"]', test_stack, {}).evaluate() == True
def test_less_than_for_first_variable_less_than_following_true():
assert Boolean('[x lessThan y]', test_stack, {'x': 1, 'y': 2}).evaluate() == True
def test_less_than_for_first_integer_greater_than_following_false():
assert Boolean('[2 lessThan 1]', test_stack, {}).evaluate() == False
def test_less_than_for_first_decimal_greater_than_following_false():
assert Boolean('[2.4 lessThan 1.1]', test_stack, {}).evaluate() == False
def test_less_than_for_first_math_expression_greater_than_following_false():
assert Boolean('[(1 + 2) lessThan (1 + 1)]', test_stack, {}).evaluate() == False
def test_less_than_for_first_array_greater_than_following_false():
assert Boolean('[<1, 2, 4> lessThan <1, 2, 3>]', test_stack, {}).evaluate() == False
def test_less_than_for_first_boolean_greater_than_following_false():
assert Boolean('[true lessThan false]', test_stack, {}).evaluate() == False
def test_less_than_for_first_string_greater_than_following_false():
assert Boolean('["world" lessThan "hello"]', test_stack, {}).evaluate() == False
def test_less_than_for_first_variable_greater_than_following_false():
assert Boolean('[x lessThan y]', test_stack, {'x': 2, 'y': 1}).evaluate() == False
def test_less_than_for_first_integer_equal_to_following_false():
assert Boolean('[1 lessThan 1]', test_stack, {}).evaluate() == False
def test_less_than_for_first_decimal_equal_to_following_false():
assert Boolean('[1.1 lessThan 1.1]', test_stack, {}).evaluate() == False
def test_less_than_for_first_math_expression_equal_to_following_false():
assert Boolean('[(1 + 1) lessThan (1 + 1)]', test_stack, {}).evaluate() == False
def test_less_than_for_first_array_equal_to_following_false():
assert Boolean('[<1, 2, 3> lessThan <1, 2, 3>]', test_stack, {}).evaluate() == False
def test_less_than_for_first_boolean_equal_to_following_false():
assert Boolean('[true lessThan true]', test_stack, {}).evaluate() == False
def test_less_than_for_first_string_equal_to_following_false():
assert Boolean('["hello" lessThan "hello"]', test_stack, {}).evaluate() == False
def test_less_than_for_first_variable_equal_to_following_false():
assert Boolean('[x lessThan y]', test_stack, {'x': 1, 'y': 1}).evaluate() == False
def test_less_than_for_differing_value_types_raises_error():
assert_error(Boolean('[1 lessThan "hello"]', test_stack, {}))
def test_less_than_for_types_raises_error():
assert_error(Boolean('[@Type:String lessThan @Type:String]', test_stack, {}))
# LESS THAN EQUALS
def test_less_than_equals_for_first_integer_less_than_following_true():
assert Boolean('[1 lessThanEquals 2]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_decimal_less_than_following_true():
assert Boolean('[1.1 lessThanEquals 2.4]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_math_expression_less_than_following_true():
assert Boolean('[(1 + 1) lessThanEquals (1 + 2)]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_array_less_than_following_true():
assert Boolean('[<1, 2, 3> lessThanEquals <1, 2, 4>]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_boolean_less_than_following_true():
assert Boolean('[false lessThanEquals true]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_string_less_than_following_true():
assert Boolean('["hello" lessThanEquals "world"]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_variable_less_than_following_true():
assert Boolean('[x lessThanEquals y]', test_stack, {'x': 1, 'y': 2}).evaluate() == True
def test_less_than_equals_for_first_integer_greater_than_following_false():
assert Boolean('[2 lessThanEquals 1]', test_stack, {}).evaluate() == False
def test_less_than_equals_for_first_decimal_greater_than_following_false():
assert Boolean('[2.4 lessThanEquals 1.1]', test_stack, {}).evaluate() == False
def test_less_than_equals_for_first_math_expression_greater_than_following_false():
assert Boolean('[(1 + 2) lessThanEquals (1 + 1)]', test_stack, {}).evaluate() == False
def test_less_than_equals_for_first_array_greater_than_following_false():
assert Boolean('[<1, 2, 4> lessThanEquals <1, 2, 3>]', test_stack, {}).evaluate() == False
def test_less_than_equals_for_first_boolean_greater_than_following_false():
assert Boolean('[true lessThanEquals false]', test_stack, {}).evaluate() == False
def test_less_than_equals_for_first_string_greater_than_following_false():
assert Boolean('["world" lessThanEquals "hello"]', test_stack, {}).evaluate() == False
def test_less_than_equals_for_first_variable_greater_than_following_false():
assert Boolean('[x lessThanEquals y]', test_stack, {'x': 2, 'y': 1}).evaluate() == False
def test_less_than_equals_for_first_integer_equal_to_following_true():
    assert Boolean('[1 lessThanEquals 1]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_decimal_equal_to_following_true():
    assert Boolean('[1.1 lessThanEquals 1.1]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_math_expression_equal_to_following_true():
    assert Boolean('[(1 + 1) lessThanEquals (1 + 1)]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_array_equal_to_following_true():
    assert Boolean('[<1, 2, 3> lessThanEquals <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_boolean_equal_to_following_true():
    assert Boolean('[true lessThanEquals true]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_string_equal_to_following_true():
    assert Boolean('["hello" lessThanEquals "hello"]', test_stack, {}).evaluate() == True
def test_less_than_equals_for_first_variable_equal_to_following_true():
    assert Boolean('[x lessThanEquals y]', test_stack, {'x': 1, 'y': 1}).evaluate() == True
def test_less_than_equals_for_differing_value_types_raises_error():
assert_error(Boolean('[1 lessThanEquals "hello"]', test_stack, {}))
def test_less_than_equals_for_types_raises_error():
assert_error(Boolean('[@Type:String lessThanEquals @Type:String]', test_stack, {}))
# GREATER THAN
def test_greater_than_for_first_integer_less_than_following_false():
assert Boolean('[1 greaterThan 2]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_decimal_less_than_following_false():
assert Boolean('[1.1 greaterThan 2.4]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_math_expression_less_than_following_false():
assert Boolean('[(1 + 1) greaterThan (1 + 2)]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_array_less_than_following_false():
assert Boolean('[<1, 2, 3> greaterThan <1, 2, 4>]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_boolean_less_than_following_false():
assert Boolean('[false greaterThan true]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_string_less_than_following_false():
assert Boolean('["hello" greaterThan "world"]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_variable_less_than_following_false():
assert Boolean('[x greaterThan y]', test_stack, {'x': 1, 'y': 2}).evaluate() == False
def test_greater_than_for_first_integer_greater_than_following_true():
assert Boolean('[2 greaterThan 1]', test_stack, {}).evaluate() == True
def test_greater_than_for_first_decimal_greater_than_following_true():
assert Boolean('[2.4 greaterThan 1.1]', test_stack, {}).evaluate() == True
def test_greater_than_for_first_math_expression_greater_than_following_true():
assert Boolean('[(1 + 2) greaterThan (1 + 1)]', test_stack, {}).evaluate() == True
def test_greater_than_for_first_array_greater_than_following_true():
assert Boolean('[<1, 2, 4> greaterThan <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_greater_than_for_first_boolean_greater_than_following_true():
assert Boolean('[true greaterThan false]', test_stack, {}).evaluate() == True
def test_greater_than_for_first_string_greater_than_following_true():
assert Boolean('["world" greaterThan "hello"]', test_stack, {}).evaluate() == True
def test_greater_than_for_first_variable_greater_than_following_true():
assert Boolean('[x greaterThan y]', test_stack, {'x': 2, 'y': 1}).evaluate() == True
def test_greater_than_for_first_integer_equal_to_following_false():
assert Boolean('[1 greaterThan 1]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_decimal_equal_to_following_false():
assert Boolean('[1.1 greaterThan 1.1]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_math_expression_equal_to_following_false():
assert Boolean('[(1 + 1) greaterThan (1 + 1)]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_array_equal_to_following_false():
assert Boolean('[<1, 2, 3> greaterThan <1, 2, 3>]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_boolean_equal_to_following_false():
assert Boolean('[true greaterThan true]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_string_equal_to_following_false():
assert Boolean('["hello" greaterThan "hello"]', test_stack, {}).evaluate() == False
def test_greater_than_for_first_variable_equal_to_following_false():
assert Boolean('[x greaterThan y]', test_stack, {'x': 1, 'y': 1}).evaluate() == False
def test_greater_than_for_differing_value_types_raises_error():
assert_error(Boolean('[1 greaterThan "hello"]', test_stack, {}))
def test_greater_than_for_types_raises_error():
assert_error(Boolean('[@Type:String greaterThan @Type:String]', test_stack, {}))
# GREATER THAN EQUALS
def test_greater_than_equals_for_first_integer_less_than_following_false():
assert Boolean('[1 greaterThanEquals 2]', test_stack, {}).evaluate() == False
def test_greater_than_equals_for_first_decimal_less_than_following_false():
assert Boolean('[1.1 greaterThanEquals 2.4]', test_stack, {}).evaluate() == False
def test_greater_than_equals_for_first_math_less_than_following_false():
assert Boolean('[(1 + 1) greaterThanEquals (1 + 2)]', test_stack, {}).evaluate() == False
def test_greater_than_equals_for_first_array_less_than_following_false():
assert Boolean('[<1, 2, 3> greaterThanEquals <1, 2, 4>]', test_stack, {}).evaluate() == False
def test_greater_than_equals_for_first_boolean_less_than_following_false():
assert Boolean('[false greaterThanEquals true]', test_stack, {}).evaluate() == False
def test_greater_than_equals_for_first_string_less_than_following_false():
assert Boolean('["hello" greaterThanEquals "world"]', test_stack, {}).evaluate() == False
def test_greater_than_equals_for_first_variable_less_than_following_false():
assert Boolean('[x greaterThanEquals y]', test_stack, {'x': 1, 'y': 2}).evaluate() == False
def test_greater_than_equals_for_first_integer_greater_than_following_true():
assert Boolean('[2 greaterThanEquals 1]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_decimal_greater_than_following_true():
assert Boolean('[2.4 greaterThanEquals 1.1]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_math_expression_greater_than_following_true():
assert Boolean('[(1 + 2) greaterThanEquals (1 + 1)]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_array_greater_than_following_true():
assert Boolean('[<1, 2, 4> greaterThanEquals <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_boolean_greater_than_following_true():
assert Boolean('[true greaterThanEquals false]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_string_greater_than_following_true():
assert Boolean('["world" greaterThanEquals "hello"]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_variable_greater_than_following_true():
assert Boolean('[x greaterThanEquals y]', test_stack, {'x': 2, 'y': 1}).evaluate() == True
def test_greater_than_equals_for_first_integer_equal_to_following_true():
assert Boolean('[1 greaterThanEquals 1]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_decimal_equal_to_following_true():
assert Boolean('[1.1 greaterThanEquals 1.1]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_math_expression_equal_to_following_true():
assert Boolean('[(1 + 1) greaterThanEquals (1 + 1)]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_array_equal_to_following_true():
assert Boolean('[<1, 2, 3> greaterThanEquals <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_boolean_equal_to_following_true():
assert Boolean('[true greaterThanEquals true]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_string_equal_to_following_true():
assert Boolean('["hello" greaterThanEquals "hello"]', test_stack, {}).evaluate() == True
def test_greater_than_equals_for_first_variable_equal_to_following_true():
assert Boolean('[x greaterThanEquals y]', test_stack, {'x': 1, 'y': 1}).evaluate() == True
def test_greater_than_equals_for_differing_value_types_raises_error():
assert_error(Boolean('[1 greaterThanEquals "hello"]', test_stack, {}))
def test_greater_than_equals_for_types_raises_error():
assert_error(Boolean('[@Type:String greaterThanEquals @Type:String]', test_stack, {}))
# AND
def test_and_for_equal_boolean_values_is_true():
assert Boolean('[true and true]', test_stack, {}).evaluate() == True
def test_and_for_equal_integer_values_is_true():
assert Boolean('[1 and 1]', test_stack, {}).evaluate() == True
def test_and_for_equal_decimal_values_is_true():
assert Boolean('[1.1 and 1.1]', test_stack, {}).evaluate() == True
def test_and_for_equal_math_values_is_true():
assert Boolean('[(1 + 1) and (1 + 1)]', test_stack, {}).evaluate() == True
def test_and_for_equal_array_values_is_true():
assert Boolean('[<1, 2, 3> and <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_and_for_equal_string_values_is_true():
assert Boolean('["hello" and "hello"]', test_stack, {}).evaluate() == True
def test_and_for_equal_variable_values_is_true():
assert Boolean('[x and y]', test_stack, {'x': 1, 'y': 1}).evaluate() == True
def test_and_for_unequal_booleans_with_greater_first_is_true_unless_booleans():
assert Boolean('[true and false]', test_stack, {}).evaluate() == False
def test_and_for_unequal_integers_with_greater_first_is_true_unless_booleans():
assert Boolean('[2 and 1]', test_stack, {}).evaluate() == True
def test_and_for_unequal_decimals_with_greater_first_is_true_unless_booleans():
assert Boolean('[2.4 and 1.1]', test_stack, {}).evaluate() == True
def test_and_for_unequal_math_with_greater_first_is_true_unless_booleans():
assert Boolean('[(1 + 2) and (1 + 1)]', test_stack, {}).evaluate() == True
def test_and_for_unequal_arrays_with_greater_first_is_true_unless_booleans():
assert Boolean('[<1, 2, 4> and <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_and_for_unequal_strings_with_greater_first_is_true_unless_booleans():
assert Boolean('["world" and "hello"]', test_stack, {}).evaluate() == True
def test_and_for_unequal_variables_with_greater_first_is_true_unless_booleans():
assert Boolean('[x and y]', test_stack, {'x': 2, 'y': 1}).evaluate() == True
def test_and_for_unequal_booleans_with_lower_first_is_true_unless_booleans():
assert Boolean('[false and true]', test_stack, {}).evaluate() == False
def test_and_for_unequal_integers_with_lower_first_is_true_unless_booleans():
assert Boolean('[1 and 2]', test_stack, {}).evaluate() == True
def test_and_for_unequal_decimals_with_lower_first_is_true_unless_booleans():
assert Boolean('[1.1 and 2.4]', test_stack, {}).evaluate() == True
def test_and_for_unequal_math_with_lower_first_is_true_unless_booleans():
assert Boolean('[(1 + 1) and (1 + 2)]', test_stack, {}).evaluate() == True
def test_and_for_unequal_arrays_with_lower_first_is_true_unless_booleans():
assert Boolean('[<1, 2, 3> and <1, 2, 4>]', test_stack, {}).evaluate() == True
def test_and_for_unequal_strings_with_lower_first_is_true_unless_booleans():
assert Boolean('["hello" and "world"]', test_stack, {}).evaluate() == True
def test_and_for_unequal_variables_with_lower_first_is_true_unless_booleans():
assert Boolean('[x and y]', test_stack, {'x': 1, 'y': 2}).evaluate() == True
def test_false_and_false_is_false():
assert Boolean('[false and false]', test_stack, {}).evaluate() == False
def test_and_for_differing_value_types_raises_error():
assert_error(Boolean('[1 and "hello"]', test_stack, {}))
def test_and_for_types_raises_error():
assert_error(Boolean('[@Type:String and @Type:String]', test_stack, {}))
# OR
def test_or_for_equal_boolean_values_is_true():
assert Boolean('[true or true]', test_stack, {}).evaluate() == True
def test_or_for_equal_integer_values_is_true():
assert Boolean('[1 or 1]', test_stack, {}).evaluate() == True
def test_or_for_equal_decimal_values_is_true():
assert Boolean('[1.1 or 1.1]', test_stack, {}).evaluate() == True
def test_or_for_equal_math_values_is_true():
assert Boolean('[(1 + 1) or (1 + 1)]', test_stack, {}).evaluate() == True
def test_or_for_equal_array_values_is_true():
assert Boolean('[<1, 2, 3> or <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_or_for_equal_string_values_is_true():
assert Boolean('["hello" or "hello"]', test_stack, {}).evaluate() == True
def test_or_for_equal_variable_values_is_true():
assert Boolean('[x or y]', test_stack, {'x': 1, 'y': 1}).evaluate() == True
def test_or_for_unequal_booleans_with_greater_first_is_true():
assert Boolean('[true or false]', test_stack, {}).evaluate() == True
def test_or_for_unequal_integers_with_greater_first_is_true():
assert Boolean('[2 or 1]', test_stack, {}).evaluate() == True
def test_or_for_unequal_decimals_with_greater_first_is_true():
assert Boolean('[2.4 or 1.1]', test_stack, {}).evaluate() == True
def test_or_for_unequal_math_with_greater_first_is_true():
assert Boolean('[(1 + 2) or (1 + 1)]', test_stack, {}).evaluate() == True
def test_or_for_unequal_arrays_with_greater_first_is_true():
assert Boolean('[<1, 2, 4> or <1, 2, 3>]', test_stack, {}).evaluate() == True
def test_or_for_unequal_strings_with_greater_first_is_true():
assert Boolean('["world" or "hello"]', test_stack, {}).evaluate() == True
def test_or_for_unequal_variables_with_greater_first_is_true():
assert Boolean('[x or y]', test_stack, {'x': 2, 'y': 1}).evaluate() == True
def test_or_for_unequal_booleans_with_lower_first_is_true():
assert Boolean('[false or true]', test_stack, {}).evaluate() == True
def test_or_for_unequal_integers_with_lower_first_is_true():
assert Boolean('[1 or 2]', test_stack, {}).evaluate() == True
def test_or_for_unequal_decimals_with_lower_first_is_true():
assert Boolean('[1.1 or 2.4]', test_stack, {}).evaluate() == True
def test_or_for_unequal_math_with_lower_first_is_true():
assert Boolean('[(1 + 1) or (1 + 2)]', test_stack, {}).evaluate() == True
def test_or_for_unequal_arrays_with_lower_first_is_true():
assert Boolean('[<1, 2, 3> or <1, 2, 4>]', test_stack, {}).evaluate() == True
def test_or_for_unequal_strings_with_lower_first_is_true():
assert Boolean('["hello" or "world"]', test_stack, {}).evaluate() == True
def test_or_for_unequal_variables_with_lower_first_is_true():
assert Boolean('[x or y]', test_stack, {'x': 1, 'y': 2}).evaluate() == True
def test_false_or_false_is_false():
assert Boolean('[false or false]', test_stack, {}).evaluate() == False
def test_or_for_differing_value_types_raises_error():
assert_error(Boolean('[1 or "hello"]', test_stack, {}))
def test_or_for_types_raises_error():
assert_error(Boolean('[@Type:String or @Type:String]', test_stack, {}))
# STRING EXPRESSIONS
def test_double_quote_string_expressions_can_be_compared_using_boolean():
assert Boolean('["hello" . "world" equals "hello" . "world"]', test_stack, {}).evaluate() == True
def test_single_quote_string_expressions_can_be_compared_using_boolean():
assert Boolean("['hello' . 'world' equals 'hello' . 'world']", test_stack, {}).evaluate() == True
def test_double_quote_string_expressions_with_variables_concatenated_after_can_be_compared_using_boolean():
assert Boolean('["hello" . x equals "hello" . x]', test_stack, {'x': 1}).evaluate() == True
def test_single_quote_string_expressions_with_variables_concatenated_after_can_be_compared_using_boolean():
assert Boolean("['hello' . x equals 'hello' . x]", test_stack, {'x': 1}).evaluate() == True
def test_double_quote_string_expressions_with_variables_concatenated_before_can_be_compared_using_boolean():
assert Boolean('[x . "hello" equals x . "hello"]', test_stack, {'x': 1}).evaluate() == True
def test_single_quote_string_expressions_with_variables_concatenated_before_can_be_compared_using_boolean():
assert Boolean("[x . 'hello' equals x . 'hello']", test_stack, {'x': 1}).evaluate() == True
def test_string_concatenated_variables_can_be_compared_using_boolean():
assert Boolean("[x . y equals x . y]", test_stack, {'x': 1, 'y': 2}).evaluate() == True
def test_complex_boolean_that_works_with_string_expressions():
assert Boolean('[[x . "hello" lessThan x . \'world\'] and [x . "hello" lessThanEquals x . y]]', test_stack, {'x': 1, 'y': "world"}).evaluate() == True
# SPACING
# LACK OF SPACES
def test_no_spaces_between_variable_operators_and_operands_raises_error():
assert_error(Boolean('[xequalsy]', test_stack, {'x': 'hello', 'y': 'world'}))
def test_no_spaces_between_unenclosed_boolean_operators_and_operands_raises_error():
assert_error(Boolean('[trueequalsfalse]', test_stack, {}))
def test_no_spaces_between_complex_double_quote_string_expression_operators_and_operands_with_variables_touching_operator_raises_error():
assert_error(Boolean('["hello".xequalsy."world"]', test_stack, {'x': 'hello', 'y': 'world'}))
def test_no_spaces_between_complex_single_quote_string_expression_operators_and_operands_with_variables_touching_operator_raises_error():
assert_error(Boolean("['hello'.xequalsy.'world']", test_stack, {'x': 'hello', 'y': 'world'}))
def test_no_spaces_between_complex_string_expression_operators_and_operands_with_math_and_variables_touching_operator_raises_error():
assert_error(Boolean('[(1+1).xequalsy.(2*2)]', test_stack, {'x': 'hello', 'y': 'world'}))
def test_no_spaces_between_complex_string_expression_operators_and_operands_with_boolean_expression_and_variables_touching_operator_raises_error():
assert_error(Boolean('[[true].xequalsy.[false]]', test_stack, {'x': 'hello', 'y': 'world'}))
def test_no_spaces_between_operator_and_types_raises_error():
assert_error(Boolean('[@Type:Stringequals@Type:String]', test_stack, {}))
def test_no_spaces_between_complex_double_quote_string_expression_operators_and_operands_with_strings_touching_operator_works():
assert Boolean('[x."hello"equals"world".y]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_no_spaces_between_complex_single_quote_string_expression_operators_and_operands_with_strings_touching_operator_works():
assert Boolean("[x.'hello'equals'world'.y]", test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_no_spaces_between_complex_string_expression_operators_and_operands_with_math_touching_operator_works():
assert Boolean('[x.(1+1)equals(2*2).y]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_no_spaces_between_complex_string_expression_operators_and_operands_with_arrays_touching_operator_works():
assert Boolean('[x.<1,2,3>equals<1,2,4>.y]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_no_spaces_between_complex_string_expression_operators_and_operands_with_boolean_expression_touching_operator_works():
assert Boolean('[x.[true]equals[false].y]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_no_spaces_when_comparing_single_enclosed_true_false_booleans_works():
assert Boolean('[[true]equals[false]]', test_stack, {}).evaluate() == False
def test_no_spaces_when_comparing_double_quote_strings_works():
assert Boolean('["hello"equals"world"]', test_stack, {}).evaluate() == False
def test_no_spaces_when_comparing_single_quote_strings_works():
assert Boolean("['hello'equals'world']", test_stack, {}).evaluate() == False
def test_no_spaces_when_comparing_math_operations_works():
assert Boolean('[(1 + 1)equals(2 * 2)]', test_stack, {}).evaluate() == False
# EXTRA SPACES
def test_extra_spaces_in_boolean_operation_on_variables_works():
assert Boolean('[ x equals y ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_unenclosed_booleans_works():
assert Boolean('[ true equals false ]', test_stack, {}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_double_quote_string_expressions_concatenated_with_variables_facing_operator_works():
assert Boolean('[ "hello" . x equals y . "world" ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_single_quote_string_expressions_concatenated_with_variables_facing_operator_works():
assert Boolean("[ 'hello' . x equals y . 'world' ]", test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_string_expressions_with_math_concatenated_with_variables_facing_operator_works():
assert Boolean('[ ( 1 + 1 ) . x equals y . ( 2 * 2 ) ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_string_expressions_with_boolean_expressions_concatenated_with_variables_facing_operator_works():
assert Boolean('[ [ true ] . x equals y . [ false ] ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_between_operator_and_types_works():
assert Boolean('[ @Type:String equals @Type:String ]', test_stack, {}).evaluate() == True
def test_extra_spaces_in_boolean_operation_on_double_quote_string_expressions_concatenated_with_strings_facing_operator_works():
assert Boolean('[ x . "hello" equals "world" . y ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_single_quote_string_expressions_concatenated_with_strings_facing_operator_works():
assert Boolean("[ x . 'hello' equals 'world' . y ]", test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_string_expressions_with_variables_concatenated_with_math_operation_facing_operator_works():
assert Boolean('[ x . ( 1 + 1 ) equals ( 2 * 2 ) . y ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_string_expressions_with_variables_concatenated_with_arrays_facing_operator_works():
assert Boolean('[ x . < 1 , 2 , 3 > equals < 1 , 2 , 4 > . y ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_in_boolean_operation_on_string_expressions_with_variables_concatenated_with_boolean_operation_facing_operator_works():
assert Boolean('[ x . [ true ] equals [ false ] . y ]', test_stack, {'x': 'hello', 'y': 'world'}).evaluate() == False
def test_extra_spaces_when_comparing_single_enclosed_true_false_booleans_works():
assert Boolean('[ [ true ] equals [ false ] ]', test_stack, {}).evaluate() == False
def test_extra_spaces_when_comparing_double_quote_strings_works():
assert Boolean('[ "hello" equals "world" ]', test_stack, {}).evaluate() == False
def test_extra_spaces_when_comparing_single_quote_strings_works():
assert Boolean("[ 'hello' equals 'world' ]", test_stack, {}).evaluate() == False
def test_extra_spaces_when_comparing_math_operations_works():
assert Boolean('[ ( 1 + 1 ) equals (2 * 2) ]', test_stack, {}).evaluate() == False
# MISSING OR EXTRA ENCLOSING SYMBOLS AND OPERATORS
# BOOLEAN BRACKETS
def test_extra_right_brackets_on_complex_boolean_raises_error():
assert_error(Boolean('[[true equals true]] and [true notEquals false]]', test_stack, {}))
def test_missing_right_brackets_on_complex_boolean_raises_error():
assert_error(Boolean('[[true equals true and [true notEquals false]]', test_stack, {}))
def test_extra_left_brackets_on_complex_boolean_raises_error():
assert_error(Boolean('[[true equals true] and [[true notEquals false]]', test_stack, {}))
def test_missing_left_brackets_on_complex_boolean_raises_error():
assert_error(Boolean('[[true equals true] and true notEquals false]]', test_stack, {}))
# STRINGS
def test_extra_dot_operator_when_comparing_strings_raises_error():
assert_error(Boolean('["hello" . "world" . equals "hello" . "world"]', test_stack, {}))
def test_missing_dot_operator_when_comparing_strings_raises_error():
assert_error(Boolean('["hello" "world" equals "hello" . "world"]', test_stack, {}))
def test_extra_closing_double_quote_when_comparing_strings_raises_error():
assert_error(Boolean('["hello" . "world"" equals "hello" . "world"]', test_stack, {}))
def test_extra_leading_double_quote_when_comparing_strings_raises_error():
assert_error(Boolean('["hello" . "world" equals ""hello" . "world"]', test_stack, {}))
def test_extra_closing_single_quote_when_comparing_strings_raises_error():
assert_error(Boolean("['hello' . 'world'' equals 'hello' . 'world']", test_stack, {}))
def test_extra_leading_single_quote_when_comparing_strings_raises_error():
assert_error(Boolean("['hello' . 'world' equals ''hello' . 'world']", test_stack, {}))
def test_missing_closing_double_quote_when_comparing_strings_raises_error():
assert_error(Boolean('["hello" . "world equals "hello" . "world"]', test_stack, {}))
def test_missing_leading_double_quote_when_comparing_strings_raises_error():
assert_error(Boolean('["hello" . "world" equals hello" . "world"]', test_stack, {}))
def test_missing_closing_single_quote_when_comparing_strings_raises_error():
assert_error(Boolean("['hello' . 'world equals 'hello' . 'world']", test_stack, {}))
def test_missing_leading_single_quote_when_comparing_strings_raises_error():
assert_error(Boolean("['hello' . 'world' equals hello' . 'world']", test_stack, {}))
# MATH
def test_extra_closing_parenthesis_when_comparing_math_raises_error():
assert_error(Boolean('[(1 + 1)) equals (2 * 2)]', test_stack, {}))
def test_extra_leading_parenthesis_when_comparing_math_raises_error():
assert_error(Boolean('[(1 + 1) equals ((2 * 2)]', test_stack, {}))
def test_missing_closing_parenthesis_when_comparing_math_raises_error():
assert_error(Boolean('[(1 + 1 equals (2 * 2)]', test_stack, {}))
def test_missing_leading_parenthesis_when_comparing_math_raises_error():
assert_error(Boolean('[(1 + 1) equals 2 * 2)]', test_stack, {}))
# ARRAY
def test_extra_closing_symbol_when_comparing_arrays_raises_error():
assert_error(Boolean('[<1, 2, 3>> equals <1, 2, 4>]', test_stack, {}))
def test_extra_leading_symbol_when_comparing_arrays_raises_error():
assert_error(Boolean('[<1, 2, 3> equals <<1, 2, 4>]', test_stack, {}))
def test_missing_closing_symbol_when_comparing_arrays_raises_error():
assert_error(Boolean('[<1, 2, 3 equals <1, 2, 4>]', test_stack, {}))
def test_missing_leading_symbol_when_comparing_arrays_raises_error():
assert_error(Boolean('[<1, 2, 3> equals 1, 2, 4>]', test_stack, {}))
def test_extra_comma_when_comparing_arrays_raises_error():
assert_error(Boolean('[<1, 2, 3,> equals <1, 2, 4>]', test_stack, {}))
def test_missing_comma_when_comparing_arrays_raises_error():
assert_error(Boolean('[<1, 2, 3> equals <1 2, 4>]', test_stack, {}))
# COMPARING TO DIFFERENT VALUE TYPES
# STRING
def test_string_cannot_be_compared_to_an_integer():
assert_error(Boolean('["hello" or 1]', test_stack, {}))
def test_string_cannot_be_compared_to_a_decimal():
assert_error(Boolean('["hello" or 1.1]', test_stack, {}))
def test_string_cannot_be_compared_to_math():
assert_error(Boolean('["hello" or (1 + 1)]', test_stack, {}))
def test_string_cannot_be_compared_to_an_array():
assert_error(Boolean('["hello" or <1, 2, 3>]', test_stack, {}))
def test_string_cannot_be_compared_to_a_boolean():
assert_error(Boolean('["hello" or true]', test_stack, {}))
# NUMBERS AND MATH
def test_integers_cannot_be_compared_to_strings():
assert_error(Boolean('[1 or "hello"]', test_stack, {}))
def test_decimals_cannot_be_compared_to_strings():
assert_error(Boolean('[1.0 or "hello"]', test_stack, {}))
def test_math_cannot_be_compared_to_strings():
assert_error(Boolean('[(2 - 1) or "hello"]', test_stack, {}))
def test_integers_can_be_compared_to_decimals():
assert Boolean("[1 or 1.1]", test_stack, {}).evaluate() == True
def test_integers_can_be_compared_to_math():
assert Boolean("[1 or (2 - 1)]", test_stack, {}).evaluate() == True
def test_decimals_can_be_compared_to_math():
assert Boolean("[1.1 or (2 - 1)]", test_stack, {}).evaluate() == True
def test_integers_cannot_be_compared_to_arrays():
assert_error(Boolean('[1 or <1, 2, 3>]', test_stack, {}))
def test_decimals_cannot_be_compared_to_arrays():
assert_error(Boolean('[1.0 or <1, 2, 3>]', test_stack, {}))
def test_math_cannot_be_compared_to_arrays():
assert_error(Boolean('[(2 - 1) or <1, 2, 3>]', test_stack, {}))
def test_integers_can_be_compared_to_booleans():
assert Boolean("[1 or true]", test_stack, {}).evaluate() == True
def test_decimals_can_be_compared_to_booleans():
assert Boolean("[1.0 or true]", test_stack, {}).evaluate() == True
def test_math_can_be_compared_to_booleans():
assert Boolean("[(2 - 1) or true]", test_stack, {}).evaluate() == True
# ARRAYS
def test_arrays_cannot_be_compared_to_strings():
assert_error(Boolean('[<1, 2, 3> or "hello"]', test_stack, {}))
def test_arrays_cannot_be_compared_to_integers():
assert_error(Boolean('[<1, 2, 3> or 1]', test_stack, {}))
def test_arrays_cannot_be_compared_to_decimals():
assert_error(Boolean('[<1, 2, 3> or 1.1]', test_stack, {}))
def test_arrays_cannot_be_compared_to_math():
assert_error(Boolean('[<1, 2, 3> or (1 + 1)]', test_stack, {}))
def test_arrays_cannot_be_compared_to_booleans():
assert_error(Boolean('[<1, 2, 3> or true]', test_stack, {}))
# BOOLEANS
def test_booleans_cannot_be_compared_to_strings():
assert_error(Boolean('[true or "hello"]', test_stack, {}))
def test_booleans_can_be_compared_to_integers():
assert Boolean("[true or 1]", test_stack, {}).evaluate() == True
def test_booleans_can_be_compared_to_decimals():
assert Boolean("[true or 1.1]", test_stack, {}).evaluate() == True
def test_booleans_can_be_compared_to_math():
assert Boolean("[true or (1 + 1)]", test_stack, {}).evaluate() == True
def test_booleans_cannot_be_compared_to_arrays():
assert_error(Boolean('[true or <1, 2, 3>]', test_stack, {}))
# INLINE OPERATIONS
# EQUALS
def test_inline_equals_works_if_third_operand_is_boolean():
assert Boolean('["hello" equals "hello" equals true]', test_stack, {}).evaluate() == True
def test_inline_equals_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["hello" equals "hello" equals "hello"]', test_stack, {}))
# NOT EQUALS
def test_inline_not_equals_works_if_third_operand_is_boolean():
assert Boolean('["hello" notEquals "hello" notEquals true]', test_stack, {}).evaluate() == True
def test_inline_not_equals_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["hello" notEquals "hello" notEquals "hello"]', test_stack, {}))
# LESS THAN
def test_inline_lessThan_works_if_third_operand_is_boolean():
assert Boolean('["hello" lessThan "hello" lessThan true]', test_stack, {}).evaluate() == True
def test_inline_lessThan_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["hello" lessThan "hello" lessThan "hello"]', test_stack, {}))
# LESS THAN EQUALS
def test_inline_lessThanEquals_works_if_third_operand_is_boolean():
assert Boolean('["hello" lessThanEquals "world" lessThanEquals true]', test_stack, {}).evaluate() == True
def test_inline_lessThanEquals_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["hello" lessThanEquals "hello" lessThanEquals "hello"]', test_stack, {}))
# GREATER THAN
def test_inline_greaterThan_works_if_third_operand_is_boolean():
assert Boolean('["world" greaterThan "hello" greaterThan true]', test_stack, {}).evaluate() == False
def test_inline_greaterThan_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["world" greaterThan "hello" greaterThan "hello"]', test_stack, {}))
# GREATER THAN EQUALS
def test_inline_greaterThanEquals_works_if_third_operand_is_boolean():
assert Boolean('["world" greaterThanEquals "hello" greaterThanEquals true]', test_stack, {}).evaluate() == True
def test_inline_greaterThanEquals_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["world" greaterThanEquals "hello" greaterThanEquals "hello"]', test_stack, {}))
# AND
def test_inline_and_works_if_third_operand_is_boolean():
assert Boolean('["hello" and "hello" and true]', test_stack, {}).evaluate() == True
def test_inline_and_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["hello" and "hello" and "hello"]', test_stack, {}))
# OR
def test_inline_or_works_if_third_operand_is_boolean():
assert Boolean('["hello" or "hello" or true]', test_stack, {}).evaluate() == True
def test_inline_or_fails_if_third_operand_is_not_boolean():
assert_error(Boolean('["hello" or "hello" or "hello"]', test_stack, {}))
# COMPLICATED
def test_complicated_expression_successful():
assert Boolean('[[x."hello" equals (1 + 1)."hello"] lessThan ["world" or "test"] and true]', test_stack, {'x': 2}).evaluate() == False
assert Boolean('[true and [x."hello" equals (1 + 1)."hello"] lessThan ["world" or "test"]]', test_stack, {'x': 2}).evaluate() == False
assert Boolean('[[2 and 3] equals false]', test_stack, {}).evaluate() == False
def assert_error(expression):
with pytest.raises(SystemExit) as error:
expression.evaluate()
assert error.type == SystemExit
assert error.value.code == 1
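# A minimal, self-contained sketch of the pattern assert_error relies on
# (an illustrative aside, not part of the original suite): a malformed
# expression is expected to abort via sys.exit(1), which pytest surfaces
# as a SystemExit carrying the exit code. BadExpression is a hypothetical
# stand-in for Boolean.
import sys

import pytest


class BadExpression:
    def evaluate(self):
        sys.exit(1)  # mirrors how the interpreter aborts on malformed input


def test_bad_expression_exits_with_code_1():
    with pytest.raises(SystemExit) as error:
        BadExpression().evaluate()
    assert error.value.code == 1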
| 66.242759
| 170
| 0.741286
| 6,754
| 48,026
| 4.823068
| 0.023838
| 0.087583
| 0.099678
| 0.082824
| 0.942809
| 0.907567
| 0.8765
| 0.841412
| 0.777345
| 0.710391
| 0
| 0.014235
| 0.110648
| 48,026
| 725
| 171
| 66.242759
| 0.748431
| 0.012181
| 0
| 0.051482
| 0
| 0.00468
| 0.177074
| 0.006772
| 0
| 0
| 0
| 0
| 0.4961
| 1
| 0.48986
| false
| 0
| 0.0078
| 0
| 0.49766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5637d8d08102a85afdfccaecd2fc649993b944bb
| 104
|
py
|
Python
|
Chapter 07/Chap07_Example7.51.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 07/Chap07_Example7.51.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 07/Chap07_Example7.51.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
myl1 = [1, 2, 3, [4, 5]]
print(myl1)        # L1 -> [1, 2, 3, [4, 5]]
print(myl1[3])     # L2 -> [4, 5]: the fourth element is itself a list
print(myl1[3][0])  # L3 -> 4: first element of the nested list
print(myl1[3][1])  # L4 -> 5: second element of the nested list
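# A brief extension (an editorial sketch, not part of Example 7.51): the same
# nested element can be reached from the end with negative indices, and the
# inner list can be unpacked into separate names.
myl2 = [1, 2, 3, [4, 5]]
print(myl2[-1])           # [4, 5] - the inner list is the last element
print(myl2[-1][-1])       # 5 - last element of the inner list
first, second = myl2[-1]  # unpack the nested list into two names
print(first, second)      # 4 5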
| 17.333333
| 22
| 0.557692
| 23
| 104
| 2.521739
| 0.521739
| 0.62069
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213483
| 0.144231
| 104
| 5
| 23
| 20.8
| 0.438202
| 0.105769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.8
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
5692172b5ea8551dd9906fc109450e076f752f93
| 26,930
|
py
|
Python
|
tests/chain_fixtures.py
|
avilaton/quantipy
|
6ce4e5bfb22c6520164d8884fe6f83240e9baa21
|
[
"MIT"
] | null | null | null |
tests/chain_fixtures.py
|
avilaton/quantipy
|
6ce4e5bfb22c6520164d8884fe6f83240e9baa21
|
[
"MIT"
] | null | null | null |
tests/chain_fixtures.py
|
avilaton/quantipy
|
6ce4e5bfb22c6520164d8884fe6f83240e9baa21
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
BASIC_CHAIN_STR = (u'Chain...\nName: chain\nOrientation: None'
u'\nX: None\nY: None'
u'\nNumber of views: None')
X_INDEX = [(u'q5_1', u'All'), (u'q5_1', 1L), (u'q5_1', 2L),
(u'q5_1', 3L), (u'q5_1', 4L), (u'q5_1', 5L), (u'q5_1', 97L),
(u'q5_1', 98L), (u'q5_1', u'mean'), (u'q5_1', u'median')]
X_INDEX_PAINTED = [(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Base'),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'I would refuse if asked'),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Very unlikely'),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u"Probably wouldn't"),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Probably would if asked'),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Very likely'),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u"I'm already planning to"),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u"Don't know"),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Mean'),
(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Median')]
EXPECTED_X_BASIC = ([[[250.0, 81.0, 169.0], [11.0, 4.0, 7.0],
[20.0, 5.0, 15.0], [74.0, 30.0, 44.0], [0.0, 0.0, 0.0],
[74.0, 24.0, 50.0], [10.0, 4.0, 6.0], [61.0, 14.0, 47.0],
[30.364, 24.493827160493826, 33.17751479289941],
[5.0, 5.0, 5.0]],
X_INDEX,
[(u'q5_1', u'@'), (u'q4', 1L), (u'q4', 2L)],
X_INDEX_PAINTED,
[(u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Total'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No')]], )
EXPECTED_X_NEST_1 = ([[[250.0, 53.0, 28.0, 81.0, 88.0],
[11.0, 2.0, 2.0, 5.0, 2.0], [20.0, 2.0, 3.0, 7.0, 8.0],
[74.0, 19.0, 11.0, 21.0, 23.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[74.0, 19.0, 5.0, 21.0, 29.0],
[10.0, 4.0, 0.0, 2.0, 4.0],
[61.0, 7.0, 7.0, 25.0, 22.0],
[30.364, 23.245283018867923, 26.857142857142858,
34.95061728395062, 31.545454545454547],
[5.0, 5.0, 3.0, 5.0, 5.0]],
X_INDEX,
[(u'#pad-0', u'#pad-0', u'q5_1', u'@'),
(u'q4', 1L, u'gender', 1L), (u'q4', 1L, u'gender', 2L),
(u'q4', 2L, u'gender', 1L), (u'q4', 2L, u'gender', 2L)],
X_INDEX_PAINTED,
[(u'#pad-0', u'#pad-0',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Total'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female')]], )
EXPECTED_X_NEST_2 = ([[[250.0, 12.0, 15.0, 7.0, 8.0, 11.0, 3.0, 6.0, 6.0, 6.0,
7.0, 20.0, 12.0, 16.0, 17.0, 16.0, 21.0, 23.0, 21.0,
9.0, 14.0],
[11.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0,
1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0],
[20.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0,
1.0, 1.0, 3.0, 0.0, 2.0, 3.0, 3.0, 2.0, 0.0, 0.0],
[74.0, 1.0, 7.0, 3.0, 4.0, 4.0, 2.0, 4.0, 2.0, 1.0, 2.0,
4.0, 6.0, 5.0, 4.0, 2.0, 5.0, 6.0, 5.0, 2.0, 5.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[74.0, 6.0, 3.0, 1.0, 2.0, 7.0, 1.0, 0.0, 1.0, 1.0, 2.0,
7.0, 2.0, 3.0, 6.0, 3.0, 8.0, 7.0, 5.0, 2.0, 7.0],
[10.0, 1.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
[61.0, 4.0, 1.0, 2.0, 0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0,
7.0, 2.0, 4.0, 5.0, 7.0, 4.0, 5.0, 7.0, 5.0, 1.0],
[30.364, 43.5, 22.066666666666666, 30.142857142857142,
15.125, 4.2727272727272725, 3.6666666666666665,
18.666666666666668, 34.833333333333336,
34.333333333333336, 30.571428571428573, 36.8,
18.916666666666668, 26.8125, 37.05882352941177,
50.5625, 26.19047619047619, 28.130434782608695,
39.42857142857143, 56.22222222222222, 17.5],
[5.0, 5.0, 3.0, 3.0, 3.0, 5.0, 3.0, 3.0, 4.0, 4.0, 5.0,
5.0, 3.0, 3.0, 5.0, 51.0, 5.0, 5.0, 5.0, 98.0, 5.0]],
X_INDEX,
[(u'#pad-0', u'#pad-0', u'#pad-0', u'#pad-0', u'q5_1', u'@'),
(u'q4', 1L, u'gender', 1L, u'Wave', 1L),
(u'q4', 1L, u'gender', 1L, u'Wave', 2L),
(u'q4', 1L, u'gender', 1L, u'Wave', 3L),
(u'q4', 1L, u'gender', 1L, u'Wave', 4L),
(u'q4', 1L, u'gender', 1L, u'Wave', 5L),
(u'q4', 1L, u'gender', 2L, u'Wave', 1L),
(u'q4', 1L, u'gender', 2L, u'Wave', 2L),
(u'q4', 1L, u'gender', 2L, u'Wave', 3L),
(u'q4', 1L, u'gender', 2L, u'Wave', 4L),
(u'q4', 1L, u'gender', 2L, u'Wave', 5L),
(u'q4', 2L, u'gender', 1L, u'Wave', 1L),
(u'q4', 2L, u'gender', 1L, u'Wave', 2L),
(u'q4', 2L, u'gender', 1L, u'Wave', 3L),
(u'q4', 2L, u'gender', 1L, u'Wave', 4L),
(u'q4', 2L, u'gender', 1L, u'Wave', 5L),
(u'q4', 2L, u'gender', 2L, u'Wave', 1L),
(u'q4', 2L, u'gender', 2L, u'Wave', 2L),
(u'q4', 2L, u'gender', 2L, u'Wave', 3L),
(u'q4', 2L, u'gender', 2L, u'Wave', 4L),
(u'q4', 2L, u'gender', 2L, u'Wave', 5L)],
X_INDEX_PAINTED,
[(u'#pad-0', u'#pad-0', u'#pad-0', u'#pad-0',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Total'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 5'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 5'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 5'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 5')]], )
EXPECTED_X_NEST_3 = ([[[250.0, 12.0, 15.0, 7.0, 8.0, 11.0, 3.0, 6.0, 6.0, 6.0,
7.0, 20.0, 12.0, 16.0, 17.0, 16.0, 21.0, 23.0, 21.0,
9.0, 14.0, 11.0, 20.0, 74.0, 0.0, 74.0, 10.0, 61.0,
53.0, 28.0, 81.0, 88.0],
[11.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0,
1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 11.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0, 5.0, 2.0],
[20.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0,
1.0, 1.0, 3.0, 0.0, 2.0, 3.0, 3.0, 2.0, 0.0, 0.0, 0.0,
20.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 3.0, 7.0, 8.0],
[74.0, 1.0, 7.0, 3.0, 4.0, 4.0, 2.0, 4.0, 2.0, 1.0, 2.0,
4.0, 6.0, 5.0, 4.0, 2.0, 5.0, 6.0, 5.0, 2.0, 5.0, 0.0,
0.0, 74.0, 0.0, 0.0, 0.0, 0.0, 19.0, 11.0, 21.0, 23.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[74.0, 6.0, 3.0, 1.0, 2.0, 7.0, 1.0, 0.0, 1.0, 1.0, 2.0,
7.0, 2.0, 3.0, 6.0, 3.0, 8.0, 7.0, 5.0, 2.0, 7.0, 0.0,
0.0, 0.0, 0.0, 74.0, 0.0, 0.0, 19.0, 5.0, 21.0, 29.0],
[10.0, 1.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 10.0, 0.0, 4.0, 0.0, 2.0, 4.0],
[61.0, 4.0, 1.0, 2.0, 0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0,
7.0, 2.0, 4.0, 5.0, 7.0, 4.0, 5.0, 7.0, 5.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 61.0, 7.0, 7.0, 25.0, 22.0],
[30.364, 43.5, 22.066666666666666, 30.142857142857142,
15.125, 4.2727272727272725, 3.6666666666666665,
18.666666666666668, 34.833333333333336,
34.333333333333336, 30.571428571428573, 36.8,
18.916666666666668, 26.8125, 37.05882352941177,
50.5625, 26.19047619047619, 28.130434782608695,
39.42857142857143, 56.22222222222222, 17.5, 1.0, 2.0,
3.0, np.NaN, 5.0, 97.0, 98.0, 23.245283018867923,
26.857142857142858, 34.95061728395062,
31.545454545454547],
[5.0, 5.0, 3.0, 3.0, 3.0, 5.0, 3.0, 3.0, 4.0, 4.0, 5.0,
5.0, 3.0, 3.0, 5.0, 51.0, 5.0, 5.0, 5.0, 98.0, 5.0,
1.0, 2.0, 3.0, 0.0, 5.0, 97.0, 98.0, 5.0, 3.0, 5.0,
5.0]],
X_INDEX,
[(u'#pad-0', u'#pad-0', u'#pad-0', u'#pad-0', u'q5_1', u'@'),
(u'q4', 1L, u'gender', 1L, u'Wave', 1L),
(u'q4', 1L, u'gender', 1L, u'Wave', 2L),
(u'q4', 1L, u'gender', 1L, u'Wave', 3L),
(u'q4', 1L, u'gender', 1L, u'Wave', 4L),
(u'q4', 1L, u'gender', 1L, u'Wave', 5L),
(u'q4', 1L, u'gender', 2L, u'Wave', 1L),
(u'q4', 1L, u'gender', 2L, u'Wave', 2L),
(u'q4', 1L, u'gender', 2L, u'Wave', 3L),
(u'q4', 1L, u'gender', 2L, u'Wave', 4L),
(u'q4', 1L, u'gender', 2L, u'Wave', 5L),
(u'q4', 2L, u'gender', 1L, u'Wave', 1L),
(u'q4', 2L, u'gender', 1L, u'Wave', 2L),
(u'q4', 2L, u'gender', 1L, u'Wave', 3L),
(u'q4', 2L, u'gender', 1L, u'Wave', 4L),
(u'q4', 2L, u'gender', 1L, u'Wave', 5L),
(u'q4', 2L, u'gender', 2L, u'Wave', 1L),
(u'q4', 2L, u'gender', 2L, u'Wave', 2L),
(u'q4', 2L, u'gender', 2L, u'Wave', 3L),
(u'q4', 2L, u'gender', 2L, u'Wave', 4L),
(u'q4', 2L, u'gender', 2L, u'Wave', 5L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 1L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 2L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 3L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 4L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 5L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 97L),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1', u'q5_1', 98L),
(u'#pad-2', u'#pad-2', u'q4', 1L, u'gender', 1L),
(u'#pad-2', u'#pad-2', u'q4', 1L, u'gender', 2L),
(u'#pad-2', u'#pad-2', u'q4', 2L, u'gender', 1L),
(u'#pad-2', u'#pad-2', u'q4', 2L, u'gender', 2L)],
X_INDEX_PAINTED,
[(u'#pad-0', u'#pad-0', u'#pad-0', u'#pad-0',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Total'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 5'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 5'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male',
u'Wave. Wave', u'Wave 5'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 1'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 2'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 3'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 4'),
((u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female',
u'Wave. Wave', u'Wave 5'),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'I would refuse if asked'),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Very unlikely'),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u"Probably wouldn't"),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Probably would if asked'),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u'Very likely'),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u"I'm already planning to"),
(u'#pad-1', u'#pad-1', u'#pad-1', u'#pad-1',
u'q5_1. How likely are you to do each of the following in the next year? - Surfing',
u"Don't know"),
(u'#pad-2', u'#pad-2',
(u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Male'),
(u'#pad-2', u'#pad-2',
(u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'Yes', u'gender. What is your gender?', u'Female'),
(u'#pad-2', u'#pad-2',
(u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Male'),
(u'#pad-2', u'#pad-2',
(u'q4. Do you ever participate in sports activities '
u'with people in your household?'),
u'No', u'gender. What is your gender?', u'Female')]], )
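# A minimal sketch, assuming these tuple lists are meant as pandas MultiIndex
# labels (the fixture file itself does not say so). The file above is
# Python 2 (u'' strings, 1L longs); in Python 3 with current pandas the same
# idea looks like this, with plain ints in place of the long literals.
import pandas as pd

labels = [(u'q5_1', u'All'), (u'q5_1', 1), (u'q5_1', 2)]
index = pd.MultiIndex.from_tuples(labels)  # two levels: question, answer code
frame = pd.DataFrame({'@': [250.0, 11.0, 20.0]}, index=index)
print(frame)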
| 65.843521
| 108
| 0.403788
| 3,991
| 26,930
| 2.707091
| 0.039339
| 0.061829
| 0.079137
| 0.087375
| 0.973899
| 0.96455
| 0.96094
| 0.960755
| 0.95622
| 0.954646
| 0
| 0.159082
| 0.438619
| 26,930
| 408
| 109
| 66.004902
| 0.555563
| 0.00078
| 0
| 0.796482
| 0
| 0
| 0.35598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.002513
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3b15c86693ae75bc3f6646d4457da4c1fd99af79
| 169,314
|
py
|
Python
|
wac/test_assignments.py
|
taylorschimek/WhatAChore
|
8de1cacdcb81217e7d5a792dac20b7f0909409bd
|
[
"MIT"
] | null | null | null |
wac/test_assignments.py
|
taylorschimek/WhatAChore
|
8de1cacdcb81217e7d5a792dac20b7f0909409bd
|
[
"MIT"
] | 10
|
2017-08-28T19:40:14.000Z
|
2021-09-07T23:25:29.000Z
|
wac/test_assignments.py
|
taylorschimek/WhatAChore
|
8de1cacdcb81217e7d5a792dac20b7f0909409bd
|
[
"MIT"
] | null | null | null |
import datetime
from django_webtest import WebTest
from time import sleep
import webtest
from django.core.urlresolvers import reverse
from django.test.client import Client
from freezegun import freeze_time
from .forms import *
from .models import *
from useraccounts.models import User
class AssignmentsTests(WebTest):
fixtures = ['wac/fixtures/wac.json', 'useraccounts/fixtures/useraccounts.json']
@classmethod
def setUp(cls):
cls.client = Client()
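# Note: declaring setUp as a @classmethod is unconventional - unittest still
# invokes it before each test (the lookup resolves through the class), but a
# plain instance method (or setUpClass for one-time setup) is the idiomatic form.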
def test_assignment_make2(self):
""" Testing creation of assignments.
All subintervals represented.
User = test2@example.com; Password = password2
"""
self.client.login(email='test2@example.com', password='password2')
resp = self.client.get(reverse('lineup-make'))
assignments = Assignment.objects.all()
chores = Chore.objects.filter(
user = 16
)
self.assertEqual(len(chores), 12)
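# A range rather than an exact count, presumably because the 'Every 2 Days'
# and 'Every 3 Days' chores yield one assignment more or fewer depending on
# where the week starts (compare the assertIn((3, 4)) and assertIn((2, 3))
# checks in the interval test below).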
self.assertTrue(10 <= len(assignments) <= 12)
def test_no_workers(self):
"""
Testing trying to get assignments without having any workers.
User = test3@example.com; password = password2
No Workers.
"""
self.client.login(email='test3@example.com', password='password2')
resp = self.client.get(reverse('lineup-make'), user='test3@example.com')
assignments = Assignment.objects.all()
chores = Chore.objects.filter(
user = 17
)
self.assertEqual(len(chores), 2)
self.assertEqual(len(assignments), 0)
self.assertEqual(resp.status_code, 302)
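# A hedged refactoring sketch (not in the original file): every freeze_time
# block in the test below repeats the same filter-then-count chain once per
# interval. A helper on AssignmentsTests along these lines would collapse each
# repetition to a single call; count_for_interval is a hypothetical name.
def count_for_interval(self, interval, week):
    # Mirrors the repeated Assignment.objects.filter(...).filter(...) pattern.
    return Assignment.objects.filter(
        what__interval=interval
    ).filter(
        week=week
    ).count()
# usage: self.assertEqual(self.count_for_interval('Daily', current_week), 7)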
def test_assignment_intervals_through_a_year(self):
""" Testing creation of assignments starting 6-26-2017.
All intervals represented.
User = test1@example.com
"""
total_assignments = 0
monthlies_check = None
two_monthlies_check = None
quarterlies_check = None
yearlies_check = False
with freeze_time('2017-06-26'): # initial M1st
assert datetime.datetime.now() == datetime.datetime(2017, 6, 26)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1) # Monthly1st
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-07-03'): # 1 wk- E2W
assert datetime.datetime.now() == datetime.datetime(2017, 7, 3)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-07-10'): # 2 wks- Monthly1 M15th
assert datetime.datetime.now() == datetime.datetime(2017, 7, 10)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 1:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 2) # Monthly15th
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-07-17'): # 3 wks- E2W Monthly2
assert datetime.datetime.now() == datetime.datetime(2017, 7, 17)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-07-24'): # 4 wks- Monthly3
assert datetime.datetime.now() == datetime.datetime(2017, 7, 24)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-07-31'): # 5 wks- E2W M1st 2M1st
assert datetime.datetime.now() == datetime.datetime(2017, 7, 31)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1) # Monthly1st
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-08-07'): # 6 wks- 2Months1
assert datetime.datetime.now() == datetime.datetime(2017, 8, 7)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
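            # First eligible week for 2Months1: it may slip to next week, so
            # record 'first' rather than fail; week 7 resolves the tracker.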
if len(two_months) == 0:
two_monthlies_check = 'first'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-08-14'): # 7 wks- E2W Monthly1 2Months2 M15th 2M15th
assert datetime.datetime.now() == datetime.datetime(2017, 8, 14)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 1:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 2) # Monthly15th
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 1)
else:
if len(two_months) == 1:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 2)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-08-21'): # 8 wks- Monthly2 2Months3
assert datetime.datetime.now() == datetime.datetime(2017, 8, 21)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
two_monthlies_check = None
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = None
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-08-28'): # 9 wks- E2W Monthly3 M1st Q1st
assert datetime.datetime.now() == datetime.datetime(2017, 8, 28)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
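            # The bi-monthly occurrence can straddle this week boundary, so
            # either 0 or 1 is acceptable here.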
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1) # Quarterly1st
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-09-04'): # 10 wks-
assert datetime.datetime.now() == datetime.datetime(2017, 9, 4)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-09-11'): # 11 wks- E2W Quarterly1 M15th Q15th
assert datetime.datetime.now() == datetime.datetime(2017, 9, 11)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1) # Monthly15th
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
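            # Same one-week-slip bookkeeping as monthlies_check, applied to
            # the quarterlies (Quarterly1 and Q15th are both due around now).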
if len(quarterlies) == 1:
quarterlies_check = 'first'
else:
self.assertEqual(len(quarterlies), 2)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-09-18'): # 12 wks- Monthly1 Quarterly2
assert datetime.datetime.now() == datetime.datetime(2017, 9, 18)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 0:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
if len(quarterlies) == 0:
quarterlies_check = 'second'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-09-25'): # 13 wks- E2W Monthly2* Quarterly3 M1st 2M1st
assert datetime.datetime.now() == datetime.datetime(2017, 9, 25)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
# Monthly1st
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
else:
if len(monthlies) == 1:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = None
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-10-02'): # 14 wks- Monthly3
assert datetime.datetime.now() == datetime.datetime(2017, 10, 2)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-10-09'): # 15 wks- E2W M15th 2M15th 2Months1
assert datetime.datetime.now() == datetime.datetime(2017, 10, 9)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1) # Monthly15th
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if len(two_months) == 1:
two_monthlies_check = 'first'
else:
self.assertEqual(len(two_months), 2)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-10-16'): # 16 wks- 2Months2
assert datetime.datetime.now() == datetime.datetime(2017, 10, 16)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-10-23'): # 17 wks- E2W Monthly1 2Months3
assert datetime.datetime.now() == datetime.datetime(2017, 10, 23)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 0:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-10-30'): # 18 wks- Monthly2 M1st
assert datetime.datetime.now() == datetime.datetime(2017, 10, 30)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
else:
if len(monthlies) == 1:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-11-06'): # 19 wks- E2W Monthly3 FLAG
assert datetime.datetime.now() == datetime.datetime(2017, 11, 6)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
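            # Last week of this monthly cycle: consume the tracker and reset
            # it to None so the next cycle starts fresh.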
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-11-13'): # 20 wks- M15th
assert datetime.datetime.now() == datetime.datetime(2017, 11, 13)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1) # Monthly15th
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-11-20'): # 21 wks- E2W
assert datetime.datetime.now() == datetime.datetime(2017, 11, 20)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-11-27'): # 22 wks- Monthly1 M1st 2M1st Q1st
assert datetime.datetime.now() == datetime.datetime(2017, 11, 27)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 1:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-12-04'): # 23 wks- E2W Monthly2 FLAG
assert datetime.datetime.now() == datetime.datetime(2017, 12, 4)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-12-11'): # 24 wks- Monthly3 2Months1 M15th 2M15th Q15th
assert datetime.datetime.now() == datetime.datetime(2017, 12, 11)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if len(two_months) == 1:
two_monthlies_check = 'first'
else:
self.assertEqual(len(two_months), 2)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-12-18'): # 25 wks- E2W 2Months2 Quarterly1
assert datetime.datetime.now() == datetime.datetime(2017, 12, 18)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if len(quarterlies) == 0:
quarterlies_check = 'first'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2017-12-25'): # 26 wks- 2Months3 Quarterly2
assert datetime.datetime.now() == datetime.datetime(2017, 12, 25)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
if len(quarterlies) == 0:
quarterlies_check = 'second'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-01-01'): # 27 wks- E2W Monthly1 Quarterly3 M1st
assert datetime.datetime.now() == datetime.datetime(2018, 1, 1)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 1:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
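            # Resolve the quarterly tracker: if the earlier occurrence already
            # showed up, none is due now; otherwise it must appear this week.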
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = None
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-01-08'): # 28 wks- Monthly2
assert datetime.datetime.now() == datetime.datetime(2018, 1, 8)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
# --------------------------------------------------
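        # The interval counts above are computed the same way every week. A
        # small helper along these lines could replace each chained
        # .filter().filter() pair; it is shown only as a sketch (not used
        # below, so the test's behavior is unchanged) and assumes `what` is
        # a single-valued relation, where chained and combined filters are
        # equivalent:
        def count_for(interval, week):
            # Count this week's assignments for one recurrence interval.
            return Assignment.objects.filter(
                what__interval=interval,
                week=week,
            ).count()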
with freeze_time('2018-01-15'): # 29 wks- E2W Monthly3 M15th
assert datetime.datetime.now() == datetime.datetime(2018, 1, 15)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-01-22'): # 30 wks-
assert datetime.datetime.now() == datetime.datetime(2018, 1, 22)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-01-29'): # 31 wks- E2W M1st 2M1st
assert datetime.datetime.now() == datetime.datetime(2018, 1, 29)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-02-05'): # 32 wks- Monthly1
assert datetime.datetime.now() == datetime.datetime(2018, 2, 5)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 0:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-02-12'): # 33 wks- E2W Monthly2 2Months1 M15th 2M15th
assert datetime.datetime.now() == datetime.datetime(2018, 2, 12)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
else:
if len(monthlies) == 1:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if len(two_months) == 1:
two_monthlies_check = 'first'
else:
self.assertEqual(len(two_months), 2)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
            total_asses = (
                len(daily_asses) + len(two_days) + len(three_days)
                + len(weeklies) + len(two_weeks) + len(monthlies)
                + len(two_months) + len(quarterlies) + len(yearlies)
            )
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-02-19'): # 34 wks- Monthly3 2Months2
assert datetime.datetime.now() == datetime.datetime(2018, 2, 19)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-02-26'): # 35 wks- E2W 2Months3 M1st Q1st
assert datetime.datetime.now() == datetime.datetime(2018, 2, 26)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-03-05'): # 36 wks-
assert datetime.datetime.now() == datetime.datetime(2018, 3, 5)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-03-12'): # 37 wks- E2W Monthly1 M15th Q15th
assert datetime.datetime.now() == datetime.datetime(2018, 3, 12)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 1:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-03-19'): # 38 wks- Monthly2
assert datetime.datetime.now() == datetime.datetime(2018, 3, 19)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-03-26'): # 39 wks- E2W Monthly3 Quarterly1 M1st 2M1st
assert datetime.datetime.now() == datetime.datetime(2018, 3, 26)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if len(quarterlies) == 0:
quarterlies_check = 'first'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-04-02'): # 40 wks- Quarterly2
assert datetime.datetime.now() == datetime.datetime(2018, 4, 2)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
if len(quarterlies) == 0:
quarterlies_check = 'second'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-04-09'): # 41 wks- E2W Quarterly3 M15th 2M15th
assert datetime.datetime.now() == datetime.datetime(2018, 4, 9)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = None
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-04-16'): # 42 wks- Monthly1 2Months1
assert datetime.datetime.now() == datetime.datetime(2018, 4, 16)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 0:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if len(two_months) == 0:
two_monthlies_check = 'first'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-04-23'): # 43 wks- E2W Monthly2 2Months2
assert datetime.datetime.now() == datetime.datetime(2018, 4, 23)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-04-30'): # 44 wks- Monthly3 2Months3 M1st
assert datetime.datetime.now() == datetime.datetime(2018, 4, 30)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-05-07'): # 45 wks- E2W
assert datetime.datetime.now() == datetime.datetime(2018, 5, 7)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-05-14'): # 46 wks- M15th
assert datetime.datetime.now() == datetime.datetime(2018, 5, 14)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-05-21'): # 47 wks- E2W Monthly1
assert datetime.datetime.now() == datetime.datetime(2018, 5, 21)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 0:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-05-28'): # 48 wks- Monthly2 M1st 2M1st Q1st Y1st
assert datetime.datetime.now() == datetime.datetime(2018, 5, 28)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
else:
if len(monthlies) == 1:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 1)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-06-04'): # 49 wks- E2W Monthly3
assert datetime.datetime.now() == datetime.datetime(2018, 6, 4)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-06-11'): # 50 wks- M15th 2M15th Q15th Y15th
assert datetime.datetime.now() == datetime.datetime(2018, 6, 11)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 1)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 1)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 1)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 1)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-06-18'): # 51 wks- E2W 2Months1 ErrorYear
assert datetime.datetime.now() == datetime.datetime(2018, 6, 18)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if len(two_months) == 0:
two_monthlies_check = 'first'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
            if len(yearlies) == 0:
                yearlies_check = True
            else:
                self.assertEqual(len(yearlies), 1)
                yearlies_check = False  # set on both paths so the week-52 check below can read it safely
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-06-25'): # 52 wks- Monthly1 2Months2 Y M1st
assert datetime.datetime.now() == datetime.datetime(2018, 6, 25)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if len(monthlies) == 1:
monthlies_check = 'first'
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
if yearlies_check:
self.assertEqual(len(yearlies), 1)
yearlies_check = False
else:
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-07-02'): # 53 wks- E2W Monthly2 2Months3
assert datetime.datetime.now() == datetime.datetime(2018, 7, 2)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 0)
else:
if len(monthlies) == 0:
monthlies_check = 'second'
else:
self.assertEqual(len(monthlies), 1)
monthlies_check = 'success'
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
if two_monthlies_check == 'success':
self.assertEqual(len(two_months), 0)
else:
if len(two_months) == 0:
two_monthlies_check = 'second'
else:
self.assertEqual(len(two_months), 1)
two_monthlies_check = 'success'
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
self.assertEqual(len(quarterlies), 0)
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-07-09'): # 54 wks- Monthly3 Quarterly1 M15th
assert datetime.datetime.now() == datetime.datetime(2018, 7, 9)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
if monthlies_check == 'success':
self.assertEqual(len(monthlies), 1)
monthlies_check = None
else:
self.assertEqual(len(monthlies), 2)
monthlies_check = None
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if len(quarterlies) == 0:
quarterlies_check = 'first'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-07-16'): # 55 wks- E2W Quarterly2
assert datetime.datetime.now() == datetime.datetime(2018, 7, 16)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 1)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertIn(len(two_months), (0,1))
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
if len(quarterlies) == 0:
quarterlies_check = 'second'
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = 'success'
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
with freeze_time('2018-07-23'): # 56 wks- Quarterly3
assert datetime.datetime.now() == datetime.datetime(2018, 7, 23)
self.client.login(email='test1@example.com', password='password1')
resp = self.client.get(reverse('lineup-make'))
current_week = Week.objects.get(
is_current=True
)
assignments = Assignment.objects.filter(
week=current_week
)
daily_asses = Assignment.objects.filter(
what__interval='Daily'
).filter(
week=current_week
)
self.assertEqual(len(daily_asses), 7)
two_days = Assignment.objects.filter(
what__interval='Every 2 Days'
).filter(
week=current_week
)
self.assertIn(len(two_days), (3,4))
three_days = Assignment.objects.filter(
what__interval='Every 3 Days'
).filter(
week=current_week
)
self.assertIn(len(three_days), (2,3))
weeklies = Assignment.objects.filter(
what__interval='Weekly'
).filter(
week=current_week
)
self.assertEqual(len(weeklies), 1)
two_weeks = Assignment.objects.filter(
what__interval='Every 2 Weeks'
).filter(
week=current_week
)
self.assertEqual(len(two_weeks), 0)
monthlies = Assignment.objects.filter(
what__interval='Monthly'
).filter(
week=current_week
)
self.assertEqual(len(monthlies), 0)
two_months = Assignment.objects.filter(
what__interval='Every 2 Months'
).filter(
week=current_week
)
self.assertEqual(len(two_months), 0)
quarterlies = Assignment.objects.filter(
what__interval='Quarterly'
).filter(
week=current_week
)
if quarterlies_check == 'success':
self.assertEqual(len(quarterlies), 0)
else:
self.assertEqual(len(quarterlies), 1)
quarterlies_check = None
yearlies = Assignment.objects.filter(
what__interval='Yearly'
).filter(
week=current_week
)
self.assertEqual(len(yearlies), 0)
total_asses = len(daily_asses) + len(two_days) + len(three_days) + len(weeklies) + len(two_weeks) + len(monthlies) + len(two_months) + len(quarterlies) + len(yearlies)
total_assignments += total_asses
self.assertEqual(total_asses, len(assignments))
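# Editor's sketch (hypothetical helper, not part of the test file above):
# every frozen week repeats the same filter-then-assert pattern once per
# interval. Assuming the same Assignment and Week models, a helper such as
#
#     def interval_count(interval, week):
#         return Assignment.objects.filter(
#             what__interval=interval,
#             week=week,
#         ).count()
#
# would collapse each block to e.g.
#
#     self.assertEqual(interval_count('Weekly', current_week), 1)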
# === next file: sports_manager/models/__init__.py (repo: hbuyse/dj-sports-manager, license: MIT) ===
# -*- coding: utf-8 -*-
from sports_manager.models.category import Category
from sports_manager.models.gymnasium import Gymnasium
from sports_manager.models.license import License
from sports_manager.models.player import Player, MedicalCertificate
from sports_manager.models.team import Team, TimeSlot
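# With these re-exports in place, callers can import every model from the
# package root rather than from the individual submodules, e.g.:
#
#     from sports_manager.models import Player, Team, TimeSlot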
# === next file: utils/util.py (repo: lujunzju/MachineLearningForAirTicketPredicting, license: MIT, 47 stars) ===
# import system library
from datetime import datetime
import random
import json
import numpy as np
# import user-library
import load_data
routes_specific = ["BCN_BUD", # route 1
"BUD_BCN", # route 2
"CRL_OTP", # route 3
"MLH_SKP", # route 4
"MMX_SKP", # route 5
"OTP_CRL", # route 6
"SKP_MLH", # route 7
"SKP_MMX"] # route 8
def days_between(d1, d2):
"""
get the days interval between two dates
:param d1: date1
:param d2: date2
:return: days interval
"""
d1 = datetime.strptime(d1, "%Y%m%d")
d2 = datetime.strptime(d2, "%Y%m%d")
return abs((d2 - d1).days)
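# e.g. days_between("20160101", "20160115") == 14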
def remove_duplicates(values):
"""
remove duplicate value in a list
:param values: input list
:return: no duplicate entry list
"""
output = []
seen = set()
for value in values:
# If value has not been encountered yet,
# ... add it to both list and set.
if value not in seen:
output.append(value)
seen.add(value)
return output
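# e.g. remove_duplicates([3, 1, 3, 2, 1]) == [3, 1, 2]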
def getPrice(price):
"""
Get the numeric price in a string format, which contains currency symbol
:param price:
:return:
"""
    price = float(''.join(ch for ch in price if ch in '0123456789.'))
return price
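# e.g. getPrice("$123.45") == 123.45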
def pickRandomTicket(filePrefix="BCN_BUD", dataset="large data set"):
"""
pick 50 tickets randomly for one route
"""
# get the total departure date length in this route
departureLen = load_data.get_departure_len(filePrefix, dataset)
totalPrice = 0
    count = 0
for index in range(departureLen):
# get the dataset with same departure date
datas = load_data.load_data_with_departureIndex(index, filePrefix, dataset)
date = datas[0]["Date"]
        if int(date) < 20160115:
random.shuffle(datas)
datas = datas[0:50]
for data in datas:
totalPrice += getPrice(data["MinimumPrice"])
            count += 1
    avgPrice = totalPrice * 1.0 / (count * 50)
return avgPrice
def getRandomTicketPriceForAllRoutes():
for route in routes_specific:
avgPrice = pickRandomTicket(route)
with open('randomPrice/randomPrice_{:}.json'.format(route), 'w') as outfile:
json.dump(avgPrice, outfile)
def pickRandomTicketByNumpy(flightNum):
evalMatrix = np.load('inputReg/X_test.npy')
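    # column layout, matching the comments in the specific-route functions
    # below: 0~7 route dummies, 8 departure date, 12 current price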
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
    totalPrice = 0
    count = 0
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
if tmpMatrix.shape[0] > 30:
np.random.shuffle(tmpMatrix)
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += np.sum(tmpMatrix[0:30, :])
            count += 30
else:
totalPrice += np.sum(tmpMatrix)
            count += tmpMatrix.shape[0]
    avgPrice = totalPrice * 1.0 / count
return avgPrice
def getRandomTicketPriceForAllRoutesByNumpy():
for route in range(8):
avgPrice = pickRandomTicketByNumpy(route)
print avgPrice
#with open('randomPrice/randomPrice_{:}.json'.format(route), 'w') as outfile:
#json.dump(avgPrice, outfile)
def pickMinTicketByNumpy(flightNum):
evalMatrix = np.load('inputReg/X_train.npy')
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
    totalPrice = 0
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
        totalPrice += tmpMatrix.min()  # per-date minimum, as the function name indicates
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
return avgPrice
def getMinTicketPriceForAllRoutesByNumpy():
for route in range(8):
avgPrice = pickMinTicketByNumpy(route)
print avgPrice
#with open('randomPrice/randomPrice_{:}.json'.format(route), 'w') as outfile:
#json.dump(avgPrice, outfile)
"""
Get the random price for the general routes
"""
def getRandomPriceForGeneral():
randomPrices = []
for flightNum in range(12):
# feature 0~11: flight number dummy variables
# feature 12: departure date; feature 13: observed date state;
# feature 14: minimum price; feature 15: maximum price
        # feature 16: current price;
evalMatrix = np.load('inputGeneralReg/X_train.npy')
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 12]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
        totalPrice = 0
        count = 0
departureDates = np.unique(evalMatrix[:, 12])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 12]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 16]
if tmpMatrix.shape[0] > 30:
np.random.shuffle(tmpMatrix)
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += np.sum(tmpMatrix[0:30, :])
                count += 30
else:
totalPrice += np.sum(tmpMatrix)
                count += tmpMatrix.shape[0]
        avgPrice = totalPrice * 1.0 / count
randomPrices.append(avgPrice)
print randomPrices
"""
Get the minimum price for the specific routes
"""
def getMinPriceForSpecific_train():
"""
    Get the minimum price for the specific routes
:return:
"""
#print "TRAIN"
minPrices = []
for flightNum in range(8):
# feature 0~7: flight number dummy variables
# feature 8: departure date; feature 9: observed date state;
# feature 10: minimum price; feature 11: maximum price
        # feature 12: current price;
evalMatrix = np.load('inputSpecificClf2/X_train.npy')
y_train_price = np.load('inputSpecificClf2/y_train_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
        totalPrice = 0
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += tmpMatrix.min()
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
minPrices.append(avgPrice)
return minPrices
def getMinPriceForSpecific_test():
"""
    Get the minimum price for the specific routes
:return:
"""
#print "TEST:"
minPrices = []
for flightNum in range(8):
# feature 0~7: flight number dummy variables
# feature 8: departure date; feature 9: observed date state;
# feature 10: minimum price; feature 11: maximum price
        # feature 12: current price;
evalMatrix = np.load('inputSpecificClf2/X_test.npy')
y_train_price = np.load('inputSpecificClf2/y_test_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
        totalPrice = 0
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += tmpMatrix.min()
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
minPrices.append(avgPrice)
return minPrices
def getMinPriceForGeneral():
"""
Get the minimum price for the general routes
:return:
"""
#print "TEST:"
minPrices = []
for flightNum in range(12):
# feature 0~11: flight number dummy variables
        # feature 12: departure date; feature 13: observed date state;
        # feature 14: minimum price; feature 15: maximum price
        # feature 16: current price;
evalMatrix = np.load('inputGeneralClf_small/X_train.npy')
y_train_price = np.load('inputGeneralClf_small/y_train_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 12]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
        totalPrice = 0
departureDates = np.unique(evalMatrix[:, 12])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 12]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 16]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += tmpMatrix.min()
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
minPrices.append(avgPrice)
return minPrices
"""
Get the maximum price for the specific routes
"""
def getMaxPriceForSpecific_train():
"""
    Get the maximum price for the specific routes
:return:
"""
#print "TRAIN"
maxPrices = []
for flightNum in range(8):
# feature 0~7: flight number dummy variables
# feature 8: departure date; feature 9: observed date state;
# feature 10: minimum price; feature 11: maximum price
        # feature 12: current price;
evalMatrix = np.load('inputSpecificClf2/X_train.npy')
y_train_price = np.load('inputSpecificClf2/y_train_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
        totalPrice = 0
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += tmpMatrix.max()
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
maxPrices.append(avgPrice)
return maxPrices
def getMaxPriceForSpecific_test():
"""
    Get the maximum price for the specific routes
:return:
"""
#print "TEST"
maxPrices = []
for flightNum in range(8):
# feature 0~7: flight number dummy variables
# feature 8: departure date; feature 9: observed date state;
# feature 10: minimum price; feature 11: maximum price
# fearure 12: current price;
evalMatrix = np.load('inputSpecificClf2/X_test.npy')
y_train_price = np.load('inputSpecificClf2/y_test_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
totalPrice = 0;
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += tmpMatrix.max()
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
maxPrices.append(avgPrice)
return maxPrices
def getMaxPriceForGeneral():
"""
Get the minimum price for the general routes
:return:
"""
#print "TEST:"
maxPrices = []
for flightNum in range(12):
# feature 0~11: flight number dummy variables
# feature 12: departure date; feature 3: observed date state;
# feature 14: minimum price; feature 15: maximum price
# fearure 16: current price;
evalMatrix = np.load('inputGeneralClf_small/X_train.npy')
y_train_price = np.load('inputGeneralClf_small/y_train_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 12]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
totalPrice = 0;
departureDates = np.unique(evalMatrix[:, 12])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 12]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 16]
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += tmpMatrix.max()
avgPrice = totalPrice * 1.0 / departureDates.shape[0]
maxPrices.append(avgPrice)
return maxPrices
"""
Get the random price for the specific routes
"""
def getRandomPriceForSpecific_train():
#print "TRAIN"
randomPrices = []
for flightNum in range(8):
# feature 0~7: flight number dummy variables
# feature 8: departure date; feature 9: observed date state;
# feature 10: minimum price; feature 11: maximum price
# fearure 12: current price;
evalMatrix = np.load('inputSpecificClf2/X_train.npy')
y_train_price = np.load('inputSpecificClf2/y_train_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
totalPrice = 0;
len = 0;
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
if tmpMatrix.shape[0] > 30:
np.random.shuffle(tmpMatrix)
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += np.sum(tmpMatrix[0:30, :])
len += 30
else:
totalPrice += np.sum(tmpMatrix)
len += tmpMatrix.shape[0]
avgPrice = totalPrice * 1.0 / len
randomPrices.append(avgPrice)
return randomPrices
def getRandomPriceForSpecific_test():
#print "TEST"
randomPrices = []
for flightNum in range(8):
# feature 0~7: flight number dummy variables
# feature 8: departure date; feature 9: observed date state;
# feature 10: minimum price; feature 11: maximum price
# fearure 12: current price;
evalMatrix = np.load('inputSpecificClf2/X_test.npy')
y_train_price = np.load('inputSpecificClf2/y_test_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 8]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
totalPrice = 0;
len = 0;
departureDates = np.unique(evalMatrix[:, 8])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 8]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 12]
if tmpMatrix.shape[0] > 60:
np.random.shuffle(tmpMatrix)
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += np.sum(tmpMatrix[0:60, :])
len += 60
else:
totalPrice += np.sum(tmpMatrix)
len += tmpMatrix.shape[0]
avgPrice = totalPrice * 1.0 / len
randomPrices.append(avgPrice)
return randomPrices
def getRandomPriceForGeneral():
#print "TEST"
randomPrices = []
for flightNum in range(12):
# feature 0~11: flight number dummy variables
# feature 12: departure date; feature 13: observed date state;
# feature 14: minimum price; feature 15: maximum price
# fearure 16: current price;
evalMatrix = np.load('inputGeneralClf_small/X_train.npy')
y_train_price = np.load('inputGeneralClf_small/y_train_price.npy')
evalMatrix = np.concatenate((evalMatrix, y_train_price), axis=1)
# take the departure date 20 days after the first observed date
evalMatrix = evalMatrix[np.where(evalMatrix[:, 12]>20)[0], :]
# take one route
evalMatrix = evalMatrix[np.where(evalMatrix[:, flightNum]==1)[0], :]
totalPrice = 0;
len = 0;
departureDates = np.unique(evalMatrix[:, 12])
for departureDate in departureDates:
tmpMatrix = evalMatrix[np.where(evalMatrix[:, 12]==departureDate)[0], :]
tmpMatrix = tmpMatrix[:, 16]
if tmpMatrix.shape[0] > 30:
np.random.shuffle(tmpMatrix)
tmpMatrix = tmpMatrix.reshape((tmpMatrix.shape[0], 1))
totalPrice += np.sum(tmpMatrix[0:30, :])
len += 30
else:
totalPrice += np.sum(tmpMatrix)
len += tmpMatrix.shape[0]
avgPrice = totalPrice * 1.0 / len
randomPrices.append(avgPrice)
return randomPrices
if __name__ == "__main__":
#getMinPriceForGeneral()
#getRandomPriceForGeneral()
getMinPriceForSpecific_train()
getRandomPriceForSpecific_train()
getMaxPriceForSpecific_train()
getMinPriceForSpecific_test()
getRandomPriceForSpecific_test()
getMaxPriceForSpecific_test()
print getMinPriceForGeneral()
print getRandomPriceForGeneral()
print getMaxPriceForGeneral()
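# The six baselines above repeat one load/filter/aggregate loop; a condensed
# sketch of a shared helper (hypothetical name, assuming the same .npy layout
# and column indices as the functions above):
def getBaselinePrice(inputDir, split, nRoutes, dateCol, priceCol, agg):
    X = np.load('%s/X_%s.npy' % (inputDir, split))
    y = np.load('%s/y_%s_price.npy' % (inputDir, split))
    m = np.concatenate((X, y), axis=1)
    m = m[m[:, dateCol] > 20, :]  # departure more than 20 days out
    prices = []
    for flightNum in range(nRoutes):
        route = m[m[:, flightNum] == 1, :]
        dates = np.unique(route[:, dateCol])
        total = sum(agg(route[route[:, dateCol] == d, priceCol]) for d in dates)
        prices.append(total * 1.0 / dates.shape[0])
    return prices

# e.g. getMinPriceForSpecific_train() ~ getBaselinePrice('inputSpecificClf2', 'train', 8, 8, 12, np.min)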
| avg_line_length 35.934087 | max_line_length 85 | alphanum_fraction 0.623081 | (remaining per-file quality-signal values elided) |
| hexsha 3b7f15752f72cdc98e0ba10373821a3c87694cee | size 413 | ext py | lang Python | path pynumdiff/optimize/linear_model/__init__.py | repo luckystarufo/PyNumDiff | head 99ffeb0c118c6de715414af042020bb268941c99 | licenses ["MIT"] | stars 60 (2020-09-07 to 2022-03-31) | issues 15 (2020-11-09 to 2022-03-15) | forks 8 (2021-02-12 to 2022-03-21) |
"""
import useful functions from __linear_model__
"""
from pynumdiff.optimize.linear_model.__linear_model__ import savgoldiff
from pynumdiff.optimize.linear_model.__linear_model__ import spectraldiff
from pynumdiff.optimize.linear_model.__linear_model__ import polydiff
from pynumdiff.optimize.linear_model.__linear_model__ import chebydiff
from pynumdiff.optimize.linear_model.__linear_model__ import lineardiff
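# A usage sketch of the re-exported optimizers (hedged: the (params, value)
# return convention and the tvgamma keyword follow PyNumDiff's documented call
# pattern; the noisy signal here is illustrative):
import numpy as np
import pynumdiff
import pynumdiff.optimize

t = np.linspace(0, 10, 500)
x_noisy = np.sin(t) + 0.05 * np.random.randn(t.size)
dt = t[1] - t[0]

# search for good savgoldiff hyperparameters, then differentiate with them
params, val = pynumdiff.optimize.linear_model.savgoldiff(x_noisy, dt, tvgamma=0.01)
x_hat, dxdt_hat = pynumdiff.linear_model.savgoldiff(x_noisy, dt, params)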
| avg_line_length 45.888889 | max_line_length 73 | alphanum_fraction 0.883777 | (remaining per-file quality-signal values elided) |
| hexsha 8e617c605494ec71359324a9a18f199bd77dfb64 | size 983 | ext py | lang Python | path src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_bitwise_ops.py | repo steerapi/webdnn | head 1df51cc094e5a528cfd3452c264905708eadb491 | licenses ["MIT"] | stars 1 (2021-04-09) | issues null | forks null |
import tensorflow as tf
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
@TensorFlowConverter.register_handler("BitwiseAnd")
def bitwise_and_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BitwiseOr")
def bitwise_or_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("BitwiseXor")
def bitwise_xor_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Invert")
def invert_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
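# The file above is a stub table: every bitwise op is registered but raises.
# A minimal sketch of the decorator-registry pattern it relies on (illustrative
# only, not webdnn's actual internals):
class Converter:
    _handlers = {}

    @classmethod
    def register_handler(cls, key):
        def decorator(fn):
            cls._handlers[key] = fn
            return fn
        return decorator

    def convert_op(self, key, *args):
        # dispatch to the registered handler, mirroring the NotImplementedError above
        if key not in self._handlers:
            raise NotImplementedError("%s is not supported yet." % key)
        return self._handlers[key](self, *args)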
| avg_line_length 40.958333 | max_line_length 90 | alphanum_fraction 0.808749 | (remaining per-file quality-signal values elided) |
| hexsha 8e703d83dad0b41ea8529fe08787f699379561af | size 637 | ext py | lang Python | path relay/network_graph/fees.py | repo weilbith/relay | head ab1fc05cbb0ce664409a055f18a67255917c6959 | licenses ["MIT"] | stars 7 (2017-07-21 to 2019-03-06) | issues 223 (2019-06-28 to 2021-10-20) | forks 5 (2019-04-07 to 2022-03-08) |
def calculate_fees(imbalance_generated, capacity_imbalance_fee_divisor):
if capacity_imbalance_fee_divisor == 0 or imbalance_generated == 0:
return 0
return (imbalance_generated - 1) // capacity_imbalance_fee_divisor + 1
def calculate_fees_reverse(imbalance_generated, capacity_imbalance_fee_divisor):
if capacity_imbalance_fee_divisor == 0 or imbalance_generated == 0:
return 0
return (imbalance_generated - 1) // (capacity_imbalance_fee_divisor - 1) + 1
def imbalance_generated(*, value, balance):
assert value >= 0
if balance <= 0:
return value
return max(value - balance, 0)
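# Worked examples for the helpers above (values computed by hand):
assert calculate_fees(100, 10) == 10           # (100 - 1) // 10 + 1, i.e. ceil(100 / 10)
assert calculate_fees_reverse(100, 10) == 12   # reverse direction divides by divisor - 1
assert calculate_fees(100, 0) == 0             # divisor 0 disables fees
assert imbalance_generated(value=50, balance=20) == 30
assert imbalance_generated(value=50, balance=-5) == 50  # non-positive balance: full value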
| avg_line_length 31.85 | max_line_length 80 | alphanum_fraction 0.734694 | (remaining per-file quality-signal values elided) |
| hexsha 8ea49e02a069a182eccb1b6a9484c50bded0f24b | size 9,293 | ext py | lang Python | path src/tests.py | repo davoshack/pokemon-design | head d1ea2bdefa41416229f9288cb065b7744d1efa51 | licenses ["MIT"] | stars null | issues null | forks null |
import unittest
import mock
from .pokemon import Pokemon, PokeBall, StatusPokemon, TypePokeBall
class PokemonTestCase(unittest.TestCase):
def setUp(self) -> None:
self.catch_rate = 0.35
self.pokemon = Pokemon("Pikachu", self.catch_rate)
def test_poke_ball_master(self):
poke_ball = PokeBall(TypePokeBall.MASTER_BALL)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# Capture guarantee by master ball
self.assertEqual(self.pokemon.is_caught, True)
# No changes for catch_rate
self.assertEqual(self.pokemon.catch_rate, self.catch_rate)
def test_threshold_less_than_25(self):
# Set random number N < 25
with mock.patch('random.randint', return_value=18):
poke_ball = PokeBall(TypePokeBall.ULTRA_BALL)
################################
# Trying without status
################################
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, self.catch_rate)
updated_catch_rate = self.pokemon.catch_rate
################################
# Now trying with status:ASLEEP
################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.ASLEEP)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# Now the pokemon was caught
self.assertEqual(self.pokemon.is_caught, True)
# No changes for catch_rate
self.assertEqual(self.pokemon.catch_rate, updated_catch_rate)
#################################
# Now trying with status:FROZEN
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.FROZEN)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# Now the pokemon was caught
self.assertEqual(self.pokemon.is_caught, True)
# No changes for catch_rate
self.assertEqual(self.pokemon.catch_rate, updated_catch_rate)
def test_threshold_greater_than_25(self):
# Set random number N > 25
with mock.patch('random.randint', return_value=35):
poke_ball = PokeBall(TypePokeBall.GREAT_BALL)
################################
# Trying without status
################################
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, self.catch_rate)
updated_catch_rate = self.pokemon.catch_rate
################################
# Now trying with status:ASLEEP
################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.ASLEEP)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# No caught
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, updated_catch_rate)
updated_catch_rate = self.pokemon.catch_rate
#################################
# Now trying with status:FROZEN
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.FROZEN)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# No caught
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, updated_catch_rate)
def test_threshold_less_than_12(self):
# Set random number N < 12
with mock.patch('random.randint', return_value=10):
poke_ball = PokeBall(TypePokeBall.POKE_BALL)
################################
# Trying without status
################################
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, self.catch_rate)
updated_catch_rate = self.pokemon.catch_rate
#################################
# Now trying with status:BURNED
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.BURNED)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# Now the pokemon was caught
self.assertEqual(self.pokemon.is_caught, True)
# No changes for catch rate
self.assertEqual(self.pokemon.catch_rate, updated_catch_rate)
#################################
# Now trying with status:PARALYZED
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.PARALYZED)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# Now the pokemon was caught
self.assertEqual(self.pokemon.is_caught, True)
# No changes for catch rate
self.assertEqual(self.pokemon.catch_rate, updated_catch_rate)
#################################
# Now trying with status:POISONED
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.POISONED)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# Now the pokemon was caught
self.assertEqual(self.pokemon.is_caught, True)
# No changes for catch rate
self.assertEqual(self.pokemon.catch_rate, updated_catch_rate)
def test_threshold_greater_than_12(self):
# Set random number N > 12
with mock.patch('random.randint', return_value=15):
poke_ball = PokeBall(TypePokeBall.POKE_BALL)
################################
# Trying without status
################################
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, self.catch_rate)
updated_catch_rate = self.pokemon.catch_rate
#################################
# Now trying with status:BURNED
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.BURNED)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# No caught
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, updated_catch_rate)
updated_catch_rate = self.pokemon.catch_rate
#################################
# Now trying with status:PARALYZED
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.PARALYZED)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# No caught
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, updated_catch_rate)
updated_catch_rate = self.pokemon.catch_rate
#################################
# Now trying with status:POISONED
#################################
# Set pokemon status
self.pokemon.set_status(StatusPokemon.POISONED)
self.pokemon.catch_attempt(poke_ball)
# Asserts expected
# No caught
self.assertEqual(self.pokemon.is_caught, False)
# Catch rate updated
self.assertNotEqual(self.pokemon.catch_rate, updated_catch_rate)
def test_change_catch_rate(self):
self.assertEqual(self.pokemon.catch_rate, self.catch_rate)
with mock.patch('random.randint', return_value=200):
poke_ball = PokeBall(TypePokeBall.POKE_BALL)
self.pokemon.set_catch_rate(poke_ball)
# Asserts expected
self.assertEqual(self.pokemon.catch_rate, 0.475)
def test_str_pokemon(self):
self.pokemon.set_status(StatusPokemon.FROZEN)
self.assertEqual(str(self.pokemon), 'Name: Pikachu - Status: FROZEN')
def test_str_poke_ball(self):
with mock.patch('random.randint', return_value=18):
poke_ball = PokeBall(TypePokeBall.POKE_BALL)
            self.assertEqual(str(poke_ball), 'Type: POKE_BALL - Associate Random Number: 18')
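# Hedged usage note: assuming this file lives at src/tests.py beside src/pokemon.py
# (per the relative import at the top), run the suite from the project root so the
# import resolves:
#   python -m unittest src.tests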
| avg_line_length 38.242798 | max_line_length 77 | alphanum_fraction 0.546002 | (remaining per-file quality-signal values elided) |
| hexsha 8ea658b920c3c6b73148363e0df9ff4219f08b27 | size 247 | ext py | lang Python | path django_evercookie/middleware.py | repo saczuac/django-evercookie | head dc80bd8e836e73c8380bc7cb30bdcbae6a8e4771 | licenses ["MIT"] | stars null | issues null | forks null |
from django_dont_vary_on.middleware import RemoveUnneededVaryHeadersMiddleware
from django.utils.deprecation import MiddlewareMixin
class RemoveUnneededVaryHeadersMiddlewareCompat(RemoveUnneededVaryHeadersMiddleware, MiddlewareMixin):
pass
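# Usage sketch (hypothetical settings.py): mixing in MiddlewareMixin makes the
# compat class usable in the new-style MIDDLEWARE setting, e.g.
# MIDDLEWARE = [
#     # ...
#     "django_evercookie.middleware.RemoveUnneededVaryHeadersMiddlewareCompat",
# ]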
| avg_line_length 30.875 | max_line_length 102 | alphanum_fraction 0.894737 | (remaining per-file quality-signal values elided) |
| hexsha 8ee0cc5e2a9a322be644299c83c23f9acb2ffc60 | size 6,864 | ext py | lang Python | path src/libs/layers.py | repo swang423/SpeechVAE | head 6cecc489d713ef549f91d89bab717f17d6831449 | licenses ["Apache-2.0"] | stars 155 (2017-11-09 to 2022-02-18) | issues 7 (2018-03-11 to 2019-06-07) | forks 25 (2017-11-29 to 2021-06-10) |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected, conv2d, conv2d_transpose
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.python.ops import init_ops
def dense_latent(inputs,
num_outputs,
mu_nl=None,
logvar_nl=None,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""a latent variable layer"""
# normalizer is disabled for now
assert(normalizer_fn is None and normalizer_params is None)
with tf.variable_scope(scope, "dense_latent", [inputs], reuse=reuse) as sc:
mu = fully_connected(inputs,
num_outputs,
activation_fn=mu_nl,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_initializer=weights_initializer,
weights_regularizer=weights_regularizer,
biases_initializer=biases_initializer,
biases_regularizer=biases_regularizer,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
scope="mu")
logvar = fully_connected(inputs,
num_outputs,
activation_fn=logvar_nl,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_initializer=weights_initializer,
weights_regularizer=weights_regularizer,
biases_initializer=biases_initializer,
biases_regularizer=biases_regularizer,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
scope="logvar")
epsilon = tf.random_normal(tf.shape(logvar), name='epsilon')
sample = mu + tf.exp(0.5 * logvar) * epsilon
return mu, logvar, sample
def deconv_latent(inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
mu_nl=None,
logvar_nl=None,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
post_trim=None,
scope=None):
"""a deconvolutional latent variable layer"""
# normalizer is disabled for now
assert(normalizer_fn is None and normalizer_params is None)
with tf.variable_scope(scope, "deconv_latent", [inputs], reuse=reuse) as sc:
mu = conv2d_transpose(inputs,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=mu_nl,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_initializer=weights_initializer,
weights_regularizer=weights_regularizer,
biases_initializer=biases_initializer,
biases_regularizer=biases_regularizer,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
scope="mu")
logvar = conv2d_transpose(inputs,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=logvar_nl,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_initializer=weights_initializer,
weights_regularizer=weights_regularizer,
biases_initializer=biases_initializer,
biases_regularizer=biases_regularizer,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
scope="logvar")
if post_trim:
# print("before cropping: %s" % (mu.shape.as_list()))
if data_format == "NCHW":
mu = mu[..., post_trim[0], post_trim[1]]
logvar = logvar[..., post_trim[0], post_trim[1]]
elif data_format == "NHWC":
mu = mu[..., post_trim[0], post_trim[1], :]
logvar = logvar[..., post_trim[0], post_trim[1], :]
else:
raise ValueError("data_format %s not supported" % data_format)
epsilon = tf.random_normal(tf.shape(logvar), name='epsilon')
sample = mu + tf.exp(0.5 * logvar) * epsilon
return mu, logvar, sample
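# A minimal usage sketch for dense_latent (TF1.x graph mode, assuming the
# contrib layers imported above are available; shapes are illustrative):
inputs = tf.placeholder(tf.float32, [None, 256])
mu, logvar, z = dense_latent(inputs, num_outputs=64, scope="latent")
# z = mu + exp(0.5 * logvar) * eps is the reparameterization trick; the usual
# VAE KL term against a standard normal prior is then:
kl = -0.5 * tf.reduce_sum(1.0 + logvar - tf.square(mu) - tf.exp(logvar), axis=1)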
| avg_line_length 48.680851 | max_line_length 80 | alphanum_fraction 0.504225 | (remaining per-file quality-signal values elided) |
| hexsha d94304f2de59ade5db02f2eb1647c94b61fae3a7 | size 5,386 | ext py | lang Python | path poetry/migrations/0001_initial.py | repo KadogoKenya/PoetryHub | head 80769c7b7657ee03b6bdcbc420c022a8518d9fc4 | licenses ["MIT"] | stars null | issues null | forks null |
# Generated by Django 2.2.6 on 2021-01-13 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Anger',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Christian',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Coronavirus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Death',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Famous',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Friendship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Holiday',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Life',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Love',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Nature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Sad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Spiritual',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('post', models.TextField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
]
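# Design note (not part of the auto-generated migration): the twelve models are
# field-for-field identical, so in models.py they could share a hypothetical
# abstract base, keeping future schema changes in one place:
#
# from django.db import models
#
# class PoemBase(models.Model):
#     title = models.CharField(max_length=20)
#     post = models.TextField()
#     pub_date = models.DateTimeField(auto_now_add=True)
#
#     class Meta:
#         abstract = True
#
# class Anger(PoemBase):
#     pass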
| avg_line_length 40.80303 | max_line_length 114 | alphanum_fraction 0.525808 | (remaining per-file quality-signal values elided) |
| hexsha d9980fd559c894b05926d5d8ef5817076d0d13e8 | size 168 | ext py | lang Python | path aiohug/tests/tests_default_args.py | repo atmo/aiohug | head e66cebde7982da2b623ad926501309030440bfe5 | licenses ["MIT"] | stars 5 (2019-06-24 to 2020-02-03) | issues 3 (2020-04-15 to 2020-04-23) | forks 1 (2020-04-15) |
from aiohug.arguments import get_default_args
def test_get_default_args():
def fn(a, b=5, c=5):
pass
assert get_default_args(fn) == {"b": 5, "c": 5}
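# get_default_args presumably reads defaults off the function signature; an
# equivalent sketch with the standard library (hypothetical name, same behavior
# as the test above expects):
import inspect

def get_default_args_sketch(fn):
    # keep only parameters that declare a default value
    return {name: p.default
            for name, p in inspect.signature(fn).parameters.items()
            if p.default is not inspect.Parameter.empty}

assert get_default_args_sketch(lambda a, b=5, c=5: None) == {"b": 5, "c": 5}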
| avg_line_length 18.666667 | max_line_length 51 | alphanum_fraction 0.636905 | (remaining per-file quality-signal values elided) |
| hexsha 79478bc46ec6852442ca3e7d0a214a79d0c9d16f | size 150 | ext py | lang Python | path majavahbot/api/__init__.py | repo supertassu/MajavahBot | head 79313fc1b40d7adcd2161f5f3d31c6563856c451 | licenses ["MIT"] | stars 2 (2021-02-01 to 2022-01-17) | issues null | forks null |
from majavahbot.api.mediawiki import MediawikiApi, get_mediawiki_api
from majavahbot.api.database import ReplicaDatabase, TaskDatabase, task_database
| avg_line_length 50 | max_line_length 80 | alphanum_fraction 0.88 | (remaining per-file quality-signal values elided) |
| hexsha 79b285ddce56c2b09a82471727be7fbced899d84 | size 153,197 | ext py | lang Python | path test/test_joint_parser.py | repo napulen/AugmentedNet | head 16aaeeccf15508478ac5987f9cf5d148ea44876e | licenses ["MIT"] | stars 14 (2021-09-03 to 2022-03-30) | issues 27 (2021-11-10 to 2022-03-23) | forks null |
import io
import unittest
import pandas as pd
import AugmentedNet.joint_parser
haydnOp20no4iAnnotation = """
Composer: Haydn, Franz Joseph
Title: String Quartet in D Major - No.1: Allegro di molto
Analyst: Néstor Nápoles López, https://doi.org/10.5281/zenodo.1095617
Proofreader: Automated translation from **harm to RomanText
Time Signature: 3/4
m1 a: i
m3 Ger7
m5 V b3 viio/V
m6 V b3 V2
m7 A: I6
m8 V
m9 I b2 IV b3 viio/V
m10 V
m11 I
m12 ii
m13 V2 b3 I6
m14 V7 b3 I
m15 V2 b3 I6
m16 V7 b3 I
m17 ii65
m18 V2
m19 I6
m20 IV
m21 ii6
m22 I6
m25 V43
m27 I6
m28 b3 I6
m29 V43 b3 I
m30 ii6 b3 V7
m31 I
"""
haydnOp20no4iScore = """
!!!COM: Haydn, Franz Joseph
!!!CDT: 1732/3/31/-1809/5/31/
!!!CNT: Oesterreich
!!!OTL: String Quartet in D Major
!!!OPS: Opus 20
!!!ONM: No. 4
!!!OMD: Allegro di molto
!!!OMV: 1
!!!SCT: H III:34
!!!SCA: Thematisch-bibliographisches Werkevererzeichnis (A. van Hoboken)
!!!YOR: Altmann Edition
!!!ENC: Nestor Napoles
!!!YEC: Copyright (c) 2017 Universitat Pompeu Fabra
!!!YEM: Rights to all derivative editions reserved.
!!!YEM: Refer to licensing agreement for further details.
!!!YEN: Barcelona, Spain
**kern **kern **kern **kern
*k[f#c#] *k[f#c#] *k[f#c#] *k[f#c#]
*D: *D: *D: *D:
*clefF4 *clefC3 *clefG2 *clefG2
*M3/4 *M3/4 *M3/4 *M3/4
=1 =1 =1 =1
4AA' 4cn' 4e' 4e' 4ccn' 12a
. . . 12ccn
. . . 12ee
4AA' 4c' 4e' 4e' 4cc' 12aa
. . . 12ee
. . . 12cc
4AA' 4c' 4e' 4e' 4cc' 12a
. . . 12e
. . . 12cn
=2 =2 =2 =2
4AA' 4cn' 4e' 4e' 4ccn' 4A
4r 4r 4r 4r
4r 4r 4r 4r
=3 =3 =3 =3
4Fn' 4cn' 4d#' 4a'
4F' 4c' 4d#' 4a'
4F' 4c' 4d#' 4a'
=4 =4 =4 =4
2.Fn 4cn 2.d# 4a
. 4B . 4b
. [4A . 4ccn
=5 =5 =5 =5
[2.E 4A] 2e 4ccn
. 4G# . 4b
. 4F# 4a 4dd#
=6 =6 =6 =6
2E] 4G# 4b [2.ee
. 4B 4g# .
4D 4B 4g# .
=7 =7 =7 =7
!!linebreak:original
4C# 4A 4a 12ee]
. . . 12dd#
. . . 12ee
4r 4d 2f# 12ff#
. . . 12ee
. . . 12dd#
4r 4d# . 12cc#
. . . 12b
. . . 12a
=8 =8 =8 =8
12EE 4e 4e 4g#
12GG# . . .
12BB . . .
12E 4r 4r 4r
12F# . . .
12E . . .
12D 4r 4r [4ee
12C# . . .
12BB . . .
=9 =9 =9 =9
4C# 4c# 4a 12ee]
. . . 12dd#
. . . 12ee
4r 4d 4f# 12ff#
. . . 12ee
. . . 12dd#
4r 4d# 4f# 12cc#
. . . 12b
. . . 12a
=10 =10 =10 =10
12EE 4e 4e 4g#
12GG# . . .
12BB . . .
12E 4r 4r 4r
12F# . . .
12E . . .
12D 4r 4r 4r
12C# . . .
12BB . . .
=11 =11 =11 =11
!!pagebreak:original
4AA 4c# 4e 12a
. . . 12cc#
. . . 12ee
4r 4r 4r 12aa
. . . 12gg#
. . . 12ff#
4r 4r 4r 12ee
. . . 12dd
. . . 12cc#
=12 =12 =12 =12
4d 4d 4f# 12b
. . . 12ff#
. . . 12ee
4r 4r 4r 12dd
. . . 12cc#
. . . 12b
4r 4r 4r 12a
. . . 12g#
. . . 12f#
=13 =13 =13 =13
2d [2.e 4B 12e
. . . 12g#
. . . 12a
. . 4r 12b
. . . 12a
. . . 12g#
4c# . 4r 12a
. . . 12b
. . . 12cc#
=14 =14 =14 =14
12E 2.e_ 2b 2dd
12G# . . .
12A . . .
12B . . .
12A . . .
12G# . . .
12A . 4a 4cc#
12B . . .
12c# . . .
=15 =15 =15 =15
!!linebreak:original
2d 2.e_ 2.r 12e
. . . 12g#
. . . 12a
. . . 12b
. . . 12a
. . . 12g#
4c# . . 12a
. . . 12b
. . . 12cc#
=16 =16 =16 =16
12E 2.e] 2b 2dd
12G# . . .
12A . . .
12B . . .
12A . . .
12G# . . .
12A . 4cc# 4cc#
12B . . .
12c# . . .
=17 =17 =17 =17
[2.d 12f# 4cc# 2.ff#
. 12g# . .
. 12a . .
. 12cc# 4b .
. 12dd . .
. 12cc# . .
. 12b 4b .
. 12cc# . .
. 12a . .
=18 =18 =18 =18
!!linebreak:original
2.d] 4c# 2.b 12ee
. . . 12gg#
. . . 12ff#
. 4g# . 12ee
. . . 12gg#
. . . 12aa
. [4e . 12bb
. . . 12ccc#
. . . 12ddd
=19 =19 =19 =19
2.c# 4e] 4a [2.eee
. 4a 4cc# .
. 4a 4ee .
=20 =20 =20 =20
[2.d 2a [2.ff# 4eee]
. . . 4ddd
. 4a# . 4ccc#
=21 =21 =21 =21
4d] [2.b 4ff#] [2.ddd
4c# . 4ee .
4B . 4dd .
=22 =22 =22 =22
[2.c# 4b] [2.ee 4ddd]
. 4a . 4ccc#
. 4g# . 4bb#
=23 =23 =23 =23
4c#] [2.a 4ee] [2.ccc#
4B . 4dd .
4A . 4cc# .
=24 =24 =24 =24
!!pagebreak:original
2.B 4a] [2.dd 12ccc#]
. . . 12bb#
. . . 12ccc#
. 4g# . 12ddd
. . . 12ccc#
. . . 12bbn
. 4g# . 12aa
. . . 12gg#
. . . 12ff#
=25 =25 =25 =25
[2.B [2.g# 2.dd_ 4ee
. . . 4ee'
. . . 4ee'
=26 =26 =26 =26
2.B] 2.g#] 2.dd] [2.ee
=27 =27 =27 =27
2.c# 4a 4cc# 4ee]
. 4a' 4a' 4aa
. 4a' 4a' 4gg#
=28 =28 =28 =28
2d 2.a 2.a 4gg#
. . . 4ff#
4c# . . [4ee
=29 =29 =29 =29
2B 2d 2g# 4ee]
. . . 12ff#
. . . 12dd
. . . 12b
4A 4c# 4a [4cc#
=30 =30 =30 =30
2D 2d 2f# 4cc#]
. . . 4b
4E 4B 4d 12dd
. . . 12b
. . . 12g#
=31 =31 =31 =31
!!linebreak:original
4AA 4c# 4c# 4a
4r 4r 4r 4r
4r 4r 4r 4r
== == == ==
*- *- *- *-
"""
haydnOp20no4iDataFrameGT = """
j_offset,s_duration,s_measure,s_notes,s_intervals,s_isOnset,a_measure,a_duration,a_annotationNumber,a_romanNumeral,a_isOnset,a_pitchNames,a_bass,a_root,a_inversion,a_quality,a_pcset,a_localKey,a_tonicizedKey,a_degree1,a_degree2,measureMisalignment,qualityScoreNotes,qualityNonChordTones,qualityMissingChordTones,qualitySquaredSum,incongruentBass
0.0,0.3333,1.0,"['A2', 'C4', 'E4', 'A4', 'C5']","['m3', 'P5', 'P1', 'm3']","[True, True, True, True, True]",1.0,3.0,0.0,i,True,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
0.25,0.3333,1.0,"['A2', 'C4', 'E4', 'A4', 'C5']","['m3', 'P5', 'P1', 'm3']","[False, False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
0.5,0.3333,1.0,"['A2', 'C4', 'E4', 'C5']","['m3', 'P5', 'm3']","[False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
0.75,0.3333,1.0,"['A2', 'C4', 'E4', 'C5', 'E5']","['m3', 'P5', 'm3', 'P5']","[False, False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
1.0,0.3333,1.0,"['A2', 'C4', 'E4', 'C5', 'A5']","['m3', 'P5', 'm3', 'P1']","[True, True, True, True, True]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
1.25,0.3333,1.0,"['A2', 'C4', 'E4', 'C5', 'A5']","['m3', 'P5', 'm3', 'P1']","[False, False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
1.5,0.3333,1.0,"['A2', 'C4', 'E4', 'C5', 'E5']","['m3', 'P5', 'm3', 'P5']","[False, False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
1.75,0.3333,1.0,"['A2', 'C4', 'E4', 'C5']","['m3', 'P5', 'm3']","[False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
2.0,0.3333,1.0,"['A2', 'C4', 'E4', 'A4', 'C5']","['m3', 'P5', 'P1', 'm3']","[True, True, True, True, True]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
2.25,0.3333,1.0,"['A2', 'C4', 'E4', 'A4', 'C5']","['m3', 'P5', 'P1', 'm3']","[False, False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
2.5,0.3333,1.0,"['A2', 'C4', 'E4', 'C5']","['m3', 'P5', 'm3']","[False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
2.75,0.3333,1.0,"['A2', 'C4', 'E4', 'C5']","['m3', 'P5', 'm3']","[False, False, False, False]",1.0,3.0,0.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'E', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'A', 'C', 'A', 'C', 'E', 'C', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
3.0,1.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[True, True, True, True, True]",2.0,3.0,1.0,i,True,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
3.25,1.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
3.5,1.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
3.75,1.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
4.0,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
4.25,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
4.5,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
4.75,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
5.0,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
5.25,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
5.5,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
5.75,2.0,2.0,"['A2', 'A3', 'C4', 'E4', 'C5']","['P1', 'm3', 'P5', 'm3']","[False, False, False, False, False]",2.0,3.0,1.0,i,False,"('A', 'C', 'E')",A,A,0.0,minor triad,"(0, 4, 9)",a,a,1,None,False,"['A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C', 'A', 'A', 'C', 'E', 'C']",0.0,0.0,0.0,0.0
6.0,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[True, True, True, True]",3.0,3.0,2.0,Ger7,True,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
6.25,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
6.5,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
6.75,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
7.0,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[True, True, True, True]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
7.25,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
7.5,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
7.75,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
8.0,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[True, True, True, True]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
8.25,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
8.5,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
8.75,1.0,3.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",3.0,3.0,2.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A']",0.0,0.0,0.0,1.0
9.0,1.0,4.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[True, True, True, True]",4.0,3.0,3.0,Ger7,True,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
9.25,1.0,4.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
9.5,1.0,4.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
9.75,1.0,4.0,"['F3', 'C4', 'D#4', 'A4']","['P5', 'A6', 'M3']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
10.0,1.0,4.0,"['F3', 'B3', 'D#4', 'B4']","['A4', 'A6', 'A4']","[False, True, False, True]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
10.25,1.0,4.0,"['F3', 'B3', 'D#4', 'B4']","['A4', 'A6', 'A4']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
10.5,1.0,4.0,"['F3', 'B3', 'D#4', 'B4']","['A4', 'A6', 'A4']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
10.75,1.0,4.0,"['F3', 'B3', 'D#4', 'B4']","['A4', 'A6', 'A4']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
11.0,1.0,4.0,"['F3', 'A3', 'D#4', 'C5']","['M3', 'A6', 'P5']","[False, True, False, True]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
11.25,1.0,4.0,"['F3', 'A3', 'D#4', 'C5']","['M3', 'A6', 'P5']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
11.5,1.0,4.0,"['F3', 'A3', 'D#4', 'C5']","['M3', 'A6', 'P5']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
11.75,1.0,4.0,"['F3', 'A3', 'D#4', 'C5']","['M3', 'A6', 'P5']","[False, False, False, False]",4.0,3.0,3.0,Ger7,False,"('D#', 'F', 'A', 'C')",D#,D#,0.0,enharmonic to dominant seventh chord,"(0, 3, 5, 9)",a,a,#4,None,False,"['F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'C', 'D#', 'A', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'B', 'D#', 'B', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C', 'F', 'A', 'D#', 'C']",0.17,0.0,0.03,1.0
12.0,1.0,5.0,"['E3', 'A3', 'E4', 'C5']","['P4', 'P1', 'm6']","[True, False, True, True]",5.0,2.0,4.0,V,True,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
12.25,1.0,5.0,"['E3', 'A3', 'E4', 'C5']","['P4', 'P1', 'm6']","[False, False, False, False]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
12.5,1.0,5.0,"['E3', 'A3', 'E4', 'C5']","['P4', 'P1', 'm6']","[False, False, False, False]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
12.75,1.0,5.0,"['E3', 'A3', 'E4', 'C5']","['P4', 'P1', 'm6']","[False, False, False, False]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
13.0,1.0,5.0,"['E3', 'G#3', 'E4', 'B4']","['M3', 'P1', 'P5']","[False, True, False, True]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
13.25,1.0,5.0,"['E3', 'G#3', 'E4', 'B4']","['M3', 'P1', 'P5']","[False, False, False, False]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
13.5,1.0,5.0,"['E3', 'G#3', 'E4', 'B4']","['M3', 'P1', 'P5']","[False, False, False, False]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
13.75,1.0,5.0,"['E3', 'G#3', 'E4', 'B4']","['M3', 'P1', 'P5']","[False, False, False, False]",5.0,2.0,4.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'A', 'E', 'C', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B', 'E', 'G#', 'E', 'B']",0.25,0.0,0.06,0.0
14.0,1.0,5.0,"['E3', 'F#3', 'A4', 'D#5']","['M2', 'P4', 'M7']","[False, True, True, True]",5.0,1.0,5.0,viio/V,True,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",a,E,7,5,False,"['E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#']",0.25,0.0,0.06,1.0
14.25,1.0,5.0,"['E3', 'F#3', 'A4', 'D#5']","['M2', 'P4', 'M7']","[False, False, False, False]",5.0,1.0,5.0,viio/V,False,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",a,E,7,5,False,"['E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#']",0.25,0.0,0.06,1.0
14.5,1.0,5.0,"['E3', 'F#3', 'A4', 'D#5']","['M2', 'P4', 'M7']","[False, False, False, False]",5.0,1.0,5.0,viio/V,False,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",a,E,7,5,False,"['E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#']",0.25,0.0,0.06,1.0
14.75,1.0,5.0,"['E3', 'F#3', 'A4', 'D#5']","['M2', 'P4', 'M7']","[False, False, False, False]",5.0,1.0,5.0,viio/V,False,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",a,E,7,5,False,"['E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#', 'E', 'F#', 'A', 'D#']",0.25,0.0,0.06,1.0
15.0,1.0,6.0,"['E3', 'G#3', 'B4', 'E5']","['M3', 'P5', 'P1']","[False, True, True, True]",6.0,2.0,6.0,V,True,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
15.25,1.0,6.0,"['E3', 'G#3', 'B4', 'E5']","['M3', 'P5', 'P1']","[False, False, False, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
15.5,1.0,6.0,"['E3', 'G#3', 'B4', 'E5']","['M3', 'P5', 'P1']","[False, False, False, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
15.75,1.0,6.0,"['E3', 'G#3', 'B4', 'E5']","['M3', 'P5', 'P1']","[False, False, False, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
16.0,1.0,6.0,"['E3', 'B3', 'G#4', 'E5']","['P5', 'M3', 'P1']","[False, True, True, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
16.25,1.0,6.0,"['E3', 'B3', 'G#4', 'E5']","['P5', 'M3', 'P1']","[False, False, False, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
16.5,1.0,6.0,"['E3', 'B3', 'G#4', 'E5']","['P5', 'M3', 'P1']","[False, False, False, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
16.75,1.0,6.0,"['E3', 'B3', 'G#4', 'E5']","['P5', 'M3', 'P1']","[False, False, False, False]",6.0,2.0,6.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",a,a,5,None,False,"['E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'G#', 'B', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E', 'E', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
17.0,1.0,6.0,"['D3', 'B3', 'G#4', 'E5']","['M6', 'A4', 'M2']","[True, True, True, False]",6.0,1.0,7.0,V7,True,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",a,a,5,None,False,"['D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
17.25,1.0,6.0,"['D3', 'B3', 'G#4', 'E5']","['M6', 'A4', 'M2']","[False, False, False, False]",6.0,1.0,7.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",a,a,5,None,False,"['D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
17.5,1.0,6.0,"['D3', 'B3', 'G#4', 'E5']","['M6', 'A4', 'M2']","[False, False, False, False]",6.0,1.0,7.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",a,a,5,None,False,"['D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
17.75,1.0,6.0,"['D3', 'B3', 'G#4', 'E5']","['M6', 'A4', 'M2']","[False, False, False, False]",6.0,1.0,7.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",a,a,5,None,False,"['D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E', 'D', 'B', 'G#', 'E']",0.0,0.0,0.0,0.0
18.0,0.3333,7.0,"['C#3', 'A3', 'A4', 'E5']","['m6', 'm6', 'm3']","[True, True, True, False]",7.0,3.0,8.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
18.25,0.3333,7.0,"['C#3', 'A3', 'A4', 'E5']","['m6', 'm6', 'm3']","[False, False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
18.5,0.3333,7.0,"['C#3', 'A3', 'A4', 'D#5']","['m6', 'm6', 'M2']","[False, False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
18.75,0.3333,7.0,"['C#3', 'A3', 'A4', 'E5']","['m6', 'm6', 'm3']","[False, False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
19.0,0.3333,7.0,"['D4', 'F#4', 'F#5']","['M3', 'M3']","[True, True, True]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
19.25,0.3333,7.0,"['D4', 'F#4', 'F#5']","['M3', 'M3']","[False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
19.5,0.3333,7.0,"['D4', 'F#4', 'E5']","['M3', 'M2']","[False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
19.75,0.3333,7.0,"['D4', 'F#4', 'D#5']","['M3', 'A1']","[False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
20.0,0.3333,7.0,"['D#4', 'F#4', 'C#5']","['m3', 'm7']","[True, False, True]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
20.25,0.3333,7.0,"['D#4', 'F#4', 'C#5']","['m3', 'm7']","[False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
20.5,0.3333,7.0,"['D#4', 'F#4', 'B4']","['m3', 'm6']","[False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
20.75,0.3333,7.0,"['D#4', 'F#4', 'A4']","['m3', 'd5']","[False, False, False]",7.0,3.0,8.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'E', 'C#', 'A', 'A', 'D#', 'C#', 'A', 'A', 'E', 'D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#', 'D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.53,0.0,0.28,0.67
21.0,0.3333,8.0,"['E2', 'E4', 'G#4']","['P1', 'M3']","[True, True, True]",8.0,3.0,9.0,V,True,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
21.25,0.3333,8.0,"['E2', 'E4', 'G#4']","['P1', 'M3']","[False, False, False]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
21.5,0.3333,8.0,"['G#2', 'E4', 'G#4']","['m6', 'P1']","[False, False, False]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
21.75,0.3333,8.0,"['B2', 'E4', 'G#4']","['P4', 'M6']","[False, False, False]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
22.0,0.3333,8.0,['E3'],[],[True],8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
22.25,0.3333,8.0,['E3'],[],[False],8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
22.5,0.3333,8.0,['F#3'],[],[False],8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
22.75,0.3333,8.0,['E3'],[],[False],8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
23.0,0.3333,8.0,"['D3', 'E5']",['M2'],"[True, True]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
23.25,0.3333,8.0,"['D3', 'E5']",['M2'],"[False, False]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
23.5,0.3333,8.0,"['C#3', 'E5']",['m3'],"[False, False]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
23.75,0.3333,8.0,"['B2', 'E5']",['P4'],"[False, False]",8.0,3.0,9.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'E', 'D', 'E', 'C#', 'E', 'B', 'E']",0.17,0.0,0.03,0.58
24.0,0.3333,9.0,"['C#3', 'C#4', 'A4', 'E5']","['P1', 'm6', 'm3']","[True, True, True, False]",9.0,1.0,10.0,I,True,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'D#', 'C#', 'C#', 'A', 'E']",0.06,0.0,0.0,1.0
24.25,0.3333,9.0,"['C#3', 'C#4', 'A4', 'E5']","['P1', 'm6', 'm3']","[False, False, False, False]",9.0,1.0,10.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'D#', 'C#', 'C#', 'A', 'E']",0.06,0.0,0.0,1.0
24.5,0.3333,9.0,"['C#3', 'C#4', 'A4', 'D#5']","['P1', 'm6', 'M2']","[False, False, False, False]",9.0,1.0,10.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'D#', 'C#', 'C#', 'A', 'E']",0.06,0.0,0.0,1.0
24.75,0.3333,9.0,"['C#3', 'C#4', 'A4', 'E5']","['P1', 'm6', 'm3']","[False, False, False, False]",9.0,1.0,10.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'D#', 'C#', 'C#', 'A', 'E']",0.06,0.0,0.0,1.0
25.0,0.3333,9.0,"['D4', 'F#4', 'F#5']","['M3', 'M3']","[True, True, True]",9.0,1.0,11.0,IV,True,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#']",0.17,0.33,0.25,0.0
25.25,0.3333,9.0,"['D4', 'F#4', 'F#5']","['M3', 'M3']","[False, False, False]",9.0,1.0,11.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#']",0.17,0.33,0.25,0.0
25.5,0.3333,9.0,"['D4', 'F#4', 'E5']","['M3', 'M2']","[False, False, False]",9.0,1.0,11.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#']",0.17,0.33,0.25,0.0
25.75,0.3333,9.0,"['D4', 'F#4', 'D#5']","['M3', 'A1']","[False, False, False]",9.0,1.0,11.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'F#', 'F#', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'F#', 'D#']",0.17,0.33,0.25,0.0
26.0,0.3333,9.0,"['D#4', 'F#4', 'C#5']","['m3', 'm7']","[True, True, True]",9.0,1.0,12.0,viio/V,True,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",A,E,7,5,False,"['D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.25,0.0,0.06,0.0
26.25,0.3333,9.0,"['D#4', 'F#4', 'C#5']","['m3', 'm7']","[False, False, False]",9.0,1.0,12.0,viio/V,False,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",A,E,7,5,False,"['D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.25,0.0,0.06,0.0
26.5,0.3333,9.0,"['D#4', 'F#4', 'B4']","['m3', 'm6']","[False, False, False]",9.0,1.0,12.0,viio/V,False,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",A,E,7,5,False,"['D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.25,0.0,0.06,0.0
26.75,0.3333,9.0,"['D#4', 'F#4', 'A4']","['m3', 'd5']","[False, False, False]",9.0,1.0,12.0,viio/V,False,"('D#', 'F#', 'A')",D#,D#,0.0,diminished triad,"(3, 6, 9)",A,E,7,5,False,"['D#', 'F#', 'C#', 'D#', 'F#', 'C#', 'D#', 'F#', 'B', 'D#', 'F#', 'A']",0.25,0.0,0.06,0.0
27.0,0.3333,10.0,"['E2', 'E4', 'G#4']","['P1', 'M3']","[True, True, True]",10.0,3.0,13.0,V,True,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
27.25,0.3333,10.0,"['E2', 'E4', 'G#4']","['P1', 'M3']","[False, False, False]",10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
27.5,0.3333,10.0,"['G#2', 'E4', 'G#4']","['m6', 'P1']","[False, False, False]",10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
27.75,0.3333,10.0,"['B2', 'E4', 'G#4']","['P4', 'M6']","[False, False, False]",10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
28.0,0.3333,10.0,['E3'],[],[True],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
28.25,0.3333,10.0,['E3'],[],[False],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
28.5,0.3333,10.0,['F#3'],[],[False],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
28.75,0.3333,10.0,['E3'],[],[False],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
29.0,0.3333,10.0,['D3'],[],[True],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
29.25,0.3333,10.0,['D3'],[],[False],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
29.5,0.3333,10.0,['C#3'],[],[False],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
29.75,0.3333,10.0,['B2'],[],[False],10.0,3.0,13.0,V,False,"('E', 'G#', 'B')",E,E,0.0,major triad,"(4, 8, 11)",A,A,5,None,False,"['E', 'E', 'G#', 'E', 'E', 'G#', 'G#', 'E', 'G#', 'B', 'E', 'G#', 'E', 'E', 'F#', 'E', 'D', 'D', 'C#', 'B']",0.2,0.0,0.04,0.58
30.0,0.3333,11.0,"['A2', 'C#4', 'E4', 'A4']","['M3', 'P5', 'P1']","[True, True, True, True]",11.0,3.0,14.0,I,True,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
30.25,0.3333,11.0,"['A2', 'C#4', 'E4', 'A4']","['M3', 'P5', 'P1']","[False, False, False, False]",11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
30.5,0.3333,11.0,"['A2', 'C#4', 'E4', 'C#5']","['M3', 'P5', 'M3']","[False, False, False, False]",11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
30.75,0.3333,11.0,"['A2', 'C#4', 'E4', 'E5']","['M3', 'P5', 'P5']","[False, False, False, False]",11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
31.0,0.3333,11.0,['A5'],[],[True],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
31.25,0.3333,11.0,['A5'],[],[False],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
31.5,0.3333,11.0,['G#5'],[],[False],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
31.75,0.3333,11.0,['F#5'],[],[False],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
32.0,0.3333,11.0,['E5'],[],[True],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
32.25,0.3333,11.0,['E5'],[],[False],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
32.5,0.3333,11.0,['D5'],[],[False],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
32.75,0.3333,11.0,['C#5'],[],[False],11.0,3.0,14.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'A', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'E', 'A', 'A', 'G#', 'F#', 'E', 'E', 'D', 'C#']",0.12,0.0,0.02,0.5
33.0,0.3333,12.0,"['D4', 'F#4', 'B4']","['M3', 'M6']","[True, True, True]",12.0,3.0,15.0,ii,True,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
33.25,0.3333,12.0,"['D4', 'F#4', 'B4']","['M3', 'M6']","[False, False, False]",12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
33.5,0.3333,12.0,"['D4', 'F#4', 'F#5']","['M3', 'M3']","[False, False, False]",12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
33.75,0.3333,12.0,"['D4', 'F#4', 'E5']","['M3', 'M2']","[False, False, False]",12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
34.0,0.3333,12.0,['D5'],[],[True],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
34.25,0.3333,12.0,['D5'],[],[False],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
34.5,0.3333,12.0,['C#5'],[],[False],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
34.75,0.3333,12.0,['B4'],[],[False],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
35.0,0.3333,12.0,['A4'],[],[True],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
35.25,0.3333,12.0,['A4'],[],[False],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
35.5,0.3333,12.0,['G#4'],[],[False],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
35.75,0.3333,12.0,['F#4'],[],[False],12.0,3.0,15.0,ii,False,"('B', 'D', 'F#')",B,B,0.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'F#', 'B', 'D', 'F#', 'B', 'D', 'F#', 'F#', 'D', 'F#', 'E', 'D', 'D', 'C#', 'B', 'A', 'A', 'G#', 'F#']",0.25,0.0,0.06,0.92
36.0,0.3333,13.0,"['B3', 'D4', 'E4']","['m3', 'P4']","[True, True, True]",13.0,2.0,16.0,V7,True,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
36.25,0.3333,13.0,"['B3', 'D4', 'E4']","['m3', 'P4']","[False, False, False]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
36.5,0.3333,13.0,"['B3', 'D4', 'E4', 'G#4']","['m3', 'P4', 'M6']","[False, False, False, False]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
36.75,0.3333,13.0,"['B3', 'D4', 'E4', 'A4']","['m3', 'P4', 'm7']","[False, False, False, False]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
37.0,0.3333,13.0,"['D4', 'E4', 'B4']","['M2', 'M6']","[False, False, True]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
37.25,0.3333,13.0,"['D4', 'E4', 'B4']","['M2', 'M6']","[False, False, False]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
37.5,0.3333,13.0,"['D4', 'E4', 'A4']","['M2', 'P5']","[False, False, False]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
37.75,0.3333,13.0,"['D4', 'E4', 'G#4']","['M2', 'A4']","[False, False, False]",13.0,2.0,16.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'G#', 'B', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.08,0.0,0.01,0.5
38.0,0.3333,13.0,"['C#4', 'E4', 'A4']","['m3', 'm6']","[True, False, True]",13.0,1.0,17.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
38.25,0.3333,13.0,"['C#4', 'E4', 'A4']","['m3', 'm6']","[False, False, False]",13.0,1.0,17.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
38.5,0.3333,13.0,"['C#4', 'E4', 'B4']","['m3', 'm7']","[False, False, False]",13.0,1.0,17.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
38.75,0.3333,13.0,"['C#4', 'E4', 'C#5']","['m3', 'P1']","[False, False, False]",13.0,1.0,17.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
39.0,0.3333,14.0,"['E3', 'E4', 'B4', 'D5']","['P1', 'P5', 'm7']","[True, False, True, True]",14.0,2.0,18.0,V7,True,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
39.25,0.3333,14.0,"['E3', 'E4', 'B4', 'D5']","['P1', 'P5', 'm7']","[False, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
39.5,0.3333,14.0,"['G#3', 'E4', 'B4', 'D5']","['m6', 'm3', 'd5']","[False, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
39.75,0.3333,14.0,"['A3', 'E4', 'B4', 'D5']","['P5', 'M2', 'P4']","[False, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
40.0,0.3333,14.0,"['B3', 'E4', 'B4', 'D5']","['P4', 'P1', 'm3']","[True, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
40.25,0.3333,14.0,"['B3', 'E4', 'B4', 'D5']","['P4', 'P1', 'm3']","[False, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
40.5,0.3333,14.0,"['A3', 'E4', 'B4', 'D5']","['P5', 'M2', 'P4']","[False, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
40.75,0.3333,14.0,"['G#3', 'E4', 'B4', 'D5']","['m6', 'm3', 'd5']","[False, False, False, False]",14.0,2.0,18.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
41.0,0.3333,14.0,"['A3', 'E4', 'A4', 'C#5']","['P5', 'P1', 'M3']","[True, False, True, True]",14.0,1.0,19.0,I,True,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'A', 'C#', 'A', 'E', 'A', 'C#', 'B', 'E', 'A', 'C#', 'C#', 'E', 'A', 'C#']",0.06,0.0,0.0,0.5
41.25,0.3333,14.0,"['A3', 'E4', 'A4', 'C#5']","['P5', 'P1', 'M3']","[False, False, False, False]",14.0,1.0,19.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'A', 'C#', 'A', 'E', 'A', 'C#', 'B', 'E', 'A', 'C#', 'C#', 'E', 'A', 'C#']",0.06,0.0,0.0,0.5
41.5,0.3333,14.0,"['B3', 'E4', 'A4', 'C#5']","['P4', 'm7', 'M2']","[False, False, False, False]",14.0,1.0,19.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'A', 'C#', 'A', 'E', 'A', 'C#', 'B', 'E', 'A', 'C#', 'C#', 'E', 'A', 'C#']",0.06,0.0,0.0,0.5
41.75,0.3333,14.0,"['C#4', 'E4', 'A4', 'C#5']","['m3', 'm6', 'P1']","[False, False, False, False]",14.0,1.0,19.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'A', 'C#', 'A', 'E', 'A', 'C#', 'B', 'E', 'A', 'C#', 'C#', 'E', 'A', 'C#']",0.06,0.0,0.0,0.5
42.0,0.3333,15.0,"['D4', 'E4']",['M2'],"[True, False]",15.0,2.0,20.0,V7,True,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
42.25,0.3333,15.0,"['D4', 'E4']",['M2'],"[False, False]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
42.5,0.3333,15.0,"['D4', 'E4', 'G#4']","['M2', 'A4']","[False, False, False]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
42.75,0.3333,15.0,"['D4', 'E4', 'A4']","['M2', 'P5']","[False, False, False]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
43.0,0.3333,15.0,"['D4', 'E4', 'B4']","['M2', 'M6']","[False, False, True]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
43.25,0.3333,15.0,"['D4', 'E4', 'B4']","['M2', 'M6']","[False, False, False]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
43.5,0.3333,15.0,"['D4', 'E4', 'A4']","['M2', 'P5']","[False, False, False]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
43.75,0.3333,15.0,"['D4', 'E4', 'G#4']","['M2', 'A4']","[False, False, False]",15.0,2.0,20.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['D', 'E', 'D', 'E', 'D', 'E', 'G#', 'D', 'E', 'A', 'D', 'E', 'B', 'D', 'E', 'B', 'D', 'E', 'A', 'D', 'E', 'G#']",0.09,0.0,0.01,0.0
44.0,0.3333,15.0,"['C#4', 'E4', 'A4']","['m3', 'm6']","[True, False, True]",15.0,1.0,21.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
44.25,0.3333,15.0,"['C#4', 'E4', 'A4']","['m3', 'm6']","[False, False, False]",15.0,1.0,21.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
44.5,0.3333,15.0,"['C#4', 'E4', 'B4']","['m3', 'm7']","[False, False, False]",15.0,1.0,21.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
44.75,0.3333,15.0,"['C#4', 'E4', 'C#5']","['m3', 'P1']","[False, False, False]",15.0,1.0,21.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'C#', 'E', 'A', 'C#', 'E', 'B', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.0
45.0,0.3333,16.0,"['E3', 'E4', 'B4', 'D5']","['P1', 'P5', 'm7']","[True, False, True, True]",16.0,2.0,22.0,V7,True,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
45.25,0.3333,16.0,"['E3', 'E4', 'B4', 'D5']","['P1', 'P5', 'm7']","[False, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
45.5,0.3333,16.0,"['G#3', 'E4', 'B4', 'D5']","['m6', 'm3', 'd5']","[False, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
45.75,0.3333,16.0,"['A3', 'E4', 'B4', 'D5']","['P5', 'M2', 'P4']","[False, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
46.0,0.3333,16.0,"['B3', 'E4', 'B4', 'D5']","['P4', 'P1', 'm3']","[True, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
46.25,0.3333,16.0,"['B3', 'E4', 'B4', 'D5']","['P4', 'P1', 'm3']","[False, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
46.5,0.3333,16.0,"['A3', 'E4', 'B4', 'D5']","['P5', 'M2', 'P4']","[False, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
46.75,0.3333,16.0,"['G#3', 'E4', 'B4', 'D5']","['m6', 'm3', 'd5']","[False, False, False, False]",16.0,2.0,22.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'E', 'B', 'D', 'E', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'A', 'E', 'B', 'D', 'G#', 'E', 'B', 'D']",0.06,0.0,0.0,0.75
47.0,0.3333,16.0,"['A3', 'E4', 'C#5']","['P5', 'M3']","[True, False, True]",16.0,1.0,23.0,I,True,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'C#', 'A', 'E', 'C#', 'B', 'E', 'C#', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.5
47.25,0.3333,16.0,"['A3', 'E4', 'C#5']","['P5', 'M3']","[False, False, False]",16.0,1.0,23.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'C#', 'A', 'E', 'C#', 'B', 'E', 'C#', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.5
47.5,0.3333,16.0,"['B3', 'E4', 'C#5']","['P4', 'M2']","[False, False, False]",16.0,1.0,23.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'C#', 'A', 'E', 'C#', 'B', 'E', 'C#', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.5
47.75,0.3333,16.0,"['C#4', 'E4', 'C#5']","['m3', 'P1']","[False, False, False]",16.0,1.0,23.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'E', 'C#', 'A', 'E', 'C#', 'B', 'E', 'C#', 'C#', 'E', 'C#']",0.08,0.0,0.01,0.5
48.0,0.3333,17.0,"['D4', 'F#4', 'C#5', 'F#5']","['M3', 'M7', 'M3']","[True, True, True, True]",17.0,3.0,24.0,ii7,True,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
48.25,0.3333,17.0,"['D4', 'F#4', 'C#5', 'F#5']","['M3', 'M7', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
48.5,0.3333,17.0,"['D4', 'G#4', 'C#5', 'F#5']","['A4', 'M7', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
48.75,0.3333,17.0,"['D4', 'A4', 'C#5', 'F#5']","['P5', 'M7', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
49.0,0.3333,17.0,"['D4', 'B4', 'C#5', 'F#5']","['M6', 'M7', 'M3']","[False, True, True, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
49.25,0.3333,17.0,"['D4', 'B4', 'C#5', 'F#5']","['M6', 'M7', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
49.5,0.3333,17.0,"['D4', 'B4', 'D5', 'F#5']","['M6', 'P1', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
49.75,0.3333,17.0,"['D4', 'B4', 'C#5', 'F#5']","['M6', 'M7', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
50.0,0.3333,17.0,"['D4', 'B4', 'F#5']","['M6', 'M3']","[False, True, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
50.25,0.3333,17.0,"['D4', 'B4', 'F#5']","['M6', 'M3']","[False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
50.5,0.3333,17.0,"['D4', 'B4', 'C#5', 'F#5']","['M6', 'M7', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
50.75,0.3333,17.0,"['D4', 'A4', 'B4', 'F#5']","['P5', 'M6', 'M3']","[False, False, False, False]",17.0,3.0,24.0,ii7,False,"('D', 'F#', 'A', 'B')",D,B,1.0,minor seventh chord,"(2, 6, 9, 11)",A,A,2,None,False,"['D', 'F#', 'C#', 'F#', 'D', 'F#', 'C#', 'F#', 'D', 'G#', 'C#', 'F#', 'D', 'A', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'D', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'B', 'F#', 'D', 'B', 'F#', 'D', 'B', 'C#', 'F#', 'D', 'A', 'B', 'F#']",0.2,0.0,0.04,0.0
51.0,0.3333,18.0,"['C#4', 'D4', 'B4', 'E5']","['m2', 'm7', 'm3']","[True, False, True, True]",18.0,3.0,25.0,V7,True,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
51.25,0.3333,18.0,"['C#4', 'D4', 'B4', 'E5']","['m2', 'm7', 'm3']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
51.5,0.3333,18.0,"['C#4', 'D4', 'B4', 'G#5']","['m2', 'm7', 'P5']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
51.75,0.3333,18.0,"['C#4', 'D4', 'B4', 'F#5']","['m2', 'm7', 'P4']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
52.0,0.3333,18.0,"['D4', 'G#4', 'B4', 'E5']","['A4', 'M6', 'M2']","[False, True, False, True]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
52.25,0.3333,18.0,"['D4', 'G#4', 'B4', 'E5']","['A4', 'M6', 'M2']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
52.5,0.3333,18.0,"['D4', 'G#4', 'B4', 'G#5']","['A4', 'M6', 'A4']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
52.75,0.3333,18.0,"['D4', 'G#4', 'B4', 'A5']","['A4', 'M6', 'P5']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
53.0,0.3333,18.0,"['D4', 'E4', 'B4', 'B5']","['M2', 'M6', 'M6']","[False, True, False, True]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
53.25,0.3333,18.0,"['D4', 'E4', 'B4', 'B5']","['M2', 'M6', 'M6']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
53.5,0.3333,18.0,"['D4', 'E4', 'B4', 'C#6']","['M2', 'M6', 'M7']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
53.75,0.3333,18.0,"['D4', 'E4', 'B4', 'D6']","['M2', 'M6', 'P1']","[False, False, False, False]",18.0,3.0,25.0,V7,False,"('D', 'E', 'G#', 'B')",D,E,3.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'E', 'C#', 'D', 'B', 'G#', 'C#', 'D', 'B', 'F#', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'E', 'D', 'G#', 'B', 'G#', 'D', 'G#', 'B', 'A', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'B', 'D', 'E', 'B', 'C#', 'D', 'E', 'B', 'D']",0.15,0.0,0.02,0.33
54.0,1.0,19.0,"['C#4', 'E4', 'A4', 'E6']","['m3', 'm6', 'm3']","[True, False, True, True]",19.0,3.0,26.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
54.25,1.0,19.0,"['C#4', 'E4', 'A4', 'E6']","['m3', 'm6', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
54.5,1.0,19.0,"['C#4', 'E4', 'A4', 'E6']","['m3', 'm6', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
54.75,1.0,19.0,"['C#4', 'E4', 'A4', 'E6']","['m3', 'm6', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
55.0,1.0,19.0,"['C#4', 'A4', 'C#5', 'E6']","['m6', 'P1', 'm3']","[False, True, True, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
55.25,1.0,19.0,"['C#4', 'A4', 'C#5', 'E6']","['m6', 'P1', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
55.5,1.0,19.0,"['C#4', 'A4', 'C#5', 'E6']","['m6', 'P1', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
55.75,1.0,19.0,"['C#4', 'A4', 'C#5', 'E6']","['m6', 'P1', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
56.0,1.0,19.0,"['C#4', 'A4', 'E5', 'E6']","['m6', 'm3', 'm3']","[False, True, True, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
56.25,1.0,19.0,"['C#4', 'A4', 'E5', 'E6']","['m6', 'm3', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
56.5,1.0,19.0,"['C#4', 'A4', 'E5', 'E6']","['m6', 'm3', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
56.75,1.0,19.0,"['C#4', 'A4', 'E5', 'E6']","['m6', 'm3', 'm3']","[False, False, False, False]",19.0,3.0,26.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'E', 'A', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E', 'C#', 'A', 'E', 'E']",0.0,0.0,0.0,0.0
57.0,1.0,20.0,"['D4', 'A4', 'F#5', 'E6']","['P5', 'M3', 'M2']","[True, True, True, False]",20.0,3.0,27.0,IV,True,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
57.25,1.0,20.0,"['D4', 'A4', 'F#5', 'E6']","['P5', 'M3', 'M2']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
57.5,1.0,20.0,"['D4', 'A4', 'F#5', 'E6']","['P5', 'M3', 'M2']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
57.75,1.0,20.0,"['D4', 'A4', 'F#5', 'E6']","['P5', 'M3', 'M2']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
58.0,1.0,20.0,"['D4', 'A4', 'F#5', 'D6']","['P5', 'M3', 'P1']","[False, False, False, True]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
58.25,1.0,20.0,"['D4', 'A4', 'F#5', 'D6']","['P5', 'M3', 'P1']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
58.5,1.0,20.0,"['D4', 'A4', 'F#5', 'D6']","['P5', 'M3', 'P1']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
58.75,1.0,20.0,"['D4', 'A4', 'F#5', 'D6']","['P5', 'M3', 'P1']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
59.0,1.0,20.0,"['D4', 'A#4', 'F#5', 'C#6']","['A5', 'M3', 'M7']","[False, True, False, True]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
59.25,1.0,20.0,"['D4', 'A#4', 'F#5', 'C#6']","['A5', 'M3', 'M7']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
59.5,1.0,20.0,"['D4', 'A#4', 'F#5', 'C#6']","['A5', 'M3', 'M7']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
59.75,1.0,20.0,"['D4', 'A#4', 'F#5', 'C#6']","['A5', 'M3', 'M7']","[False, False, False, False]",20.0,3.0,27.0,IV,False,"('D', 'F#', 'A')",D,D,0.0,major triad,"(2, 6, 9)",A,A,4,None,False,"['D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'E', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A', 'F#', 'D', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#', 'D', 'A#', 'F#', 'C#']",0.25,0.0,0.06,0.0
60.0,1.0,21.0,"['D4', 'B4', 'F#5', 'D6']","['M6', 'M3', 'P1']","[False, True, False, True]",21.0,3.0,28.0,ii,True,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
60.25,1.0,21.0,"['D4', 'B4', 'F#5', 'D6']","['M6', 'M3', 'P1']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
60.5,1.0,21.0,"['D4', 'B4', 'F#5', 'D6']","['M6', 'M3', 'P1']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
60.75,1.0,21.0,"['D4', 'B4', 'F#5', 'D6']","['M6', 'M3', 'P1']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
61.0,1.0,21.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[True, False, True, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
61.25,1.0,21.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
61.5,1.0,21.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
61.75,1.0,21.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
62.0,1.0,21.0,"['B3', 'B4', 'D5', 'D6']","['P1', 'm3', 'm3']","[True, False, True, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
62.25,1.0,21.0,"['B3', 'B4', 'D5', 'D6']","['P1', 'm3', 'm3']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
62.5,1.0,21.0,"['B3', 'B4', 'D5', 'D6']","['P1', 'm3', 'm3']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
62.75,1.0,21.0,"['B3', 'B4', 'D5', 'D6']","['P1', 'm3', 'm3']","[False, False, False, False]",21.0,3.0,28.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'D', 'B', 'F#', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D', 'B', 'B', 'D', 'D']",0.17,0.0,0.03,0.67
63.0,1.0,22.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[True, False, True, False]",22.0,3.0,29.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
63.25,1.0,22.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
63.5,1.0,22.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
63.75,1.0,22.0,"['C#4', 'B4', 'E5', 'D6']","['m7', 'm3', 'm2']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
64.0,1.0,22.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, True, False, True]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
64.25,1.0,22.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
64.5,1.0,22.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
64.75,1.0,22.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
65.0,1.0,22.0,"['C#4', 'G#4', 'E5', 'B#5']","['P5', 'm3', 'M7']","[False, True, False, True]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
65.25,1.0,22.0,"['C#4', 'G#4', 'E5', 'B#5']","['P5', 'm3', 'M7']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
65.5,1.0,22.0,"['C#4', 'G#4', 'E5', 'B#5']","['P5', 'm3', 'M7']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
65.75,1.0,22.0,"['C#4', 'G#4', 'E5', 'B#5']","['P5', 'm3', 'M7']","[False, False, False, False]",22.0,3.0,29.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'B', 'E', 'D', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#', 'C#', 'G#', 'E', 'B#']",0.33,0.0,0.11,0.0
66.0,1.0,23.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, True, False, True]",23.0,3.0,30.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
66.25,1.0,23.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
66.5,1.0,23.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
66.75,1.0,23.0,"['C#4', 'A4', 'E5', 'C#6']","['m6', 'm3', 'P1']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
67.0,1.0,23.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[True, False, True, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
67.25,1.0,23.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
67.5,1.0,23.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
67.75,1.0,23.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
68.0,1.0,23.0,"['A3', 'A4', 'C#5', 'C#6']","['P1', 'M3', 'M3']","[True, False, True, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
68.25,1.0,23.0,"['A3', 'A4', 'C#5', 'C#6']","['P1', 'M3', 'M3']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
68.5,1.0,23.0,"['A3', 'A4', 'C#5', 'C#6']","['P1', 'M3', 'M3']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
68.75,1.0,23.0,"['A3', 'A4', 'C#5', 'C#6']","['P1', 'M3', 'M3']","[False, False, False, False]",23.0,3.0,30.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'C#', 'A', 'E', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#', 'A', 'A', 'C#', 'C#']",0.17,0.0,0.03,0.67
69.0,0.3333,24.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[True, False, True, False]",24.0,3.0,31.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
69.25,0.3333,24.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
69.5,0.3333,24.0,"['B3', 'A4', 'D5', 'B#5']","['m7', 'm3', 'A1']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
69.75,0.3333,24.0,"['B3', 'A4', 'D5', 'C#6']","['m7', 'm3', 'M2']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
70.0,0.3333,24.0,"['B3', 'G#4', 'D5', 'D6']","['M6', 'm3', 'm3']","[False, True, False, True]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
70.25,0.3333,24.0,"['B3', 'G#4', 'D5', 'D6']","['M6', 'm3', 'm3']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
70.5,0.3333,24.0,"['B3', 'G#4', 'D5', 'C#6']","['M6', 'm3', 'M2']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
70.75,0.3333,24.0,"['B3', 'G#4', 'D5', 'B5']","['M6', 'm3', 'P1']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
71.0,0.3333,24.0,"['B3', 'G#4', 'D5', 'A5']","['M6', 'm3', 'm7']","[False, True, False, True]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
71.25,0.3333,24.0,"['B3', 'G#4', 'D5', 'A5']","['M6', 'm3', 'm7']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
71.5,0.3333,24.0,"['B3', 'G#4', 'D5', 'G#5']","['M6', 'm3', 'M6']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
71.75,0.3333,24.0,"['B3', 'G#4', 'D5', 'F#5']","['M6', 'm3', 'P5']","[False, False, False, False]",24.0,3.0,31.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['B', 'A', 'D', 'C#', 'B', 'A', 'D', 'C#', 'B', 'A', 'D', 'B#', 'B', 'A', 'D', 'C#', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'D', 'B', 'G#', 'D', 'C#', 'B', 'G#', 'D', 'B', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'A', 'B', 'G#', 'D', 'G#', 'B', 'G#', 'D', 'F#']",0.79,0.33,1.27,1.0
72.0,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[True, True, False, True]",25.0,3.0,32.0,V7,True,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
72.25,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
72.5,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
72.75,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
73.0,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, True]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
73.25,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
73.5,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
73.75,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
74.0,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, True]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
74.25,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
74.5,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
74.75,1.0,25.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",25.0,3.0,32.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
75.0,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, True]",26.0,3.0,33.0,V7,True,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
75.25,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
75.5,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
75.75,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
76.0,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
76.25,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
76.5,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
76.75,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
77.0,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
77.25,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
77.5,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
77.75,3.0,26.0,"['B3', 'G#4', 'D5', 'E5']","['M6', 'm3', 'P4']","[False, False, False, False]",26.0,3.0,33.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E', 'B', 'G#', 'D', 'E']",0.0,0.0,0.0,0.0
78.0,1.0,27.0,"['C#4', 'A4', 'C#5', 'E5']","['m6', 'P1', 'm3']","[True, True, True, False]",27.0,3.0,34.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
78.25,1.0,27.0,"['C#4', 'A4', 'C#5', 'E5']","['m6', 'P1', 'm3']","[False, False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
78.5,1.0,27.0,"['C#4', 'A4', 'C#5', 'E5']","['m6', 'P1', 'm3']","[False, False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
78.75,1.0,27.0,"['C#4', 'A4', 'C#5', 'E5']","['m6', 'P1', 'm3']","[False, False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
79.0,1.0,27.0,"['C#4', 'A4', 'A5']","['m6', 'm6']","[False, True, True]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
79.25,1.0,27.0,"['C#4', 'A4', 'A5']","['m6', 'm6']","[False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
79.5,1.0,27.0,"['C#4', 'A4', 'A5']","['m6', 'm6']","[False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
79.75,1.0,27.0,"['C#4', 'A4', 'A5']","['m6', 'm6']","[False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
80.0,1.0,27.0,"['C#4', 'A4', 'G#5']","['m6', 'P5']","[False, True, True]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
80.25,1.0,27.0,"['C#4', 'A4', 'G#5']","['m6', 'P5']","[False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
80.5,1.0,27.0,"['C#4', 'A4', 'G#5']","['m6', 'P5']","[False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
80.75,1.0,27.0,"['C#4', 'A4', 'G#5']","['m6', 'P5']","[False, False, False]",27.0,3.0,34.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'C#', 'E', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#', 'C#', 'A', 'G#']",0.1,0.0,0.01,0.0
81.0,1.0,28.0,"['D4', 'A4', 'G#5']","['P5', 'A4']","[True, True, True]",28.0,2.0,35.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
81.25,1.0,28.0,"['D4', 'A4', 'G#5']","['P5', 'A4']","[False, False, False]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
81.5,1.0,28.0,"['D4', 'A4', 'G#5']","['P5', 'A4']","[False, False, False]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
81.75,1.0,28.0,"['D4', 'A4', 'G#5']","['P5', 'A4']","[False, False, False]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
82.0,1.0,28.0,"['D4', 'A4', 'F#5']","['P5', 'M3']","[False, False, True]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
82.25,1.0,28.0,"['D4', 'A4', 'F#5']","['P5', 'M3']","[False, False, False]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
82.5,1.0,28.0,"['D4', 'A4', 'F#5']","['P5', 'M3']","[False, False, False]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
82.75,1.0,28.0,"['D4', 'A4', 'F#5']","['P5', 'M3']","[False, False, False]",28.0,2.0,35.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'G#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#', 'D', 'A', 'F#']",0.67,0.67,1.78,1.0
83.0,1.0,28.0,"['C#4', 'A4', 'E5']","['m6', 'm3']","[True, False, True]",28.0,1.0,36.0,I,True,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E']",0.0,0.0,0.0,0.0
83.25,1.0,28.0,"['C#4', 'A4', 'E5']","['m6', 'm3']","[False, False, False]",28.0,1.0,36.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E']",0.0,0.0,0.0,0.0
83.5,1.0,28.0,"['C#4', 'A4', 'E5']","['m6', 'm3']","[False, False, False]",28.0,1.0,36.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E']",0.0,0.0,0.0,0.0
83.75,1.0,28.0,"['C#4', 'A4', 'E5']","['m6', 'm3']","[False, False, False]",28.0,1.0,36.0,I,False,"('C#', 'E', 'A')",C#,A,1.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E', 'C#', 'A', 'E']",0.0,0.0,0.0,0.0
84.0,1.0,29.0,"['B3', 'D4', 'G#4', 'E5']","['m3', 'M6', 'P4']","[True, True, True, False]",29.0,2.0,37.0,V7,True,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
84.25,1.0,29.0,"['B3', 'D4', 'G#4', 'E5']","['m3', 'M6', 'P4']","[False, False, False, False]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
84.5,1.0,29.0,"['B3', 'D4', 'G#4', 'E5']","['m3', 'M6', 'P4']","[False, False, False, False]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
84.75,1.0,29.0,"['B3', 'D4', 'G#4', 'E5']","['m3', 'M6', 'P4']","[False, False, False, False]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
85.0,0.3333,29.0,"['B3', 'D4', 'G#4', 'F#5']","['m3', 'M6', 'P5']","[False, False, False, True]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
85.25,0.3333,29.0,"['B3', 'D4', 'G#4', 'F#5']","['m3', 'M6', 'P5']","[False, False, False, False]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
85.5,0.3333,29.0,"['B3', 'D4', 'G#4', 'D5']","['m3', 'M6', 'm3']","[False, False, False, False]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
85.75,0.3333,29.0,"['B3', 'D4', 'G#4', 'B4']","['m3', 'M6', 'P1']","[False, False, False, False]",29.0,2.0,37.0,V7,False,"('B', 'D', 'E', 'G#')",B,E,2.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'E', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'F#', 'B', 'D', 'G#', 'D', 'B', 'D', 'G#', 'B']",0.06,0.0,0.0,0.0
86.0,1.0,29.0,"['A3', 'C#4', 'A4', 'C#5']","['M3', 'P1', 'M3']","[True, True, True, True]",29.0,1.0,38.0,I,True,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#']",0.0,0.33,0.11,0.0
86.25,1.0,29.0,"['A3', 'C#4', 'A4', 'C#5']","['M3', 'P1', 'M3']","[False, False, False, False]",29.0,1.0,38.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#']",0.0,0.33,0.11,0.0
86.5,1.0,29.0,"['A3', 'C#4', 'A4', 'C#5']","['M3', 'P1', 'M3']","[False, False, False, False]",29.0,1.0,38.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#']",0.0,0.33,0.11,0.0
86.75,1.0,29.0,"['A3', 'C#4', 'A4', 'C#5']","['M3', 'P1', 'M3']","[False, False, False, False]",29.0,1.0,38.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#', 'A', 'C#']",0.0,0.33,0.11,0.0
87.0,1.0,30.0,"['D3', 'D4', 'F#4', 'C#5']","['P1', 'M3', 'M7']","[True, True, True, False]",30.0,2.0,39.0,ii,True,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
87.25,1.0,30.0,"['D3', 'D4', 'F#4', 'C#5']","['P1', 'M3', 'M7']","[False, False, False, False]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
87.5,1.0,30.0,"['D3', 'D4', 'F#4', 'C#5']","['P1', 'M3', 'M7']","[False, False, False, False]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
87.75,1.0,30.0,"['D3', 'D4', 'F#4', 'C#5']","['P1', 'M3', 'M7']","[False, False, False, False]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
88.0,1.0,30.0,"['D3', 'D4', 'F#4', 'B4']","['P1', 'M3', 'M6']","[False, False, False, True]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
88.25,1.0,30.0,"['D3', 'D4', 'F#4', 'B4']","['P1', 'M3', 'M6']","[False, False, False, False]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
88.5,1.0,30.0,"['D3', 'D4', 'F#4', 'B4']","['P1', 'M3', 'M6']","[False, False, False, False]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
88.75,1.0,30.0,"['D3', 'D4', 'F#4', 'B4']","['P1', 'M3', 'M6']","[False, False, False, False]",30.0,2.0,39.0,ii,False,"('D', 'F#', 'B')",D,B,1.0,minor triad,"(2, 6, 11)",A,A,2,None,False,"['D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'C#', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B', 'D', 'D', 'F#', 'B']",0.12,0.0,0.02,0.0
89.0,0.3333,30.0,"['E3', 'B3', 'D4', 'D5']","['P5', 'm7', 'm7']","[True, True, True, True]",30.0,1.0,40.0,V7,True,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'B', 'D', 'D', 'E', 'B', 'D', 'D', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'G#']",0.0,0.0,0.0,0.0
89.25,0.3333,30.0,"['E3', 'B3', 'D4', 'D5']","['P5', 'm7', 'm7']","[False, False, False, False]",30.0,1.0,40.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'B', 'D', 'D', 'E', 'B', 'D', 'D', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'G#']",0.0,0.0,0.0,0.0
89.5,0.3333,30.0,"['E3', 'B3', 'D4', 'B4']","['P5', 'm7', 'P5']","[False, False, False, False]",30.0,1.0,40.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'B', 'D', 'D', 'E', 'B', 'D', 'D', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'G#']",0.0,0.0,0.0,0.0
89.75,0.3333,30.0,"['E3', 'B3', 'D4', 'G#4']","['P5', 'm7', 'M3']","[False, False, False, False]",30.0,1.0,40.0,V7,False,"('E', 'G#', 'B', 'D')",E,E,0.0,dominant seventh chord,"(2, 4, 8, 11)",A,A,5,None,False,"['E', 'B', 'D', 'D', 'E', 'B', 'D', 'D', 'E', 'B', 'D', 'B', 'E', 'B', 'D', 'G#']",0.0,0.0,0.0,0.0
90.0,1.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[True, True, True]",31.0,3.0,41.0,I,True,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
90.25,1.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
90.5,1.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
90.75,1.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
91.0,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
91.25,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
91.5,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
91.75,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
92.0,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
92.25,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
92.5,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
92.75,2.0,31.0,"['A2', 'C#4', 'A4']","['M3', 'P1']","[False, False, False]",31.0,3.0,41.0,I,False,"('A', 'C#', 'E')",A,A,0.0,major triad,"(1, 4, 9)",A,A,1,None,False,"['A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A', 'A', 'C#', 'A']",0.0,0.33,0.11,0.0
"""
def _load_dfgt(csvGT):
    """Parse the ground-truth CSV string into a DataFrame indexed by offset."""
    csvGTF = io.StringIO(csvGT)
    dfGT = pd.read_csv(csvGTF)
    dfGT.set_index("j_offset", inplace=True)
    # List-type columns are serialized as strings in the CSV; eval restores them
    for col in AugmentedNet.joint_parser.J_LISTTYPE_COLUMNS:
        dfGT[col] = dfGT[col].apply(eval)
    return dfGT

class TestScoreParser(unittest.TestCase):
    def setUp(self):
        # Show full diffs when a row comparison fails
        self.maxDiff = None

    def test_haydn_annotation_and_score(self):
        dfGT = _load_dfgt(haydnOp20no4iDataFrameGT)
        df = AugmentedNet.joint_parser.parseAnnotationAndScore(
            haydnOp20no4iAnnotation, haydnOp20no4iScore, fixedOffset=0.25
        )
        # Compare every parsed row against the ground truth, row by row
        for rowGT, row in zip(dfGT.itertuples(), df.itertuples()):
            with self.subTest(gt_index=rowGT.Index, index=row.Index):
                self.assertEqual(rowGT._asdict(), row._asdict())
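
# A minimal alternative sketch (an assumption, not part of the original
# suite): ast.literal_eval parses the serialized list-type columns without
# executing arbitrary code, unlike the bare eval() used in _load_dfgt above.
def _load_dfgt_safe(csvGT):
    import ast  # stdlib; local import keeps the sketch self-contained

    dfGT = pd.read_csv(io.StringIO(csvGT), index_col="j_offset")
    for col in AugmentedNet.joint_parser.J_LISTTYPE_COLUMNS:
        dfGT[col] = dfGT[col].apply(ast.literal_eval)
    return dfGT
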
if __name__ == "__main__":
    unittest.main()
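# To run only this case from the command line (the module name is a
# hypothetical placeholder for wherever this file lives):
#   python -m unittest <module>.TestScoreParser.test_haydn_annotation_and_score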