| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
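The `qsc_*_quality_signal` columns hold per-file quality scores (mostly `float64`), each paired with a suffix-less `int64` counterpart, plus an overall `effective`/`hits` pair at the end. The sketch below shows one way such records could be filtered on those columns; it is a minimal example assuming the rows are stored in a Parquet file readable with pandas. The file name is hypothetical, and only the column names are taken from the schema above.

```python
# Hypothetical sketch: the path "code_samples.parquet" and the choice of
# pandas/Parquet are assumptions; column names come from the schema above.
import pandas as pd

df = pd.read_parquet("code_samples.parquet")

# Keep Python files that are not flagged as auto-generated and whose
# character mix looks like hand-written code.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
)

cols = ["max_stars_repo_name", "size", "qsc_code_num_lines_quality_signal"]
print(df.loc[mask, cols].head())
```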
hexsha: deaac779860688359de12a6c954d1c5c707b8a6f | size: 12,985 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/augmentation/test_transforms.py | techthiyanes/textacy | c7a5e1f881a3df63a89991accefcbd375ede5353 | ["Apache-2.0"] | null | null | null |
| max_issues | tests/augmentation/test_transforms.py | techthiyanes/textacy | c7a5e1f881a3df63a89991accefcbd375ede5353 | ["Apache-2.0"] | null | null | null |
| max_forks | tests/augmentation/test_transforms.py | techthiyanes/textacy | c7a5e1f881a3df63a89991accefcbd375ede5353 | ["Apache-2.0"] | null | null | null |

content:
from textacy.augmentation import transforms
from textacy.augmentation import utils as aug_utils
from textacy.types import AugTok
import pytest
@pytest.fixture(scope="module")
def aug_toks(doc_en):
return aug_utils.to_aug_toks(doc_en)
@pytest.mark.skipif(
aug_utils.concept_net.filepath is None,
reason="ConceptNet resource must be downloaded before running tests",
)
class TestSubstituteWordSynonyms:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.substitute_word_synonyms(aug_toks, num=num)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
for num in [1, 3]:
new_aug_toks = transforms.substitute_word_synonyms(aug_toks, num=num)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert any(
aug_tok.text != new_aug_tok.text
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.substitute_word_synonyms(aug_toks, num=num)
def test_pos(self, aug_toks):
for pos in ["NOUN", ("NOUN", "VERB", "ADJ", "ADV")]:
new_aug_toks = transforms.substitute_word_synonyms(aug_toks, num=1, pos=pos)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert any(
aug_tok.text != new_aug_tok.text
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.substitute_word_synonyms(aug_toks, num=num)
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.substitute_word_synonyms(obj, num=1)
@pytest.mark.skipif(
aug_utils.concept_net.filepath is None,
reason="ConceptNet resource must be downloaded before running tests",
)
class TestInsertWordSynonyms:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.insert_word_synonyms(aug_toks, num=num)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
for num in [1, 3]:
new_aug_toks = transforms.insert_word_synonyms(aug_toks, num=num)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) > len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.insert_word_synonyms(aug_toks, num=num)
def test_pos(self, aug_toks):
for pos in ["NOUN", ("NOUN", "VERB", "ADJ", "ADV")]:
new_aug_toks = transforms.insert_word_synonyms(aug_toks, num=1, pos=pos)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) > len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.insert_word_synonyms(aug_toks, num=num)
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.insert_word_synonyms(obj, num=1)
class TestSwapWords:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.swap_words(aug_toks, num=num)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
for num in [1, 3]:
new_aug_toks = transforms.swap_words(aug_toks, num=num)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert any(
aug_tok.text != new_aug_tok.text
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.swap_words(aug_toks, num=num)
def test_pos(self, aug_toks):
for pos in ["NOUN", ("NOUN", "VERB", "ADJ", "ADV")]:
new_aug_toks = transforms.swap_words(aug_toks, num=1, pos=pos)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert any(
aug_tok.text != new_aug_tok.text
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.swap_words(aug_toks, num=num)
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.swap_words(obj, num=1)
class TestDeleteWords:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.delete_words(aug_toks, num=num)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
for num in [1, 3]:
new_aug_toks = transforms.delete_words(aug_toks, num=num)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) < len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.delete_words(aug_toks, num=num)
def test_pos(self, aug_toks):
for pos in ["NOUN", ("NOUN", "VERB", "ADJ", "ADV")]:
new_aug_toks = transforms.delete_words(aug_toks, num=1, pos=pos)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) < len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.delete_words(aug_toks, num=num)
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.delete_words(obj, num=1)
@pytest.mark.skipif(
aug_utils.udhr.index is None,
reason="UDHR dataset must be downloaded before running tests",
)
class TestSubstituteChars:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.substitute_chars(aug_toks, num=num, lang="en")
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
# using higher nums here to prevent the very unlikely case
# that all characters are substituted by the same character
for num in [3, 5]:
new_aug_toks = transforms.substitute_chars(aug_toks, num=num, lang="en")
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert any(
aug_tok.text != new_aug_tok.text
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
assert all(
len(aug_tok.text) == len(new_aug_tok.text)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.substitute_chars(aug_toks, num=num, lang="en")
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.substitute_chars(aug_toks, num=num, lang="en")
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.substitute_chars(obj, num=1, lang="en")
@pytest.mark.skipif(
aug_utils.udhr.index is None,
reason="UDHR dataset must be downloaded before running tests",
)
class TestInsertChars:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.insert_chars(aug_toks, num=num, lang="en")
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
for num in [1, 3]:
new_aug_toks = transforms.insert_chars(aug_toks, num=num, lang="en")
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert all(
(
aug_tok.text == new_aug_tok.text
or len(aug_tok.text) < len(new_aug_tok.text)
)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.insert_chars(aug_toks, num=num, lang="en")
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.insert_chars(aug_toks, num=num, lang="en")
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.insert_chars(obj, num=1, lang="en")
class TestSwapChars:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.swap_chars(aug_toks, num=num)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
# using higher nums here to prevent the very unlikely case
# that all characters are swapped with the same character
for num in [3, 5]:
new_aug_toks = transforms.swap_chars(aug_toks, num=num)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert any(
aug_tok.text != new_aug_tok.text
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
assert all(
len(aug_tok.text) == len(new_aug_tok.text)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.swap_chars(aug_toks, num=num)
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.swap_chars(aug_toks, num=num)
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.swap_chars(obj, num=1)
class TestDeleteChars:
def test_noop(self, aug_toks):
for num in [0, 0.0]:
new_aug_toks = transforms.delete_chars(aug_toks, num=num)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks):
assert aug_tok.text == new_aug_tok.text
def test_num_int(self, aug_toks):
for num in [1, 3]:
new_aug_toks = transforms.delete_chars(aug_toks, num=num)
assert isinstance(new_aug_toks, list)
assert len(new_aug_toks) == len(aug_toks)
assert all(isinstance(aug_tok, AugTok) for aug_tok in new_aug_toks)
assert all(
(
aug_tok.text == new_aug_tok.text
or len(aug_tok.text) > len(new_aug_tok.text)
)
for aug_tok, new_aug_tok in zip(aug_toks, new_aug_toks)
)
def test_num_float(self, aug_toks):
for num in [0.1, 0.3]:
_ = transforms.delete_chars(aug_toks, num=num)
def test_errors(self, aug_toks):
for num in [-1, 2.0]:
with pytest.raises(ValueError):
_ = transforms.delete_chars(aug_toks, num=num)
for obj in [["foo", "bar"], "foo bar"]:
with pytest.raises(TypeError):
_ = transforms.delete_chars(obj, num=1)
| 40.451713
| 88
| 0.605853
| 1,877
| 12,985
| 3.907299
| 0.059137
| 0.169894
| 0.1009
| 0.064903
| 0.94396
| 0.939733
| 0.934551
| 0.934551
| 0.932642
| 0.931279
| 0
| 0.011774
| 0.29357
| 12,985
| 320
| 89
| 40.578125
| 0.787747
| 0.017482
| 0
| 0.825279
| 0
| 0
| 0.033247
| 0
| 0
| 0
| 0
| 0
| 0.200743
| 1
| 0.137546
| false
| 0
| 0.01487
| 0.003717
| 0.185874
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
hexsha: ded0a4e990941c37d93014c7049360fcdac56d95 | size: 202 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | 18 Spring/4511W Intro to Artifical Intelligence/Homework/hw3/test_nq_csp.py | oway13/Schoolwork | 294f407c288ef532f8f187a6ee0bd9fd0e7559ab | ["MIT"] | null | null | null |
| max_issues | 18 Spring/4511W Intro to Artifical Intelligence/Homework/hw3/test_nq_csp.py | oway13/Schoolwork | 294f407c288ef532f8f187a6ee0bd9fd0e7559ab | ["MIT"] | null | null | null |
| max_forks | 18 Spring/4511W Intro to Artifical Intelligence/Homework/hw3/test_nq_csp.py | oway13/Schoolwork | 294f407c288ef532f8f187a6ee0bd9fd0e7559ab | ["MIT"] | null | null | null |

content:
from csp import *
import pytest
#backtracking_search(NQueensCSP(11))
#backtracking_search(NQueensCSP(20))
#min_conflicts(NQueensCSP(11))
min_conflicts(NQueensCSP(2000))
#min_conflicts(NQueensCSP(40))
| 20.2
| 36
| 0.811881
| 25
| 202
| 6.36
| 0.52
| 0.226415
| 0.415094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 0.064356
| 202
| 9
| 37
| 22.444444
| 0.777778
| 0.633663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
hexsha: a0e923c22ef3a1e8b360428bc7cb37dbe236aeba | size: 7 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | package-test/spam-p2/spam/package1.py | plant99/import-system-talk-resources | a48620ee8e6eda5c3a1c09a708804770781d4bea | ["MIT"] | 1 | 2020-08-20T16:37:49.000Z | 2020-08-20T16:37:49.000Z |
| max_issues | package-test/spam1/package1.py | plant99/import-system-talk-resources | a48620ee8e6eda5c3a1c09a708804770781d4bea | ["MIT"] | null | null | null |
| max_forks | package-test/spam1/package1.py | plant99/import-system-talk-resources | a48620ee8e6eda5c3a1c09a708804770781d4bea | ["MIT"] | 1 | 2020-08-20T16:38:22.000Z | 2020-08-20T16:38:22.000Z |

content:
x = 3
| 2.333333
| 5
| 0.285714
| 2
| 7
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.571429
| 7
| 2
| 6
| 3.5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
hexsha: 9d0ca67cc2295067864bc6db0b57286043265732 | size: 16,356 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | hchztests/tests/test_website_tool_session.py | codedsk/hubcheck-hubzero-tests | 89dd7164fed9161a5bf80e0a5635cec3da5be31d | ["MIT"] | 1 | 2016-01-02T01:36:14.000Z | 2016-01-02T01:36:14.000Z |
| max_issues | hchztests/tests/test_website_tool_session.py | codedsk/hubcheck-hubzero-tests | 89dd7164fed9161a5bf80e0a5635cec3da5be31d | ["MIT"] | null | null | null |
| max_forks | hchztests/tests/test_website_tool_session.py | codedsk/hubcheck-hubzero-tests | 89dd7164fed9161a5bf80e0a5635cec3da5be31d | ["MIT"] | null | null | null |

content:
import pytest
import hubcheck
from hubcheck.testcase import TestCase2
from hubcheck.shell import ContainerManager
pytestmark = [ pytest.mark.website,
pytest.mark.tool_session,
pytest.mark.weekly,
pytest.mark.upgrade,
pytest.mark.prod_safe_upgrade,
pytest.mark.reboot,
]
class TestToolSessionApp(TestCase2):
def setup_method(self,method):
# start up a tool session container
self.hubname = self.testdata.find_url_for('https')
self.username,self.userpass = \
self.testdata.find_account_for('registeredworkspace')
self.cm = ContainerManager()
self.ws = self.cm.access(host=self.hubname,
username=self.username,
password=self.userpass)
self.session_number,es = self.ws.execute('echo $SESSION')
self.ws.close()
# setup a web browser
self.browser.get(self.https_authority)
self.utils.account.login_as(self.username,self.userpass)
self.po = self.catalog.load_pageobject('ToolSessionPage',
'workspace',int(self.session_number))
self.po.goto_page()
def teardown_method(self,method):
# get out of the workspace
# shut down the ssh connection
self.cm.sync_open_sessions(self.hubname,self.username)
def test_terminate_container(self):
"""
test pressing the terminate button on the app
"""
# press the terminate button
self.po.app.do_terminate()
# check that the container terminated
po = self.catalog.load_pageobject('MembersDashboardPage')
po.goto_page()
open_sessions = po.modules.my_sessions.get_session_numbers()
assert int(self.session_number) not in open_sessions,\
"after terminating session %s," % (self.session_number) \
+ " session still listed as open in my_sessions module"
def test_keep_container(self):
"""
test pressing the keep button on the app
"""
# press the keep button
self.po.app.do_keep()
# check that the container is still open
po = self.catalog.load_pageobject('MembersDashboardPage')
po.goto_page()
open_sessions = po.modules.my_sessions.get_session_numbers()
assert int(self.session_number) in open_sessions,\
"after keeping session %s," % (self.session_number) \
+ " session not listed as open in my_sessions module"
# def test_popout_container(self):
# """
# test pressing the popout button on the app to popout the app
# """
#
# browser = self.browser._browser
#
# # get current window info
# url1 = browser.current_url
# current_window = browser.current_window_handle
#
# # press the popout button
# self.po.app.do_popout()
#
# # find the popup window
# other_window = None
# for w in browser.window_handles:
# if w != current_window:
# other_window = w
#
# assert other_window is not None, \
# "after pressing the popout button, no window popped out"
#
#
# def test_popout_container_close(self):
# """
# test closing the popped out app does not end the session
# """
#
# browser = self.browser._browser
#
# # get current window info
# url1 = browser.current_url
# current_window = browser.current_window_handle
#
# # press the popout button
# self.po.app.do_popout()
#
# # find the popup window
# other_window = None
# for w in browser.window_handles:
# if w != current_window:
# other_window = w
#
# assert other_window is not None, \
# "after pressing the popout button, no window popped out"
#
# # switch to the popup window
# browser.switch_to_window(other_window)
#
# # close the popup window
# browser.close()
# browser.switch_to_window(current_window)
#
# # check that the container is still open
# po = self.catalog.load_pageobject('MembersDashboardPage')
# po.goto_page()
# open_sessions = po.modules.my_sessions.get_session_numbers()
#
# assert int(self.session_number) in open_sessions,\
# "after closing popped out app," \
# + " session %s not listed as open in my_sessions module" \
# % (self.session_number)
#
#
# def test_popout_container_popin(self):
# """
# test popping-in a popped out app
# """
#
# browser = self.browser._browser
#
# # get current window info
# url1 = browser.current_url
# current_window = browser.current_window_handle
#
# # press the popout button
# self.po.app.do_popout()
#
# # find the popup window
# other_window = None
# for w in browser.window_handles:
# if w != current_window:
# other_window = w
#
# assert other_window is not None, \
# "after pressing the popout button, no window popped out"
#
# # pop the container back in the browser
# self.po.app.do_popout()
#
# # make sure the popped-out window closes
# other_window = None
# for w in browser.window_handles:
# if w != current_window:
# other_window = w
#
# assert other_window is None, \
# "after pressing the 'pop in' button," \
# + " the popped out window still exists"
#
# # check that the container is still open
# po = self.catalog.load_pageobject('MembersDashboardPage')
# po.goto_page()
# open_sessions = po.modules.my_sessions.get_session_numbers()
#
# assert int(self.session_number) in open_sessions,\
# "after popping in the tool session container app," \
# + " session %s not listed as open in my_sessions module" \
# % (self.session_number)
@hubcheck.utils.hub_version(min_version='1.1.2')
@pytest.mark.user_storage
def test_storage_meter(self):
"""
retrieve the free storage amount
"""
storage_amount = self.po.app.storage.storage_meter()
assert storage_amount != '', \
"invalid storage amount returned: %s" % (storage_amount)
assert storage_amount != '0% of 0GB', \
"user quotas not activated: storage_amount = %s" % (storage_amount)
class TestToolSessionShare(TestCase2):
def setup_method(self,method):
# start up a tool session container
self.hubname = self.testdata.find_url_for('https')
self.username,self.userpass = \
self.testdata.find_account_for('registeredworkspace')
self.cm = ContainerManager()
self.ws = self.cm.access(host=self.hubname,
username=self.username,
password=self.userpass)
self.session_number,es = self.ws.execute('echo $SESSION')
self.ws.close()
# setup a web browser
self.browser.get(self.https_authority)
self.utils.account.login_as(self.username,self.userpass)
self.po = self.catalog.load_pageobject('ToolSessionPage',
'workspace',int(self.session_number))
self.po.goto_page()
def teardown_method(self,method):
# disconnect all users from workspace
self.po.goto_page()
self.po.share.disconnect_all()
def test_share_session_with_1(self):
"""
test sharing the session with nobody
"""
shared_with_1 = self.po.share.get_shared_with()
self.po.share.share.click()
self.po.share.wait_for_overlay()
shared_with_2 = self.po.share.get_shared_with()
assert len(shared_with_1) == len(shared_with_2), \
"after pressing the share button, shared list changed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
s1 = set(shared_with_1)
s2 = set(shared_with_2)
s_union = s1 | s2
assert len(s_union) == len(shared_with_1), \
"after pressing the share button, shared list changed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
def test_share_session_with_2(self):
"""
test sharing the session with another user
"""
shared_with_1 = self.po.share.get_shared_with()
username2,junk = \
self.testdata.find_account_for('purdueworkspace')
user2_data = self.testdata.get_userdata_for(username2)
user2_name = '{0} {1}'.format(user2_data.firstname,user2_data.lastname)
self.po.share.share_session_with(username2)
shared_with_2 = self.po.share.get_shared_with()
assert len(shared_with_1)+1 == len(shared_with_2), \
"after sharing the session, wrong # users listed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
assert user2_name in shared_with_2, \
"after sharing session with %s, user %s" % (username2,user2_name) \
+ " does not show up in shared with list: %s" % (shared_with_2)
def test_share_session_with_3(self):
"""
test sharing the session with a fake user
"""
shared_with_1 = self.po.share.get_shared_with()
self.po.share.share_session_with('fakeuserthatshouldnotexist')
shared_with_2 = self.po.share.get_shared_with()
assert len(shared_with_1) == len(shared_with_2), \
"after sharing the session with a fake user, shared list changed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
s1 = set(shared_with_1)
s2 = set(shared_with_2)
s_union = s1 | s2
assert len(s_union) == len(shared_with_1), \
"after sharing the session with a fake user, shared list changed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
# def test_share_session_with_4(self):
# """
# test sharing the session with a group
# """
#
# self.po.share.share_session_with(group=0)
# def test_share_session_with_5(self):
# """
# test sharing the session with another user, read only
# """
#
# shared_with_1 = self.po.share.get_shared_with()
#
# username2,junk = \
# self.testdata.find_account_for('purdueworkspace')
# user2_data = self.testdata.get_userdata_for(username2)
# user2_name = '{0} {1}'.format(user2_data.firstname,user2_data.lastname)
# self.po.share.share_session_with(username2,readonly=True)
#
# shared_with_2 = self.po.share.get_shared_with()
#
# assert len(shared_with_1)+1 == len(shared_with_2), \
# "after sharing the session, wrong # users listed: " \
# + "before: %s, after: %s" % (shared_with_1,shared_with_2)
#
# assert user2_name in shared_with_2, \
# "after sharing session with %s, user %s" % (username2,user2_name) \
# + " does not show up in shared with list: %s" % (shared_with_2)
#
# # check if the user was added to the list with the "read only" property
def test_share_session_with_6(self):
"""
test sharing the session with another user twice
user should only show up once in list
"""
shared_with_1 = self.po.share.get_shared_with()
username2,junk = \
self.testdata.find_account_for('purdueworkspace')
user2_data = self.testdata.get_userdata_for(username2)
user2_name = '{0} {1}'.format(user2_data.firstname,user2_data.lastname)
self.po.share.share_session_with(username2)
self.po.share.share_session_with(username2)
shared_with_2 = self.po.share.get_shared_with()
assert len(shared_with_1)+1 == len(shared_with_2), \
"after sharing the session, wrong # users listed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
assert user2_name in shared_with_2, \
"after sharing session with %s, user %s" % (username2,user2_name) \
+ " does not show up in shared with list: %s" % (shared_with_2)
def test_share_session_with_7(self):
"""
test sharing the session with multiple users
"""
shared_with_1 = self.po.share.get_shared_with()
username2,junk = \
self.testdata.find_account_for('purdueworkspace')
user2_data = self.testdata.get_userdata_for(username2)
user2_name = '{0} {1}'.format(user2_data.firstname,user2_data.lastname)
username3,junk = \
self.testdata.find_account_for('networkworkspace')
user3_data = self.testdata.get_userdata_for(username3)
user3_name = '{0} {1}'.format(user3_data.firstname,user3_data.lastname)
self.po.share.share_session_with([username2,username3])
shared_with_2 = self.po.share.get_shared_with()
assert len(shared_with_1)+2 == len(shared_with_2), \
"after sharing the session, wrong # users listed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
assert user2_name in shared_with_2, \
"after sharing session with %s, user %s" % (username2,user2_name) \
+ " does not show up in shared with list: %s" % (shared_with_2)
assert user3_name in shared_with_2, \
"after sharing session with %s, user %s" % (username3,user3_name) \
+ " does not show up in shared with list: %s" % (shared_with_2)
def test_share_session_with_8(self):
"""
test sharing the session with multiple users, one at a time
"""
shared_with_1 = self.po.share.get_shared_with()
username2,junk = \
self.testdata.find_account_for('purdueworkspace')
user2_data = self.testdata.get_userdata_for(username2)
user2_name = '{0} {1}'.format(user2_data.firstname,user2_data.lastname)
username3,junk = \
self.testdata.find_account_for('networkworkspace')
user3_data = self.testdata.get_userdata_for(username3)
user3_name = '{0} {1}'.format(user3_data.firstname,user3_data.lastname)
self.po.share.share_session_with([username2])
self.po.share.share_session_with([username3])
shared_with_2 = self.po.share.get_shared_with()
assert len(shared_with_1)+2 == len(shared_with_2), \
"after sharing the session, wrong # users listed: " \
+ "before: %s, after: %s" % (shared_with_1,shared_with_2)
assert user2_name in shared_with_2, \
"after sharing session with %s, user %s" % (username2,user2_name) \
+ " does not show up in shared with list: %s" % (shared_with_2)
assert user3_name in shared_with_2, \
"after sharing session with %s, user %s" % (username3,user3_name) \
+ " does not show up in shared with list: %s" % (shared_with_2)
def test_disconnect_1(self):
"""
test disconnecting a connected user from a tool session container
"""
shared_with_1 = self.po.share.get_shared_with()
username2,junk = \
self.testdata.find_account_for('purdueworkspace')
user2_data = self.testdata.get_userdata_for(username2)
user2_name = '{0} {1}'.format(user2_data.firstname,user2_data.lastname)
# share the session with someone
self.po.share.share_session_with(username2)
shared_with_2 = self.po.share.get_shared_with()
assert user2_name in shared_with_2, \
"after sharing session with %s, user does" % (username2) \
+ " not show up in shared with list %s" % (shared_with_2)
# disconnect user from session
self.po.share.disconnect(username2)
# check that user was disconnected
shared_with_3 = self.po.share.get_shared_with()
assert user2_name not in shared_with_3, \
"after unsharing session with %s, user %s" \
% (username2, user2_name) \
+ " still shows up in shared with list: %s" \
% (shared_with_3)
| 33.516393
| 81
| 0.617082
| 2,043
| 16,356
| 4.713656
| 0.103279
| 0.102804
| 0.047975
| 0.024714
| 0.818276
| 0.797092
| 0.77892
| 0.774974
| 0.744548
| 0.732814
| 0
| 0.017613
| 0.281426
| 16,356
| 487
| 82
| 33.585216
| 0.801753
| 0.326119
| 0
| 0.708995
| 0
| 0
| 0.171787
| 0.00245
| 0
| 0
| 0
| 0
| 0.10582
| 1
| 0.074074
| false
| 0.031746
| 0.021164
| 0
| 0.10582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
hexsha: 9d3ac23cc6d282b9fc72d6e8931713f96a1a7cd4 | size: 167 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | caldera/models/__init__.py | jvrana/pyro-graphnets | 1c9809253e47414ecf3f6604c2147d5676ff76c0 | ["MIT"] | null | null | null |
| max_issues | caldera/models/__init__.py | jvrana/pyro-graphnets | 1c9809253e47414ecf3f6604c2147d5676ff76c0 | ["MIT"] | null | null | null |
| max_forks | caldera/models/__init__.py | jvrana/pyro-graphnets | 1c9809253e47414ecf3f6604c2147d5676ff76c0 | ["MIT"] | null | null | null |

content:
from caldera.models.encoder_core_decoder import EncodeCoreDecode
from caldera.models.graph_core import GraphCore
from caldera.models.graph_encoder import GraphEncoder
| 41.75
| 64
| 0.892216
| 22
| 167
| 6.590909
| 0.5
| 0.227586
| 0.351724
| 0.303448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071856
| 167
| 3
| 65
| 55.666667
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
hexsha: c227f05fdd0bf14adfcd1a5af14f61aa3cfdeafc | size: 20,814 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | models/vgg_rep.py | transmuteAI/Rescaling-CNN-through-Learnable-Repetition-of-Network-Parameters | d097d52e7c4d4bb3548da0b56dd61fc6b33282ad | ["MIT"] | 5 | 2021-01-14T16:39:30.000Z | 2021-01-20T11:58:17.000Z |
| max_issues | models/vgg_rep.py | transmuteAI/Rescaling-CNN-through-Learnable-Repetition-of-Network-Parameters | d097d52e7c4d4bb3548da0b56dd61fc6b33282ad | ["MIT"] | null | null | null |
| max_forks | models/vgg_rep.py | transmuteAI/Rescaling-CNN-through-Learnable-Repetition-of-Network-Parameters | d097d52e7c4d4bb3548da0b56dd61fc6b33282ad | ["MIT"] | null | null | null |

content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import conv2d
from .conv2d_repeat import Conv2dRepeat
cfg = {
'VGG11' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG10' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 'M'],
'VGG9' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M'],
'VGG8' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 'M'],
'VGG7' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M'],
'VGG6' : [64, 64, 'M', 128, 128, 'M', 256, 'M'],
'VGG5' : [64, 64, 'M', 128, 128, 'M'],
'VGG4' : [64, 64, 'M', 128, 'M'],
}
class VGG(nn.Module):
def __init__(self, vgg_name, num_classes):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.vgg_name = vgg_name
if vgg_name not in ['VGG4','VGG5','VGG6','VGG7']:
self.classifier = nn.Linear(512, num_classes)
elif vgg_name in ['VGG6', 'VGG7']:
self.classifier = nn.Linear(256, num_classes)
else:
self.classifier = nn.Linear(128, num_classes)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
out = self.features(x)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
class CVGG11_4(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_4, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = Conv2dRepeat((128,64,3,3), (128,128,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv3_1 = Conv2dRepeat((128,64,3,3), (256,128,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv3_2 = Conv2dRepeat((128,64,3,3), (256,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_1 = Conv2dRepeat((128,64,3,3), (512,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_2 = Conv2dRepeat((128,64,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_1 = Conv2dRepeat((128,64,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_2 = Conv2dRepeat((128,64,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x, self.conv2_1.weight)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x, self.conv2_1.weight)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x, self.conv2_1.weight)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x, self.conv2_1.weight)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x, self.conv2_1.weight)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x, self.conv2_1.weight)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv2_1.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CVGG11_5(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_5, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = Conv2dRepeat((128,64,3,3), (256,128,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv3_2 = Conv2dRepeat((128,128,3,3), (256,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_1 = Conv2dRepeat((128,64,3,3), (512,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_2 = Conv2dRepeat((128,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_1 = Conv2dRepeat((128,64,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_2 = Conv2dRepeat((128,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x, self.conv2_1.weight)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x, self.conv2_2.weight)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x, self.conv2_1.weight)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x, self.conv2_2.weight)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x, self.conv2_1.weight)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv2_2.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CVGG11_6(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_6, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = Conv2dRepeat((128,64,3,3), (256,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_1 = Conv2dRepeat((128,128,3,3), (512,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_2 = Conv2dRepeat((256,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_1 = Conv2dRepeat((128,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_2 = Conv2dRepeat((256,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x, self.conv2_1.weight)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x, self.conv2_2.weight)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x, self.conv3_1.weight)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x, self.conv2_2.weight)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv3_1.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CVGG11_7(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_7, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv4_1 = Conv2dRepeat((128,64,3,3), (512,256,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv4_2 = Conv2dRepeat((128,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_1 = Conv2dRepeat((256,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_2 = Conv2dRepeat((256,256,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x, self.conv2_1.weight)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x, self.conv2_2.weight)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x, self.conv3_1.weight)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv3_2.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CVGG11_8(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_8, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = Conv2dRepeat((256,128,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_1 = Conv2dRepeat((256,256,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_2 = Conv2dRepeat((512,256,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x, self.conv3_1.weight)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x, self.conv3_2.weight)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv4_1.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CVGG11_9(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_9, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_1 = Conv2dRepeat((512,256,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.conv5_2 = Conv2dRepeat((512,512,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x, self.conv4_1.weight)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv4_2.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class CVGG11_10(nn.Module):
def __init__(self, num_classes=10, args=None):
super(CVGG11_10, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = Conv2dRepeat((512,512,3,3), (512,512,3,3), stride = 1, padding = 1, conv_type="inter", args = args)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(128)
self.bn5 = nn.BatchNorm2d(256)
self.bn6 = nn.BatchNorm2d(256)
self.bn7 = nn.BatchNorm2d(512)
self.bn8 = nn.BatchNorm2d(512)
self.bn9 = nn.BatchNorm2d(512)
self.bn10 = nn.BatchNorm2d(512)
self.fc = nn.Linear(512, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv1_2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv2_1(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv2_2(x)
x = self.bn4(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv3_1(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv3_2(x)
x = self.bn6(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv4_1(x)
x = self.bn7(x)
x = F.relu(x)
x = self.conv4_2(x)
x = self.bn8(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.conv5_1(x)
x = self.bn9(x)
x = F.relu(x)
x = self.conv5_2(x, self.conv5_1.weight)
x = self.bn10(x)
x = F.relu(x)
x = self.maxpool(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
| 37.035587
| 122
| 0.537859
| 3,292
| 20,814
| 3.291312
| 0.03068
| 0.043932
| 0.085279
| 0.045224
| 0.938256
| 0.933549
| 0.924873
| 0.916844
| 0.915182
| 0.913429
| 0
| 0.132905
| 0.304843
| 20,814
| 561
| 123
| 37.101604
| 0.615938
| 0
| 0
| 0.82874
| 0
| 0
| 0.010906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033465
| false
| 0
| 0.009843
| 0
| 0.076772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
hexsha: c229be0c695d4dc2a3a76bd7d2faedecb344f937 | size: 14,911 | ext: py | lang: Python

| | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | contrastiveFC/models.py | yewon-lee/transfuser | f749e1446238cda04131037e2fdea463a3d04802 | ["MIT"] | 1 | 2021-07-23T02:06:39.000Z | 2021-07-23T02:06:39.000Z |
| max_issues | contrastiveFC/models.py | yewon-lee/transfuser | f749e1446238cda04131037e2fdea463a3d04802 | ["MIT"] | null | null | null |
| max_forks | contrastiveFC/models.py | yewon-lee/transfuser | f749e1446238cda04131037e2fdea463a3d04802 | ["MIT"] | null | null | null |

content:
from collections import deque
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import models
def normalize_imagenet(x):
""" Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
"""
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
class ContrastiveLearningModel(nn.Module):
def __init__(self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.ImageCNN = models.resnet34(pretrained=True)
self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
self.LidarEncoder = models.resnet18()
self.LidarEncoder.fc = nn.Sequential()
_tmp = self.LidarEncoder.conv1
self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
kernel_size=_tmp.kernel_size, stride=_tmp.stride,
padding=_tmp.padding,
bias=_tmp.bias)
self.flatten = nn.Sequential(
nn.Flatten()
)
def forward(self, image, lidar):
if self.normalize:
image = normalize_imagenet(image)
image_ft = self.ImageCNN(image)
#print("img ft shape:", image_ft.shape)
lidar_ft = self.LidarEncoder(lidar)
#print("lidar ft shape:",lidar_ft.shape)
return image_ft, lidar_ft # dims: 512
class ContrastiveLearningModel_ImageOnly(nn.Module):
def __init__(self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.ImageCNN = models.resnet34(pretrained=True)
self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
self.flatten = nn.Sequential(
nn.Flatten()
)
def forward(self, image):
if self.normalize:
image = normalize_imagenet(image)
image_ft = self.ImageCNN(image)
return image_ft # dims: 512
class ContrastiveLearningModel_LidarOnly(nn.Module):
def __init__(self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.normalize = normalize
self.LidarEncoder = models.resnet18()
self.LidarEncoder.fc = nn.Sequential()
_tmp = self.LidarEncoder.conv1
self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
kernel_size=_tmp.kernel_size, stride=_tmp.stride,
padding=_tmp.padding,
bias=_tmp.bias)
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1),
)
embed_dim = num_classes
def forward(self, lidar):
lidar_ft = self.LidarEncoder(lidar) # dims: 512
return lidar_ft
class ContrastiveLearningModel_merge(nn.Module):
"""Contrastive Learning model with merge layer integrated"""
def __init__(self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.ImageCNN = models.resnet34(pretrained=True)
self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
self.LidarEncoder = models.resnet18()
self.LidarEncoder.fc = nn.Sequential()
_tmp = self.LidarEncoder.conv1
self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
kernel_size=_tmp.kernel_size, stride=_tmp.stride,
padding=_tmp.padding,
bias=_tmp.bias)
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1),
)
embed_dim = num_classes
self.merge = torch.nn.Sequential(
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Linear(2 * embed_dim, 2 * embed_dim),
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Linear(2 * embed_dim, embed_dim))
def forward(self, image, lidar):
if self.normalize:
image = normalize_imagenet(image)
image_ft = self.ImageCNN(image) # dims: 512
lidar_ft = self.LidarEncoder(lidar) # dims: 512
concat_ft = (image_ft, lidar_ft)
concat_ft = torch.cat(concat_ft, dim=1)
concat_ft = self.merge(concat_ft)
concat_ft = self.flatten(concat_ft)
return concat_ft
class ImitationLearningModel(nn.Module):
def __init__ (self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.ImageCNN = models.resnet34(pretrained=True)
self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
self.LidarEncoder = models.resnet18()
self.LidarEncoder.fc = nn.Sequential()
_tmp = self.LidarEncoder.conv1
self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
kernel_size=_tmp.kernel_size, stride=_tmp.stride, padding=_tmp.padding, bias=_tmp.bias)
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 100),
nn.ReLU(True),
nn.Dropout(p=0.2),
nn.Linear(100, 10),
nn.ReLU(True),
nn.Linear(10, 1)
)
embed_dim = num_classes
self.merge = torch.nn.Sequential(
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Linear(2 * embed_dim, 2 * embed_dim),
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Dropout(0.1), torch.nn.Linear(2 * embed_dim, embed_dim))
def forward(self, image, lidar):
if self.normalize:
image = normalize_imagenet(image)
image_ft = self.ImageCNN(image)
lidar_ft = self.LidarEncoder(lidar)
final_ft = (image_ft,lidar_ft)
final_ft = torch.cat(final_ft, dim=1)
final_ft = self.merge(final_ft)
final_ft = self.flatten(final_ft)
final_ft = self.fullyconn(final_ft)
return final_ft
class ImitationLearningModel_ImageOnly(nn.Module):
def __init__ (self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.ImageCNN = models.resnet34(pretrained=True)
self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1)
)
def forward(self, image):
if self.normalize:
image = normalize_imagenet(image)
image_ft = self.ImageCNN(image)
final_ft = image_ft
final_ft = self.flatten(final_ft)
final_ft = self.fullyconn(final_ft)
return final_ft
class ImitationLearningModel_LidarOnly(nn.Module):
def __init__ (self, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.LidarEncoder = models.resnet18()
self.LidarEncoder.fc = nn.Sequential()
_tmp = self.LidarEncoder.conv1
self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
kernel_size=_tmp.kernel_size, stride=_tmp.stride, padding=_tmp.padding, bias=_tmp.bias)
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1)
)
def forward(self, lidar):
lidar_ft = self.LidarEncoder(lidar)
final_ft = lidar_ft
final_ft = self.flatten(final_ft)
final_ft = self.fullyconn(final_ft)
return final_ft
class ImageCNN(nn.Module):
""" Encoder network for image input list.
Args:
c_dim (int): output dimension of the latent embedding
normalize (bool): whether the input images should be normalized
"""
def __init__(self, c_dim, normalize=True):
super().__init__()
self.normalize = normalize
self.features = models.resnet34(pretrained=True)
self.features.fc = nn.Sequential()
    def forward(self, inputs):
        if self.normalize:
            inputs = normalize_imagenet(inputs)
        return self.features(inputs)
class LidarEncoder(nn.Module):
"""
Encoder network for LiDAR input list
Args:
num_classes: output feature dimension
in_channels: input channels
"""
def __init__(self, num_classes=512, in_channels=2):
super().__init__()
self._model = models.resnet18()
self._model.fc = nn.Sequential()
_tmp = self._model.conv1
self._model.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
kernel_size=_tmp.kernel_size, stride=_tmp.stride, padding=_tmp.padding, bias=_tmp.bias)
    def forward(self, inputs):
        features = self._model(inputs)
        return features
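# Illustrative sketch (shapes are assumptions, not taken from the original repo):
# both standalone encoders strip the ResNet classification head, so each maps its
# modality to a 512-dimensional feature vector. The helper below is only a demo
# and is not called anywhere in this file.
def _demo_encoders():
    img_enc = ImageCNN(c_dim=512)
    pts_enc = LidarEncoder(num_classes=512, in_channels=2)
    img_enc.eval()
    pts_enc.eval()
    with torch.no_grad():
        img_ft = img_enc(torch.rand(2, 3, 224, 224))  # assumed RGB input -> (2, 512)
        pts_ft = pts_enc(torch.rand(2, 2, 224, 224))  # assumed 2-channel LiDAR grid -> (2, 512)
    return img_ft.shape, pts_ft.shape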
class ImitationLearningModel_Contrastive(nn.Module):
def __init__ (self, contrastive_model, num_classes=512, in_channels=2, normalize=True):
super().__init__()
#self.ImageCNN = models.resnet34(pretrained=True)
#self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
#self.LidarEncoder = models.resnet18()
#self.LidarEncoder.fc = nn.Sequential()
#_tmp = self.LidarEncoder.conv1
#self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
# kernel_size=_tmp.kernel_size, stride=_tmp.stride, padding=_tmp.padding, bias=_tmp.bias)
self.contrastive = contrastive_model
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 100),
nn.ReLU(True),
#nn.Dropout(p=0.2),
nn.Linear(100, 10),
nn.ReLU(True),
nn.Linear(10, 1)
)
embed_dim = num_classes
self.merge = torch.nn.Sequential(
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Linear(2 * embed_dim, 2 * embed_dim),
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Dropout(0.1), torch.nn.Linear(2 * embed_dim, embed_dim))
def forward(self, image, lidar):
if self.normalize:
image = normalize_imagenet(image)
with torch.no_grad():
image_ft, lidar_ft = self.contrastive(image, lidar)
final_ft = (image_ft,lidar_ft)
final_ft = torch.cat(final_ft, dim=1)
final_ft = self.merge(final_ft)
final_ft = self.flatten(final_ft)
final_ft = self.fullyconn(final_ft)
return final_ft
class ControlsModel_linear(nn.Module):
"""Linear mapping from merged lidar/img ft to steering angle. No merge layer."""
def __init__ (self, contrastive_model, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.normalize = normalize
self.contrastive = contrastive_model
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1),
)
def forward(self, image, lidar):
if self.normalize:
image = normalize_imagenet(image)
with torch.no_grad():
concat_ft = self.contrastive(image, lidar)
steering_angle = self.fullyconn(concat_ft)
return steering_angle
class ControlsModel_linear_ImageOnly(nn.Module):
"""Linear mapping from merged img ft to steering angle. No merge layer."""
def __init__ (self, contrastive_model, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.normalize = normalize
self.contrastive = contrastive_model
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1),
)
def forward(self, image):
if self.normalize:
image = normalize_imagenet(image)
with torch.no_grad():
img_ft = self.contrastive(image)
steering_angle = self.fullyconn(img_ft)
return steering_angle
class ControlsModel_linear_LidarOnly(nn.Module):
"""Linear mapping from merged lidar ft to steering angle. No merge layer."""
def __init__ (self, contrastive_model, num_classes=512, in_channels=2, normalize=True):
super().__init__()
self.normalize = normalize
self.contrastive = contrastive_model
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1),
)
    def forward(self, lidar):
        with torch.no_grad():
            lidar_ft = self.contrastive(lidar)
        steering_angle = self.fullyconn(lidar_ft)
        return steering_angle
class ControlsModel_FC(nn.Module):
def __init__ (self, contrastive_model, num_classes=512, in_channels=2, normalize=True):
super().__init__()
#self.ImageCNN = models.resnet34(pretrained=True)
#self.ImageCNN.fc = nn.Sequential()
self.normalize = normalize
#self.LidarEncoder = models.resnet18()
#self.LidarEncoder.fc = nn.Sequential()
#_tmp = self.LidarEncoder.conv1
#self.LidarEncoder.conv1 = nn.Conv2d(in_channels, out_channels=_tmp.out_channels,
# kernel_size=_tmp.kernel_size, stride=_tmp.stride, padding=_tmp.padding, bias=_tmp.bias)
self.contrastive = contrastive_model
self.flatten = nn.Sequential(
nn.Flatten()
)
self.fullyconn = nn.Sequential(
nn.Linear(512, 1),
)
embed_dim = num_classes
self.merge = torch.nn.Sequential(
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Linear(2 * embed_dim, 2 * embed_dim),
torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
torch.nn.Dropout(0.1), torch.nn.Linear(2 * embed_dim, embed_dim))
def forward(self, image, lidar):
if self.normalize:
image = normalize_imagenet(image)
with torch.no_grad():
image_ft, lidar_ft = self.contrastive(image, lidar)
final_ft = (image_ft,lidar_ft)
final_ft = torch.cat(final_ft, dim=1)
final_ft = self.merge(final_ft)
final_ft = self.flatten(final_ft)
final_ft = self.fullyconn(final_ft)
return final_ft
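# Illustrative usage sketch, not taken from the original repository: the assumed
# wiring is a ContrastiveLearningModel_merge backbone frozen inside ControlsModel_linear,
# which regresses a single steering angle from the 512-d merged embedding. Input sizes
# (3x224x224 RGB, 2-channel 224x224 LiDAR grid) and the batch size are assumptions.
if __name__ == "__main__":
    backbone = ContrastiveLearningModel_merge(num_classes=512, in_channels=2)
    controls = ControlsModel_linear(backbone, normalize=False)  # backbone already normalizes the image internally
    controls.eval()  # eval mode so the BatchNorm1d layers in the merge head accept any batch size
    image = torch.rand(4, 3, 224, 224)  # RGB frames in [0, 1]
    lidar = torch.rand(4, 2, 224, 224)  # 2-channel LiDAR pseudo-image
    with torch.no_grad():
        steering = controls(image, lidar)
    print(steering.shape)  # expected: torch.Size([4, 1])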
| 37.845178
| 100
| 0.604185
| 1,726
| 14,911
| 4.987833
| 0.071842
| 0.058543
| 0.035777
| 0.022651
| 0.856313
| 0.829829
| 0.821117
| 0.793937
| 0.788361
| 0.763271
| 0
| 0.024102
| 0.290457
| 14,911
| 393
| 101
| 37.941476
| 0.789603
| 0.101335
| 0
| 0.752381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092063
| false
| 0
| 0.019048
| 0
| 0.203175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c247f457e88cf1ec1307404051be483ee6125cc2
| 41,702
|
py
|
Python
|
logistic-regression/distrib_algs.py
|
sands-lab/rethinking-sparsification
|
0687b1b360f5c95068261c81a9de1bc967f75e50
|
[
"MIT"
] | null | null | null |
logistic-regression/distrib_algs.py
|
sands-lab/rethinking-sparsification
|
0687b1b360f5c95068261c81a9de1bc967f75e50
|
[
"MIT"
] | null | null | null |
logistic-regression/distrib_algs.py
|
sands-lab/rethinking-sparsification
|
0687b1b360f5c95068261c81a9de1bc967f75e50
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
import time
import pickle
from numpy.linalg import norm
from scipy.sparse import csr_matrix
from scipy.optimize import minimize
from scipy.stats import norm as norm_d
from scipy.stats import randint
from scipy.stats import bernoulli
from functions import *
import scipy
from copy import deepcopy
def ec_l_svrg_diana(filename, x_init, A, y, gamma, p, sparsificator, sparsificator_params, quant, quant_params, alpha,
data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
num_of_local_data = A[data_split[0]].shape[0]
assert(m == num_of_workers*num_of_local_data)
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
h_vectors = np.tile(np.zeros(n), [num_of_workers,1])
h = np.zeros(n)
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
#this array below is needed to reduce the time of sampling stochastic gradients
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=1000)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
#it is needed for l-svrg updates
bernoulli_arr = bernoulli.rvs(p, size=num_of_workers*1000)
bernoulli_size = len(bernoulli_arr)
w_vectors = np.tile(deepcopy(x), [num_of_workers,1])
grads_w = logreg_grad(x, [A[data_split[0]], y[data_split[0]], l2, sparse_full])
for i in range(num_of_workers-1):
grads_w = np.vstack((grads_w, logreg_grad(x, [A[data_split[i+1]], y[data_split[i+1]], l2, sparse_full])))
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
avg_error_norms = np.array([0]) # average error norm (across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
indices_counter = 0
bernoulli_counter = 0
for it in range(int(S*num_of_local_data)):
if indices_counter == num_of_indices:
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=num_of_indices)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
if bernoulli_counter == bernoulli_size:
bernoulli_arr = bernoulli.rvs(p, size=bernoulli_size)
bernoulli_counter = 0
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
avg_error_norm = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
hat_g_i = logreg_grad(x, [A_i[indices_arr[i][indices_counter:indices_counter+1]],
y_i[indices_arr[i][indices_counter:indices_counter+1]], l2, sparse_stoch]) - logreg_grad(w_vectors[i], [A_i[indices_arr[i][indices_counter:indices_counter+1]],
y_i[indices_arr[i][indices_counter:indices_counter+1]], l2, sparse_stoch]) + grads_w[i]
h_i = h_vectors[i]
g_i = hat_g_i - h_i + h
e_i = error_vectors[i]
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
quant_diff, q_bits_i = quant(hat_g_i - h_i, quant_params)
h_vectors[i] = h_i + alpha * quant_diff
v += v_i
avg_error_norm += norm(e_i)**2
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
bits_sum_temp += bits_i
bits_sum_temp += q_bits_i
if (bernoulli_arr[bernoulli_counter] == 1):
w_vectors[i] = deepcopy(x)
grads_w[i] = logreg_grad(w_vectors[i], [A_i, y_i, l2, sparse_stoch])
num_of_data_passes += 1.0/num_of_workers
bernoulli_counter += 1
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
h = np.mean(h_vectors, axis=0)
x = x - v
indices_counter += 1
num_of_data_passes += 2.0/num_of_local_data
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms}
with open("dump/"+filename+"_EC_L_SVRG_DIANA_gamma_"+str(gamma)+"_l2_"+str(l2)+"_alpha_"+str(alpha)
+"_p_"+str(p)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+"_quantization_"+quant_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
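# Illustrative sketches of the compressor interfaces assumed by the routines in this file.
# The repository's own compressors are passed in as arguments and are not reproduced here;
# the two functions below only show the assumed shape of that contract. A sparsificator maps
# (vector, params) -> (compressed vector, bits sent), with params[0] a human-readable name
# that ends up in the output filename; the DIANA quantizer has the same (vector, params) ->
# (vector, bits) signature.
def topk_sparsificator_example(vec, params):
    # params is assumed to be ["top-k", k]: keep the k largest-magnitude entries
    k = params[1]
    out = np.zeros_like(vec)
    idx = np.argsort(np.abs(vec))[-k:]
    out[idx] = vec[idx]
    bits = k * (32 + int(np.ceil(np.log2(len(vec)))))  # k float32 values plus k indices
    return out, bits
def identity_quant_example(vec, params):
    # params is assumed to be ["identity"]: send the vector uncompressed, 32 bits per coordinate
    return np.array(vec), 32 * len(vec)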
def ec_l_svrg(filename, x_init, A, y, gamma, p, sparsificator, sparsificator_params, data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
num_of_local_data = A[data_split[0]].shape[0]
assert(m == num_of_workers*num_of_local_data)
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
#this array below is needed to reduce the time of sampling stochastic gradients
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=1000)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
#it is needed for l-svrg updates
bernoulli_arr = bernoulli.rvs(p, size=num_of_workers*1000)
bernoulli_size = len(bernoulli_arr)
w_vectors = np.tile(deepcopy(x), [num_of_workers,1])
grads_w = logreg_grad(x, [A[data_split[0]], y[data_split[0]], l2, sparse_full])
for i in range(num_of_workers-1):
grads_w = np.vstack((grads_w, logreg_grad(x, [A[data_split[i+1]], y[data_split[i+1]], l2, sparse_full])))
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
avg_error_norms = np.array([0]) # average error norm (across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
indices_counter = 0
bernoulli_counter = 0
for it in range(int(S*num_of_local_data)):
if indices_counter == num_of_indices:
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=num_of_indices)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
if bernoulli_counter == bernoulli_size:
bernoulli_arr = bernoulli.rvs(p, size=bernoulli_size)
bernoulli_counter = 0
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
avg_error_norm = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
g_i = logreg_grad(x, [A_i[indices_arr[i][indices_counter:indices_counter+1]],
y_i[indices_arr[i][indices_counter:indices_counter+1]], l2, sparse_stoch]) - logreg_grad(w_vectors[i], [A_i[indices_arr[i][indices_counter:indices_counter+1]],
y_i[indices_arr[i][indices_counter:indices_counter+1]], l2, sparse_stoch]) + grads_w[i]
e_i = error_vectors[i]
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
v += v_i
avg_error_norm += norm(e_i)**2
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
bits_sum_temp += bits_i
if (bernoulli_arr[bernoulli_counter] == 1):
w_vectors[i] = deepcopy(x)
grads_w[i] = logreg_grad(w_vectors[i], [A_i, y_i, l2, sparse_stoch])
num_of_data_passes += 1.0/num_of_workers
bernoulli_counter += 1
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
x = x - v
indices_counter += 1
num_of_data_passes += 2.0/num_of_local_data
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms}
with open("dump/"+filename+"_EC_L_SVRG_gamma_"+str(gamma)+"_l2_"+str(l2)+"_p_"+str(p)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
def ec_diana_sgd(filename, x_init, A, y, gamma, sparsificator, sparsificator_params, quant, quant_params, alpha,
data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
num_of_local_data = A[data_split[0]].shape[0]
assert(m == num_of_workers*(A[data_split[0]].shape[0]))
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
h_vectors = np.tile(np.zeros(n), [num_of_workers,1])
h = np.zeros(n)
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
#this array below is needed to reduce the time of sampling stochastic gradients
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=1000)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
avg_error_norms = np.array([0]) # average error norm (across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
for it in range(S*num_of_local_data):
if indices_counter == num_of_indices:
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=num_of_indices)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
avg_error_norm = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
hat_g_i = logreg_grad(x, [A_i[indices_arr[i][indices_counter:indices_counter+1]],
y_i[indices_arr[i][indices_counter:indices_counter+1]], l2, sparse_stoch])
h_i = h_vectors[i]
g_i = hat_g_i - h_i + h
e_i = error_vectors[i]
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
quant_diff, q_bits_i = quant(hat_g_i - h_i, quant_params)
h_vectors[i] = h_i + alpha * quant_diff
v += v_i
avg_error_norm += norm(e_i)**2
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
bits_sum_temp += bits_i
bits_sum_temp += q_bits_i
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
h = np.mean(h_vectors, axis=0)
x = x - v
indices_counter += 1
num_of_data_passes += 1.0/num_of_local_data
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms}
with open("dump/"+filename+"_EC_DIANA_SGD_gamma_"+str(gamma)+"_alpha_"+str(alpha)
+"_l2_"+str(l2)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+"_quantization_"+quant_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
def ec_diana_gd(filename, x_init, A, y, gamma, sparsificator, sparsificator_params, quant, quant_params, alpha,
data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
assert(m == num_of_workers*(A[data_split[0]].shape[0]))
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
h_vectors = np.tile(np.zeros(n), [num_of_workers,1])
h = np.zeros(n)
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
avg_error_norms = np.array([0]) # average error norm (across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
for it in range(S):
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
avg_error_norm = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
hat_g_i = logreg_grad(x, [A_i, y_i, l2, sparse_stoch])
h_i = h_vectors[i]
g_i = hat_g_i - h_i + h
e_i = error_vectors[i]
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
quant_diff, q_bits_i = quant(hat_g_i - h_i, quant_params)
h_vectors[i] = h_i + alpha * quant_diff
v += v_i
avg_error_norm += norm(e_i)**2
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
bits_sum_temp += bits_i
bits_sum_temp += q_bits_i
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
h = np.mean(h_vectors, axis=0)
x = x - v
num_of_data_passes += 1.0
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms}
with open("dump/"+filename+"_EC_DIANA_GD_gamma_"+str(gamma)+"_alpha_"+str(alpha)
+"_l2_"+str(l2)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+"_quantization_"+quant_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
def ec_gd_star_const_stepsize(filename, x_init, A, y, gamma, sparsificator, sparsificator_params, data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
assert(m == num_of_workers*(A[data_split[0]].shape[0]))
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
avg_error_norms = np.array([0]) # average error norm (across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
for it in range(S):
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
avg_error_norm = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
g_i = logreg_grad(x, [A_i, y_i, l2, sparse_stoch]) - logreg_grad(x_star, [A_i, y_i, l2, sparse_stoch])
e_i = error_vectors[i]
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
v += v_i
avg_error_norm += norm(e_i)**2
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
bits_sum_temp += bits_i
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
x = x - v
num_of_data_passes += 1.0
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms}
with open("dump/"+filename+"_EC_GD_star_const_stepsize_gamma_"+str(gamma)+"_l2_"+str(l2)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
def ec_gd_const_stepsize(filename, x_init, A, y, gamma, sparsificator, sparsificator_params, data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
assert(m == num_of_workers*(A[data_split[0]].shape[0]))
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
avg_error_norms = np.array([0]) # average error norm (across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
for it in range(S):
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
avg_error_norm = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
g_i = logreg_grad(x, [A_i, y_i, l2, sparse_stoch])
e_i = error_vectors[i]
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
v += v_i
avg_error_norm += norm(e_i)**2
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
bits_sum_temp += bits_i
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
x = x - v
num_of_data_passes += 1.0
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms}
with open("dump/"+filename+"_EC_GD_const_stepsize_gamma_"+str(gamma)+"_l2_"+str(l2)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
def ec_sgd_const_stepsize(filename, x_init, A, y, gamma, sparsificator, sparsificator_params, data_split,
l2=0, sparse_full=True, sparse_stoch=False, S=50, max_t=np.inf,
batch_size=1, save_info_period=100, x_star=None, f_star=None):
    # m -- total number of data samples
    # n -- dimension of the problem
m, n = A.shape
assert(len(x_init) == n)
assert(len(y) == m)
if x_star is None:
x_star = np.zeros(n)
if f_star is None:
f_star = 0
ref_point = np.array(x_star)
x = np.array(x_init)
num_of_workers = len(data_split)
num_of_local_data = A[data_split[0]].shape[0]
assert(m == num_of_workers*num_of_local_data)
error_vectors = np.tile(np.zeros(n), [num_of_workers,1])
data_sizes = np.array([])
for i in range(num_of_workers):
data_sizes = np.append(data_sizes, len(data_split[i]))
#this array below is needed to reduce the time of sampling stochastic gradients
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=1000)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
its = np.array([0])
tim = np.array([0.0])
data_passes = np.array([0.0])
func_val = np.array([F(x, [A, y, l2, sparse_full, 0])-f_star])
sq_distances = np.array([norm(x - ref_point) ** 2])
number_of_bits = np.array([0]) #counts the number of bits per worker
avg_ecgrad_norms = np.array([0]) # average error-compensated gradient norm (across workers)
avg_grad_norms = np.array([0]) # average gradient norm (across workers)
    avg_error_norms = np.array([0]) # average error norm (across workers)
    avg_ecgrad_topks = np.array([0]) # 10th-largest |error-compensated gradient| entry (averaged across workers)
t_start = time.time()
num_of_data_passes = 0.0
num_of_bits = 0.0
if sparse_stoch:
A_for_batch = A
else:
A_for_batch = A.toarray()
indices_counter = 0
# For DCT
previous_thr = [np.nan]*num_of_workers
# For DGC, initial sparsity is 75%
previous_k = [int(0.25*n)]*num_of_workers
for it in range(int(S*num_of_local_data)):
#print(it)
if indices_counter == num_of_indices:
indices_arr = randint.rvs(low=0, high=data_sizes[0], size=num_of_indices)
num_of_indices = len(indices_arr)
for i in range(num_of_workers-1):
indices_arr = np.vstack((indices_arr, randint.rvs(low=0, high=data_sizes[i+1], size=num_of_indices)))
indices_counter = 0
        # below we emulate the workers' behavior and aggregate their updates on the fly
v = np.zeros(n)
avg_ecgrad_norm = 0
avg_grad_norm = 0
bits_sum_temp = 0
        avg_error_norm = 0
        avg_ecgrad_topk = 0
for i in range(num_of_workers):
A_i = A_for_batch[data_split[i]]
y_i = y[data_split[i]]
g_i = logreg_grad(x, [A_i[indices_arr[i][indices_counter:indices_counter+1]],
y_i[indices_arr[i][indices_counter:indices_counter+1]], l2, sparse_stoch])
e_i = error_vectors[i]
avg_error_norm += norm(e_i)**2
if sparsificator == dct:
                # DCT requires additional arguments: the current iteration count and the previous threshold value
v_i, bits_i, previous_thr[i] = sparsificator(e_i+gamma*g_i, sparsificator_params+[it, previous_thr[i]])
elif sparsificator == dgc:
                # DGC requires additional arguments: the current iteration count and the previous k
v_i, bits_i, previous_k[i] = sparsificator(e_i+gamma*g_i, sparsificator_params+[it, previous_k[i]])
else:
v_i, bits_i = sparsificator(e_i+gamma*g_i, sparsificator_params)
error_vectors[i] = e_i + gamma*g_i - v_i
v += v_i
avg_ecgrad_norm += norm(e_i + gamma*g_i)**2
avg_grad_norm += norm(gamma*g_i)**2
avg_ecgrad_topk += np.sort(np.abs(e_i + gamma*g_i))[-10]
bits_sum_temp += bits_i
v = v / num_of_workers
avg_ecgrad_norm = avg_ecgrad_norm / num_of_workers
avg_error_norm = avg_error_norm / num_of_workers
avg_grad_norm = avg_grad_norm / num_of_workers
avg_ecgrad_topk = avg_ecgrad_topk / num_of_workers
x = x - v
indices_counter += 1
num_of_data_passes += 1.0/num_of_local_data
num_of_bits += bits_sum_temp*1.0/num_of_workers #we count number of bits per worker
if ((it + 1) % save_info_period == 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
avg_ecgrad_topks = np.append(avg_ecgrad_topks, avg_ecgrad_topk)
if tim[-1] > max_t:
break
if ((it + 1) % save_info_period != 0):
its = np.append(its, it + 1)
tim = np.append(tim, time.time() - t_start)
data_passes = np.append(data_passes, num_of_data_passes)
func_val = np.append(func_val, F(x, [A, y, l2, sparse_full, 0])-f_star)
sq_distances = np.append(sq_distances, norm(x - ref_point) ** 2)
number_of_bits = np.append(number_of_bits, num_of_bits)
avg_ecgrad_norms = np.append(avg_ecgrad_norms, avg_ecgrad_norm)
avg_grad_norms = np.append(avg_grad_norms, avg_grad_norm)
avg_error_norms = np.append(avg_error_norms, avg_error_norm)
        avg_ecgrad_topks = np.append(avg_ecgrad_topks, avg_ecgrad_topk)
res = {'last_iter':x, 'func_vals':func_val, 'iters':its, 'time':tim, 'data_passes':data_passes,
'squared_distances':sq_distances, 'bits':number_of_bits, 'avg_ecgrad_norms':avg_ecgrad_norms, 'avg_grad_norms':avg_grad_norms, 'avg_error_norms':avg_error_norms, 'avg_ecgrad_topks':avg_ecgrad_topks}
with open("dump/"+filename+"_EC_SGD_const_stepsize_gamma_"+str(gamma)+"_l2_"+str(l2)+"_num_of_epochs_"+str(S)
+"_num_of_workers_"+str(num_of_workers)+"_sparsificator_"
+sparsificator_params[0]+".txt", 'wb') as file:
pickle.dump(res, file)
return res
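# Illustrative driver, not from the original repository: a toy problem with assumed shapes
# and an assumed {-1, +1} label convention, using the example sparsificator sketched earlier
# in this file. It relies on logreg_grad and F from functions.py (imported above) and on an
# existing "dump/" directory, since every routine pickles its results there.
if __name__ == "__main__":
    m, n, workers = 1000, 40, 4  # m must be divisible by the number of workers (see the asserts above)
    rng = np.random.RandomState(0)
    A = csr_matrix(rng.randn(m, n))
    y = rng.choice([-1.0, 1.0], size=m)  # label convention is an assumption
    data_split = np.array_split(np.arange(m), workers)  # equal-sized shards, one per worker
    res = ec_gd_const_stepsize("toy", np.zeros(n), A, y, gamma=0.1,
                               sparsificator=topk_sparsificator_example,
                               sparsificator_params=["top-k", 4],
                               data_split=data_split, l2=0.1, S=20)
    print(res['func_vals'][-1])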
| 45.427015
| 209
| 0.62738
| 6,568
| 41,702
| 3.63916
| 0.03106
| 0.043929
| 0.053217
| 0.028115
| 0.974856
| 0.970086
| 0.968413
| 0.964815
| 0.960464
| 0.960464
| 0
| 0.017246
| 0.257518
| 41,702
| 917
| 210
| 45.476554
| 0.754707
| 0.071028
| 0
| 0.943495
| 0
| 0
| 0.036438
| 0.002922
| 0
| 0
| 0
| 0
| 0.027595
| 1
| 0.009198
| false
| 0.057819
| 0.017083
| 0
| 0.03548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c249f1be2239d24930e4e689ed054cde45fda95b
| 199
|
py
|
Python
|
python/test.py
|
SuperJerry/Swift
|
53b1c5e78e766424b9210092757c11d977ef8791
|
[
"MIT"
] | 1
|
2015-07-29T09:24:07.000Z
|
2015-07-29T09:24:07.000Z
|
python/test.py
|
SuperJerry/Swift
|
53b1c5e78e766424b9210092757c11d977ef8791
|
[
"MIT"
] | null | null | null |
python/test.py
|
SuperJerry/Swift
|
53b1c5e78e766424b9210092757c11d977ef8791
|
[
"MIT"
] | null | null | null |
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
| 18.090909
| 21
| 0.768844
| 18
| 199
| 8.5
| 0.111111
| 1
| 1.150327
| 1.777778
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.095477
| 199
| 10
| 22
| 19.9
| 0.85
| 0
| 0
| 1
| 0
| 0
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 13
|
dff22d08efb1945443507e61010a14292914bcce
| 21,316
|
py
|
Python
|
python/test/testLocationResult.py
|
blijewski/earthquake-processing-formats
|
defa1ce69e247ddc4ea8b9570bea0420c133fbec
|
[
"CC0-1.0"
] | null | null | null |
python/test/testLocationResult.py
|
blijewski/earthquake-processing-formats
|
defa1ce69e247ddc4ea8b9570bea0420c133fbec
|
[
"CC0-1.0"
] | null | null | null |
python/test/testLocationResult.py
|
blijewski/earthquake-processing-formats
|
defa1ce69e247ddc4ea8b9570bea0420c133fbec
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# package imports
import processingformats.hypocenter
import processingformats.errorEllipse
import processingformats.locationResult
#stdlib imports
import unittest
import datetime
class TestLocationResult(unittest.TestCase):
ID = '12345678'
#HYPOCENTER INCLUDES: lat, long, depth, time, latError, longError, depthError, timeError
HYPOCENTER = processingformats.hypocenter.Hypocenter(40.3344, -121.44, 32.44, datetime.datetime(2019, 5, 17, 15, 53, 00, 0), 12.5, 22.64, 2.44, 1.984)
SUPPORTINGDATA = '{"ID": "12GFH48776857", "Site": {"Station": "BOZ", "Channel": "BHZ", "Network": "US", "Location": "00", "Latitude": 45.59697, "Longitude": -111.62967, "Elevation": 1589.0}, "Source": {"Author": "TestAuthor", "AgencyID": "US", "Type": "Unknown"}, "Time": "2015-12-28T21:32:24.017Z", "Affinity": 1.2, "Quality": 0.45, "Use": True, "PickedPhase": "P", "AssociatedPhase": "P", "LocatedPhase": "P", "Residual": 1.05, "Distance": 2.65, "Azimuth": 21.5, "Weight": 2.65, "Importance": 3.8}'
ASSOCIATEDSTATIONS = 11
ASSOCIATEDPHASES = 22
USEDSTATIONS = 33
USEDPHASES = 44
GAP = 33.67
SECONDARYGAP = 33.67
MINIMUMDISTANCE = 2.14
RMS = 3.8
QUALITY = 'A'
BAYESIANDEPTH = 66.7
BAYESIANRANGE = 20.3
DEPTHIMPORTANCE = 1.8
LOCATOREXITCODE = 'Success'
#ERROR ELLIPSE INCLUDES: E0Error, E0Azimuth, E0Dip, E1Error, E1Azimuth, E1Dip, E2Error, E2Azimuth, E2Dip, maxHor, maxVert, equivHorRad
ERRORELLIPSE = processingformats.errorEllipse.ErrorEllipse(40.3344, -121.44, 32.44, 12.5, 22.64, 2.44, 12.5, 22.64, 2.44, 1.984, 1.984, 1.984)
JSONSTRING = '{"ID": "12345678", "Hypocenter": {"Latitude": 40.3344, "Longitude": -121.44, "Time": "2019-05-17T15:53:00.000Z", "Depth": 32.44, "LatitudeError": 12.5, "LongitudeError": 22.64, "DepthError": 2.44, "TimeError": 1.984}, "SupportingData": [{"ID": "12GFH48776857", "Site": {"Station": "BOZ", "Channel": "BHZ", "Network": "US", "Location": "00", "Latitude": 45.59697, "Longitude": -111.62967, "Elevation": 1589.0}, "Source": {"Author": "TestAuthor", "AgencyID": "US", "Type": "Unknown"}, "Time": "2015-12-28T21:32:24.017Z", "Affinity": 1.2, "Quality": 0.45, "Use": True, "PickedPhase": "P", "AssociatedPhase": "P", "LocatedPhase": "P", "Residual": 1.05, "Distance": 2.65, "Azimuth": 21.5, "Weight": 2.65, "Importance": 3.8}], "NumAssociatedStations": 11, "NumAssociatedPhases": "22", "NumUsedStations": 33, "NumUsedPhases": 44, "Gap": 33.67, "SecondaryGap": 33.67, "MinimumDistance": 2.14, "RMS": 3.8, "Quality": "A", "BayesianDepth": 66.7, "BayesianRange": 20.3, "DepthImportance": 1.8, "LocatorExitCode": "Success", "ErrorEllipse": {"E0Error": 40.3344, "E0Azimuth": -121.44, "E0Dip": 32.44, "E1Error": 12.5, "E1Azimuth": 22.64, "E1Dip": 2.44, "E2Error": 12.5, "E2Azimuth": 22.64, "E2Dip": 2.44, "MaximumHorizontalProjection": 1.984, "MaximumVerticalProjection": 1.984, "EquivalentHorizontalRadius": 1.984}}'
DICT = {"ID": "12345678", "Hypocenter": {"Latitude": 40.3344, "Longitude": -121.44, "Time": "2019-05-17T15:53:00.000Z", "Depth": 32.44, "LatitudeError": 12.5, "LongitudeError": 22.64, "DepthError": 2.44, "TimeError": 1.984}, "SupportingData": [{"ID": "12GFH48776857", "Site": {"Station": "BOZ", "Channel": "BHZ", "Network": "US", "Location": "00", "Latitude": 45.59697, "Longitude": -111.62967, "Elevation": 1589.0}, "Source": {"Author": "TestAuthor", "AgencyID": "US", "Type": "Unknown"}, "Time": "2015-12-28T21:32:24.017Z", "Affinity": 1.2, "Quality": 0.45, "Use": True, "PickedPhase": "P", "AssociatedPhase": "P", "LocatedPhase": "P", "Residual": 1.05, "Distance": 2.65, "Azimuth": 21.5, "Weight": 2.65, "Importance": 3.8}], "NumAssociatedStations": 11, "NumAssociatedPhases": "22", "NumUsedStations": 33, "NumUsedPhases": 44, "Gap": 33.67, "SecondaryGap": 33.67, "MinimumDistance": 2.14, "RMS": 3.8, "Quality": "A", "BayesianDepth": 66.7, "BayesianRange": 20.3, "DepthImportance": 1.8, "LocatorExitCode": "Success", "ErrorEllipse": {"E0Error": 40.3344, "E0Azimuth": -121.44, "E0Dip": 32.44, "E1Error": 12.5, "E1Azimuth": 22.64, "E1Dip": 2.44, "E2Error": 12.5, "E2Azimuth": 22.64, "E2Dip": 2.44, "MaximumHorizontalProjection": 1.984, "MaximumVerticalProjection": 1.984, "EquivalentHorizontalRadius": 1.984}}
def test_init(self):
locationResult = processingformats.locationResult.LocationResult()
self.assertFalse(hasattr(locationResult, 'id'))
self.assertFalse(hasattr(locationResult.hypocenter, 'latitude'))
self.assertFalse(hasattr(locationResult.hypocenter, 'longitude'))
self.assertFalse(hasattr(locationResult.hypocenter, 'depth'))
self.assertFalse(hasattr(locationResult.hypocenter, 'time'))
self.assertFalse(hasattr(locationResult.hypocenter, 'latitudeError'))
self.assertFalse(hasattr(locationResult.hypocenter, 'longitudeError'))
self.assertFalse(hasattr(locationResult.hypocenter, 'depthError'))
self.assertFalse(hasattr(locationResult.hypocenter, 'timeError'))
self.assertFalse(hasattr(locationResult, 'supportingData'))
self.assertFalse(hasattr(locationResult, 'associatedStations'))
self.assertFalse(hasattr(locationResult, 'associatedPhases'))
self.assertFalse(hasattr(locationResult, 'usedStations'))
self.assertFalse(hasattr(locationResult, 'usedPhases'))
self.assertFalse(hasattr(locationResult, 'gap'))
        self.assertFalse(hasattr(locationResult, 'secondaryGap'))
self.assertFalse(hasattr(locationResult, 'minimumDistance'))
self.assertFalse(hasattr(locationResult, 'rms'))
self.assertFalse(hasattr(locationResult, 'quality'))
self.assertFalse(hasattr(locationResult, 'bayesianDepth'))
self.assertFalse(hasattr(locationResult, 'bayesianRange'))
self.assertFalse(hasattr(locationResult, 'depthImportance'))
self.assertFalse(hasattr(locationResult, 'locatorExitCode'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E0Error'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E0Azimuth'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E0Dip'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E1Error'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E1Azimuth'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E1Dip'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E2Error'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E2Azimuth'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'E2Dip'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'maximumHorizontalProjection'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'maximumVerticalProjection'))
self.assertFalse(hasattr(locationResult.errorEllipse, 'equivalentHorizontalRadius'))
        locationResult = processingformats.locationResult.LocationResult(self.ID, self.HYPOCENTER, self.SUPPORTINGDATA, self.ASSOCIATEDSTATIONS, self.ASSOCIATEDPHASES, self.USEDSTATIONS, self.USEDPHASES, self.GAP, self.SECONDARYGAP, self.MINIMUMDISTANCE, self.RMS, self.QUALITY, self.BAYESIANDEPTH, self.BAYESIANRANGE, self.DEPTHIMPORTANCE, self.LOCATOREXITCODE, self.ERRORELLIPSE)
self.assertTrue(hasattr(locationResult, 'id'))
self.assertTrue(hasattr(locationResult.hypocenter, 'latitude'))
self.assertTrue(hasattr(locationResult.hypocenter, 'longitude'))
self.assertTrue(hasattr(locationResult.hypocenter, 'depth'))
self.assertTrue(hasattr(locationResult.hypocenter, 'time'))
self.assertTrue(hasattr(locationResult.hypocenter, 'latitudeError'))
self.assertTrue(hasattr(locationResult.hypocenter, 'longitudeError'))
self.assertTrue(hasattr(locationResult.hypocenter, 'depthError'))
self.assertTrue(hasattr(locationResult.hypocenter, 'timeError'))
self.assertTrue(hasattr(locationResult, 'supportingData'))
self.assertTrue(hasattr(locationResult, 'associatedStations'))
self.assertTrue(hasattr(locationResult, 'associatedPhases'))
self.assertTrue(hasattr(locationResult, 'usedStations'))
self.assertTrue(hasattr(locationResult, 'usedPhases'))
self.assertTrue(hasattr(locationResult, 'gap'))
        self.assertTrue(hasattr(locationResult, 'secondaryGap'))
self.assertTrue(hasattr(locationResult, 'minimumDistance'))
self.assertTrue(hasattr(locationResult, 'rms'))
self.assertTrue(hasattr(locationResult, 'quality'))
self.assertTrue(hasattr(locationResult, 'bayesianDepth'))
self.assertTrue(hasattr(locationResult, 'bayesianRange'))
self.assertTrue(hasattr(locationResult, 'depthImportance'))
self.assertTrue(hasattr(locationResult, 'locatorExitCode'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E0Error'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E0Azimuth'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E0Dip'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E1Error'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E1Azimuth'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E1Dip'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E2Error'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E2Azimuth'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'E2Dip'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'maximumHorizontalProjection'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'maximumVerticalProjection'))
self.assertTrue(hasattr(locationResult.errorEllipse, 'equivalentHorizontalRadius'))
self.assertEqual(locationResult.id, self.ID)
        self.assertEqual(locationResult.hypocenter.latitude, self.HYPOCENTER.latitude)
        self.assertEqual(locationResult.hypocenter.longitude, self.HYPOCENTER.longitude)
        self.assertEqual(locationResult.hypocenter.depth, self.HYPOCENTER.depth)
        self.assertEqual(locationResult.hypocenter.time, self.HYPOCENTER.time)
        self.assertEqual(locationResult.hypocenter.latitudeError, self.HYPOCENTER.latitudeError)
        self.assertEqual(locationResult.hypocenter.longitudeError, self.HYPOCENTER.longitudeError)
        self.assertEqual(locationResult.hypocenter.depthError, self.HYPOCENTER.depthError)
        self.assertEqual(locationResult.hypocenter.timeError, self.HYPOCENTER.timeError)
self.assertEqual(locationResult.supportingData, self.SUPPORTINGDATA)
self.assertEqual(locationResult.associatedStations, self.ASSOCIATEDSTATIONS)
self.assertEqual(locationResult.associatedPhases, self.ASSOCIATEDPHASES)
self.assertEqual(locationResult.usedStations, self.USEDSTATIONS)
self.assertEqual(locationResult.usedPhases, self.USEDPHASES)
self.assertEqual(locationResult.gap, self.GAP)
self.assertEqual(locationResult.secondaryGap, self.SECONDARYGAP)
self.assertEqual(locationResult.minimumDistance, self.MINIMUMDISTANCE)
self.assertEqual(locationResult.rms, self.RMS)
self.assertEqual(locationResult.quality, self.QUALITY)
self.assertEqual(locationResult.bayesianDepth, self.BAYESIANDEPTH)
self.assertEqual(locationResult.bayesianRange, self.BAYESIANRANGE)
self.assertEqual(locationResult.depthImportance, self.DEPTHIMPORTANCE)
self.assertEqual(locationResult.locatorExitCode, self.LOCATOREXITCODE)
        self.assertEqual(locationResult.errorEllipse.E0Error, self.ERRORELLIPSE.E0Error)
        self.assertEqual(locationResult.errorEllipse.E0Azimuth, self.ERRORELLIPSE.E0Azimuth)
        self.assertEqual(locationResult.errorEllipse.E0Dip, self.ERRORELLIPSE.E0Dip)
        self.assertEqual(locationResult.errorEllipse.E1Error, self.ERRORELLIPSE.E1Error)
        self.assertEqual(locationResult.errorEllipse.E1Azimuth, self.ERRORELLIPSE.E1Azimuth)
        self.assertEqual(locationResult.errorEllipse.E1Dip, self.ERRORELLIPSE.E1Dip)
        self.assertEqual(locationResult.errorEllipse.E2Error, self.ERRORELLIPSE.E2Error)
        self.assertEqual(locationResult.errorEllipse.E2Azimuth, self.ERRORELLIPSE.E2Azimuth)
        self.assertEqual(locationResult.errorEllipse.E2Dip, self.ERRORELLIPSE.E2Dip)
        self.assertEqual(locationResult.errorEllipse.maximumHorizontalProjection, self.ERRORELLIPSE.maximumHorizontalProjection)
        self.assertEqual(locationResult.errorEllipse.maximumVerticalProjection, self.ERRORELLIPSE.maximumVerticalProjection)
        self.assertEqual(locationResult.errorEllipse.equivalentHorizontalRadius, self.ERRORELLIPSE.equivalentHorizontalRadius)
def test_toJSON(self):
        locationResult = processingformats.locationResult.LocationResult(self.ID, self.HYPOCENTER, self.SUPPORTINGDATA, self.ASSOCIATEDSTATIONS, self.ASSOCIATEDPHASES, self.USEDSTATIONS, self.USEDPHASES, self.GAP, self.SECONDARYGAP, self.MINIMUMDISTANCE, self.RMS, self.QUALITY, self.BAYESIANDEPTH, self.BAYESIANRANGE, self.DEPTHIMPORTANCE, self.LOCATOREXITCODE, self.ERRORELLIPSE)
self.assertEqual(locationResult.toJSONString(), self.JSONSTRING)
def test_fromJSON(self):
locationResult = processingformats.locationResult.LocationResult()
locationResult.fromJSONString(self.JSONSTRING)
self.assertEqual(locationResult.id, self.ID)
self.assertEqual(locationResult.hypocenter.latitude, self.HYPOCENTER.latitude)
self.assertEqual(locationResult.hypocenter.longitude, self.HYPOCENTER.longitude)
self.assertEqual(locationResult.hypocenter.depth, self.HYPOCENTER.depth)
self.assertEqual(locationResult.hypocenter.time, self.HYPOCENTER.time)
self.assertEqual(locationResult.hypocenter.latitudeError, self.HYPOCENTER.latitudeError)
self.assertEqual(locationResult.hypocenter.longitudeError, self.HYPOCENTER.longitudeError)
self.assertEqual(locationResult.hypocenter.depthError, self.HYPOCENTER.depthError)
self.assertEqual(locationResult.hypocenter.timeError, self.HYPOCENTER.timeError)
self.assertEqual(locationResult.supportingData, self.SUPPORTINGDATA)
self.assertEqual(locationResult.associatedStations, self.ASSOCIATEDSTATIONS)
self.assertEqual(locationResult.associatedPhases, self.ASSOCIATEDPHASES)
self.assertEqual(locationResult.usedStations, self.USEDSTATIONS)
self.assertEqual(locationResult.usedPhases, self.USEDPHASES)
self.assertEqual(locationResult.gap, self.GAP)
self.assertEqual(locationResult.secondaryGap, self.SECONDARYGAP)
self.assertEqual(locationResult.minimumDistance, self.MINIMUMDISTANCE)
self.assertEqual(locationResult.rms, self.RMS)
self.assertEqual(locationResult.quality, self.QUALITY)
self.assertEqual(locationResult.bayesianDepth, self.BAYESIANDEPTH)
self.assertEqual(locationResult.bayesianRange, self.BAYESIANRANGE)
self.assertEqual(locationResult.depthImportance, self.DEPTHIMPORTANCE)
self.assertEqual(locationResult.locatorExitCode, self.LOCATOREXITCODE)
self.assertEqual(locationResult.errorEllipse.E0Error, self.ERRORELLIPSE.E0Error)
self.assertEqual(locationResult.errorEllipse.E0Azimuth, self.ERRORELLIPSE.E0Azimuth)
self.assertEqual(locationResult.errorEllipse.E0Dip, self.ERRORELLIPSE.E0Dip)
self.assertEqual(locationResult.errorEllipse.E1Error, self.ERRORELLIPSE.E1Error)
self.assertEqual(locationResult.errorEllipse.E1Azimuth, self.ERRORELLIPSE.E1Azimuth)
self.assertEqual(locationResult.errorEllipse.E1Dip, self.ERRORELLIPSE.E1Dip)
self.assertEqual(locationResult.errorEllipse.E2Error, self.ERRORELLIPSE.E2Error)
self.assertEqual(locationResult.errorEllipse.E2Azimuth, self.ERRORELLIPSE.E2Azimuth)
self.assertEqual(locationResult.errorEllipse.E2Dip, self.ERRORELLIPSE.E2Dip)
self.assertEqual(locationResult.errorEllipse.maximumHorizontalProjection, self.ERRORELLIPSE.maximumHorizontalProjection)
self.assertEqual(locationResult.errorEllipse.maximumVerticalProjection, self.ERRORELLIPSE.maximumVerticalProjection)
self.assertEqual(locationResult.errorEllipse.equivalentHorizontalRadius, self.ERRORELLIPSE.equivalentHorizontalRadius)
def test_toDict(self):
locationResult = processingformats.locationResult.LocationResult(self.ID, self.HYPOCENTER, self.SUPPORTINGDATA, self.ASSOCIATEDSTATIONS, self.ASSOCIATEDPHASES, self.USEDSTATIONS, self.USEDPHASES, self.GAP, self.SECONDARYGAP, self.MINIMUMDISTANCE, self.RMS, self.QUALITY, self.BAYESIANDEPTH, self.BAYESIANRANGE, self.DEPTHIMPORTANCE, self.LOCATOREXITCODE, self.ERRORELLIPSE)
self.assertEqual(locationResult.toDict(), self.DICT)
def test_fromDict(self):
locationResult = processingformats.locationResult.LocationResult()
locationResult.fromDict(self.DICT)
self.assertEqual(locationResult.id, self.ID)
self.assertEqual(locationResult.hypocenter.latitude, self.HYPOCENTER.latitude)
self.assertEqual(locationResult.hypocenter.longitude, self.HYPOCENTER.longitude)
self.assertEqual(locationResult.hypocenter.depth, self.HYPOCENTER.depth)
self.assertEqual(locationResult.hypocenter.time, self.HYPOCENTER.time)
self.assertEqual(locationResult.hypocenter.latitudeError, self.HYPOCENTER.latitudeError)
self.assertEqual(locationResult.hypocenter.longitudeError, self.HYPOCENTER.longitudeError)
self.assertEqual(locationResult.hypocenter.depthError, self.HYPOCENTER.depthError)
self.assertEqual(locationResult.hypocenter.timeError, self.HYPOCENTER.timeError)
self.assertEqual(locationResult.supportingData, self.SUPPORTINGDATA)
self.assertEqual(locationResult.associatedStations, self.ASSOCIATEDSTATIONS)
self.assertEqual(locationResult.associatedPhases, self.ASSOCIATEDPHASES)
self.assertEqual(locationResult.usedStations, self.USEDSTATIONS)
self.assertEqual(locationResult.usedPhases, self.USEDPHASES)
self.assertEqual(locationResult.gap, self.GAP)
self.assertEqual(locationResult.secondaryGap, self.SECONDARYGAP)
self.assertEqual(locationResult.minimumDistance, self.MINIMUMDISTANCE)
self.assertEqual(locationResult.rms, self.RMS)
self.assertEqual(locationResult.quality, self.QUALITY)
self.assertEqual(locationResult.bayesianDepth, self.BAYESIANDEPTH)
self.assertEqual(locationResult.bayesianRange, self.BAYESIANRANGE)
self.assertEqual(locationResult.depthImportance, self.DEPTHIMPORTANCE)
self.assertEqual(locationResult.locatorExitCode, self.LOCATOREXITCODE)
self.assertEqual(locationResult.errorEllipse.E0Error, self.ERRORELLIPSE.E0Error)
self.assertEqual(locationResult.errorEllipse.E0Azimuth, self.ERRORELLIPSE.E0Azimuth)
self.assertEqual(locationResult.errorEllipse.E0Dip, self.ERRORELLIPSE.E0Dip)
self.assertEqual(locationResult.errorEllipse.E1Error, self.ERRORELLIPSE.E1Error)
self.assertEqual(locationResult.errorEllipse.E1Azimuth, self.ERRORELLIPSE.E1Azimuth)
self.assertEqual(locationResult.errorEllipse.E1Dip, self.ERRORELLIPSE.E1Dip)
self.assertEqual(locationResult.errorEllipse.E2Error, self.ERRORELLIPSE.E2Error)
self.assertEqual(locationResult.errorEllipse.E2Azimuth, self.ERRORELLIPSE.E2Azimuth)
self.assertEqual(locationResult.errorEllipse.E2Dip, self.ERRORELLIPSE.E2Dip)
self.assertEqual(locationResult.errorEllipse.maximumHorizontalProjection, self.ERRORELLIPSE.maximumHorizontalProjection)
self.assertEqual(locationResult.errorEllipse.maximumVerticalProjection, self.ERRORELLIPSE.maximumVerticalProjection)
self.assertEqual(locationResult.errorEllipse.equivalentHorizontalRadius, self.ERRORELLIPSE.equivalentHorizontalRadius)
def test_isValid(self):
locationResult = processingformats.locationResult.LocationResult(self.ID, self.HYPOCENTER, self.SUPPORTINGDATA, self.ASSOCIATEDSTATIONS, self.ASSOCIATEDPHASES, self.USEDSTATIONS, self.USEDPHASES, self.GAP, self.SECONDARYGAP, self.MINIMUMDISTANCE, self.RMS, self.QUALITY, self.BAYESIANDEPTH, self.BAYESIANRANGE, self.DEPTHIMPORTANCE, self.LOCATOREXITCODE, self.ERRORELLIPSE)
self.assertTrue(locationResult.isValid())
badLocationResult = processingformats.locationResult.LocationResult()
self.assertFalse(badLocationResult.isValid())
if __name__ == '__main__':
unittest.main()
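# Hedged illustration, not part of the test module above: the tests follow a
# build -> serialize -> rebuild -> compare-field-by-field pattern. A tiny
# self-contained mirror of that pattern, with a hypothetical Point class standing
# in for LocationResult (the real class lives in processingformats), looks like this:
import json
import unittest

class Point:
    def __init__(self, x=None, y=None):
        self.x = x
        self.y = y

    def toDict(self):
        return {"X": self.x, "Y": self.y}

    def fromDict(self, aDict):
        self.x = aDict["X"]
        self.y = aDict["Y"]

    def toJSONString(self):
        return json.dumps(self.toDict())

    def fromJSONString(self, aString):
        self.fromDict(json.loads(aString))

class TestPoint(unittest.TestCase):
    X = 1.5
    Y = -2.0

    def test_roundtrip(self):
        # serialize a populated object, rebuild an empty one, compare fields
        point = Point(self.X, self.Y)
        rebuilt = Point()
        rebuilt.fromJSONString(point.toJSONString())
        self.assertEqual(rebuilt.x, self.X)
        self.assertEqual(rebuilt.y, self.Y)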
| 82.941634
| 1,322
| 0.753565
| 1,903
| 21,316
| 8.433526
| 0.085654
| 0.100006
| 0.193345
| 0.091968
| 0.871581
| 0.716244
| 0.700729
| 0.699981
| 0.698486
| 0.698486
| 0
| 0.037196
| 0.127228
| 21,316
| 257
| 1,323
| 82.941634
| 0.825468
| 0.013699
| 0
| 0.506787
| 0
| 0.00905
| 0.156144
| 0.024597
| 0
| 0
| 0
| 0
| 0.809955
| 1
| 0.027149
| false
| 0
| 0.076923
| 0
| 0.19457
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a074cdbb38b86decd68c256289da7361f9dc6a8a
| 32,853
|
py
|
Python
|
plugins/admin.py
|
BWBellairs/Andromeda
|
e01f5cfef1e14e4f1f7e94d333c1dee5f44716d7
|
[
"MIT"
] | null | null | null |
plugins/admin.py
|
BWBellairs/Andromeda
|
e01f5cfef1e14e4f1f7e94d333c1dee5f44716d7
|
[
"MIT"
] | null | null | null |
plugins/admin.py
|
BWBellairs/Andromeda
|
e01f5cfef1e14e4f1f7e94d333c1dee5f44716d7
|
[
"MIT"
] | null | null | null |
from fnmatch import fnmatch
from time import sleep
import subprocess
import random as rand
from utils import *
import utils
name = "admin"
cmds = ["join", "part", "nick", "quit", "raw", ">>", ">", "op", "deop",
"voice", "devoice", "ban", "kban", "unban", "sop", "sdeop",
"svoice", "sdevoice", "squiet", "sunquiet", "kick", "quiet",
"unquiet", "mode"]
def main(irc):
if name not in irc.plugins:
irc.plugins[name] = {}
if name not in irc.state["plugins"]:
irc.state["plugins"][name] = {}
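# Hedged aside, not part of this plugin: add_cmd and add_handler come in through
# the wildcard import from utils above, so their definitions are not visible here.
# A minimal stand-in that supports both the decorator form (@add_cmd) and the
# explicit form used below (add_cmd(botquit, "quit"), add_cmd(_exec, ">>")) could
# look like the commented sketch here; the real helpers may also track help text
# and per-plugin ownership. It is kept in comments so it does not shadow the real
# utils helpers this module relies on.
#
#   COMMANDS = {}
#
#   def add_cmd(func, name=None):
#       """Register func under name, defaulting to the function's own name."""
#       COMMANDS[name or func.__name__] = func
#       return func  # returning func lets the same helper double as a decorator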
@add_cmd
def join(irc, event, args):
"""<channel> [<key>,<channel>...]
Makes the bot join <channel> using <key> if given.
If no key is given but the bot already has a record
of the channel's key, it will attempt to use that.
"""
args = " ".join(args)
for channel in args.split(","):
channel = channel.split()
if is_allowed(irc, event.source, channel[0]):
if irc.is_channel(channel[0]):
if len(channel) > 1:
irc.join(channel[0], channel[1])
else:
if channel[0] in irc.channels.keys() and "key" in irc.channels[channel[0]].keys() and irc.channels[channel[0]]["key"]:
key = irc.channels[channel[0]]["key"]
irc.join(channel[0], key)
else:
irc.join(channel[0])
else:
irc.reply(event, "ERROR: Invalid channel: {}".format(channel[0]))
@add_cmd
def part(irc, event, args):
"""[<channel>] [<message>]
Parts <channel> with <message> if given. <channel>
is only necessary if the command isn't given in the
channel itself.
"""
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
reason = " ".join(args[1:])
else:
reason = event.source.nick
elif not is_private(event):
channel = event.target
reason = " ".join(args)
else:
irc.reply(event, "ERROR: No channel specified.")
return
elif not is_private(event):
channel = event.target
reason = event.source.nick
else:
irc.reply(event, "ERROR: No channel specified.")
return
if is_owner(irc, event.source, channel):
irc.part(channel, reason)
@add_cmd
def nick(irc, event, args):
"""<nick>
Changes the bot's nick to <nick>.
"""
if is_allowed(irc, event.source): # Checks if the user is on the global allowed list
irc.chgnick(args[0]) # Calls the nickname change if the above function returns True
def botquit(irc, event, args):
"""[<message>]
Makes the bot quit with <message> if given.
"""
if is_owner(irc, event.source):
if len(args) > 0:
irc.quit(" ".join(args))
else:
irc.quit(event.source.nick)
add_cmd(botquit, "quit")
@add_cmd
def raw(irc, event, args):
"""<command>
Sends <command> to the IRC server.
"""
if is_owner(irc, event.source):
irc.send(" ".join(args))
def _exec(irc, event, args):
"""<code>
Executes <code> in a Python interpreter.
"""
if is_owner(irc, event.source):
output = utils.console({"irc": irc, "utils": utils, "event": event}).run(" ".join(args))
if output is not None:
irc.reply(event, output)
add_cmd(_exec, ">>")
def _shell(irc, event, args):
"""<command>
Executes <command> on the shell.
"""
if is_owner(irc, event.source):
args = " ".join(args)
try:
s = subprocess.check_output(args+" | ./ircize --remove", stderr=subprocess.STDOUT, shell=True)
if s:
s = s.decode()
for line in str(s).splitlines():
irc.reply(event, line)
except subprocess.CalledProcessError as e:
irc.reply(event, e)
add_cmd(_shell, ">")
@add_cmd
def sop(irc, event, args):
"""[<channel>] [<nick>...]
Ops <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("sop"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in list(nicks):  # iterate over a copy; entries are removed below
if irc.is_opped(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "OP {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def sdeop(irc, event, args):
"""[<channel>] [<nick>...]
Deops <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("sdeop"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in list(nicks):  # iterate over a copy; entries are removed below
if not irc.is_opped(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "DEOP {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def svoice(irc, event, args):
"""[<channel>] [<nick>...]
Voices <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("svoice"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in list(nicks):  # iterate over a copy; entries are removed below
if irc.is_voiced(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "VOICE {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def sdevoice(irc, event, args):
"""[<channel>] [<nick>...]
Devoices <nick> (or the bot if no <nick> is given) in <channel> using services.
<channel> is only necessary if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [irc.get_nick()]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [irc.get_nick()]
except IndexError:
irc.reply(event, utils.gethelp("sdevoice"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
for nick in list(nicks):  # iterate over a copy; entries are removed below
if not irc.is_voiced(nick, channel):
nicks.remove(nick)
if len(nicks) > 0:
irc.privmsg("ChanServ", "DEVOICE {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def squiet(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...]
Quiets <nick> in <channel> using services. <channel> is only necessary
if the command isn't sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("squiet"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
irc.privmsg("ChanServ", "QUIET {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def sunquiet(irc, event, args):
"""[<channel>] [<nick|hostmask>...]
Unquiets <nick> (or yourself if no <nick> is given) in <channel>
using services. <channel> is only necessary if the command isn't
sent in the channel itself.
"""
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [event.source.nick]
except IndexError:
irc.reply(event, utils.gethelp("sunquiet"))
else:
if utils.is_allowed(irc, event.source, channel):
try:
if irc.channels[channel].get("chanserv", irc.chanserv):
irc.privmsg("ChanServ", "UNQUIET {} {}".format(channel, " ".join(nicks)))
except KeyError:
pass
@add_cmd
def op(irc, event, args):
"""[<channel>] [<nick>...]
Ops <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the
channel itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("op"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
for nick in nicks:
if not irc.is_opped(nick, channel):
setmodes.append("+o {}".format(nick))
if len(setmodes) == 0:
return
if not already_op and irc.get_nick() not in nicks:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def deop(irc, event, args):
"""[<channel>] [<nick>...]
Deops <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't set in the channel
itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("deop"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
if irc.get_nick() in nicks:
nicks.remove(irc.get_nick())
if irc.channels[channel].get("chanserv", irc.chanserv) and "ChanServ" in nicks:
nicks.remove("ChanServ")
for nick in nicks:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if len(setmodes) == 0:
return
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def voice(irc, event, args):
"""[<channel>] [<nick>...]
Voices <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("devoice"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
for nick in nicks:
if not irc.is_voiced(nick, channel):
setmodes.append("+v {}".format(nick))
if len(setmodes) == 0:
return
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def devoice(irc, event, args):
"""[<channel>] [<nick>...]
Devoices <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if len(args) == 0:
nicks = [event.source.nick]
channel = event.target
elif irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
nicks = args
channel = event.target
except IndexError:
irc.reply(event, utils.gethelp("devoice"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if "*" in nicks:
nicks = irc.state["channels"][channel]["names"]
for nick in nicks:
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
if len(setmodes) == 0:
return
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def ban(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...]
Bans <nick> in <channel>. <channel> is only necessary if the command
isn't sent in the channel itself.
"""
setmodes = []
affected = []
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("ban"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
bmask = nick
else:
bmask = utils.banmask(irc, nick)
setmodes.append("+b {}".format(bmask))
for affect in utils.ban_affects(irc, channel, bmask):
if affect not in affected and affect != irc.get_nick():
affected.append(affect)
for nick in affected:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick())) # remove op from self after ban
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def kban(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...] [:][<reason>]
Bans <nick> in <channel> and kicks anyone affected using <reason>
as the kick message if specified. <channel> is only necessary if
the command isn't sent in the channel itself. It is recommended to
use ':' as a separator between <nick> and <reason>; otherwise, if
there's a nick in the channel matching the first word in reason it
will be kicked.
"""
prepare_nicks = []
setmodes = []
affected = []
reason = None
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("kban"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if nick in irc.state["channels"][channel]["names"] and nick not in prepare_nicks and not nick.startswith(":"):
prepare_nicks.append(nick)
elif utils.is_hostmask(nick):
prepare_nicks.append(nick)
else:
reason = " ".join(nicks[len(prepare_nicks):]).lstrip(": ")
break
nicks = prepare_nicks
for nick in nicks:
if utils.is_hostmask(nick):
bmask = nick
else:
bmask = utils.banmask(irc, nick)
setmodes.append("+b {}".format(bmask))
for affect in utils.ban_affects(irc, channel, bmask):
if affect not in affected and affect != irc.get_nick():
if irc.is_opped(affect, channel):
setmodes.append("-o {}".format(affect))
if irc.is_voiced(affect, channel):
setmodes.append("-v {}".format(affect))
affected.append(affect)
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
for nick in affected:
if reason:
irc.kick(channel, nick, reason)
else:
irc.kick(channel, nick)
if not already_op:
irc.mode(channel, "-o {}".format(irc.get_nick()))
@add_cmd
def kick(irc, event, args):
"""[<channel>] <nick> [<nick>...] [:][<reason>]
Kicks <nick> in <channel>. <channel> is only necessary if the
command isn't sent in the channel itself. It is recommended to
use ':' as a separator between <nick> and <reason>; otherwise, if
there's a nick in the channel matching the first word in reason it
will be kicked.
"""
prepare_nicks = []
reason = None
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("kick"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if nick in irc.state["channels"][channel]["names"] and nick not in prepare_nicks and not nick.startswith(":"):
prepare_nicks.append(nick)
else:
reason = " ".join(nicks[len(prepare_nicks):]).lstrip(": ")
break
nicks = prepare_nicks
already_op = irc.is_opped(irc.get_nick(), channel)
gotop = utils.getop(irc, channel)
if gotop:
for nick in nicks:
if reason:
irc.kick(channel, nick, reason)
else:
irc.kick(channel, nick)
if not already_op:
irc.mode(channel, "-o {}".format(irc.get_nick()))
@add_cmd
def unban(irc, event, args):
"""[<channel>] [<nick|hostmask>...]
Unbans <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [event.source.nick]
except IndexError:
irc.reply(event, utils.gethelp("unban"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
hmask = nick
else:
hmask = utils.gethm(irc, nick)
if hmask and channel in irc.state["channels"]:
for bmask in irc.state["channels"][channel]["bans"]:
if fnmatch(utils.irclower(hmask), utils.irclower(bmask)):
setmodes.append("-b {}".format(bmask))
else:
return
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def quiet(irc, event, args):
"""[<channel>] <nick|hostmask> [<nick|hostmask>...]
Quiets <nick> in <channel>. <channel> is only necessary if the command
isn't sent in the channel itself.
"""
setmodes = []
affected = []
try:
if utils.is_private(event):
channel = args[0]
nicks = args[1:]
else:
if irc.is_channel(args[0]):
channel = args[0]
nicks = args[1:]
else:
channel = event.target
nicks = args
except IndexError:
irc.reply(event, utils.gethelp("quiet"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
bmask = nick
else:
bmask = utils.banmask(irc, nick)
setmodes.append("+q {}".format(bmask))
for affect in utils.ban_affects(irc, channel, bmask):
if affect not in affected and affect != irc.get_nick():
affected.append(affect)
for nick in affected:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def unquiet(irc, event, args):
"""[<channel>] [<nick|hostmask>...]
Unquiets <nick> (or yourself if no <nick> is specified) in <channel>.
<channel> is only necessary if the command isn't sent in the channel
itself.
"""
setmodes = []
try:
if utils.is_private(event):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
if len(args) > 0:
if irc.is_channel(args[0]):
channel = args[0]
if len(args) > 1:
nicks = args[1:]
else:
nicks = [event.source.nick]
else:
channel = event.target
nicks = args
else:
channel = event.target
nicks = [event.source.nick]
except IndexError:
irc.reply(event, utils.gethelp("unquiet"))
else:
if utils.is_allowed(irc, event.source, channel):
for nick in nicks:
if utils.is_hostmask(nick):
hmask = nick
else:
hmask = utils.gethm(irc, nick)
if hmask and channel in irc.state["channels"]:
for bmask in irc.state["channels"][channel]["quiets"]:
if fnmatch(utils.irclower(hmask), utils.irclower(bmask)):
setmodes.append("-q {}".format(bmask))
else:
return
if len(setmodes) == 0:
return
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for mode in utils.unsplit_modes(setmodes):
irc.mode(channel, mode)
@add_cmd
def mode(irc, event, args):
"""[<channel>] <modes>
Sets <modes> in <channel>. <channel> is only necessary if the command
isn't sent in the channel itself.
"""
try:
if utils.is_private(event) or irc.is_channel(args[0]):
if args[0] in irc.state["channels"]:
channel = args[0]
setmodes = utils.split_modes(args[1:])
elif not utils.is_private(event):
channel = event.target
setmodes = utils.split_modes(args)
else:
irc.reply(event, utils.gethelp("mode"))
return
else:
channel = event.target
setmodes = utils.split_modes(args)
except IndexError:
irc.reply(event, utils.gethelp("mode"))
else:
if utils.is_allowed(irc, event.source, channel):
already_op = irc.is_opped(irc.get_nick(), channel)
if not already_op:
setmodes.append("-o {}".format(irc.get_nick()))
gotop = utils.getop(irc, channel)
if gotop:
for modes in utils.unsplit_modes(setmodes):
irc.mode(channel, modes)
@add_cmd
def random(irc, event, args): # I'll delete this after
"""takes no arguments
Returns random statement
"""
random_events = ["moo{}".format("o"*rand.randint(0, 100)), "lol"]
irc.reply(event, rand.choice(random_events))
def on_mode(irc, conn, event):
channel = event.target
modes = utils.split_modes(event.arguments)
for mode in modes:
if mode.startswith("+b"):
if event.source.nick == irc.get_nick():
continue
mask = mode.split()[1]
affects = utils.ban_affects(irc, channel, mask)
names = irc.state["channels"][channel]["names"]
if len(affects) >= len(names) / 2:
setmodes = []
bmask = utils.banmask(irc, event.source)
setmodes.append("-b {}".format(mask))
baffects = utils.ban_affects(irc, channel, bmask)
for nick in baffects:
if irc.is_opped(nick, channel):
setmodes.append("-o {}".format(nick))
if irc.is_voiced(nick, channel):
setmodes.append("-v {}".format(nick))
setmodes.append("+b {}".format(bmask))
already_op = irc.is_opped(irc.get_nick(), channel)
gotop = utils.getop(irc, channel)
if gotop:
for modes in utils.unsplit_modes(setmodes):
irc.mode(channel, modes)
for nick in baffects:
irc.kick(channel, nick)
if not already_op:
irc.mode(channel, "-o {}".format(irc.get_nick()))
add_handler(on_mode, name)
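# Hedged aside: the op/deop/voice/ban commands above all build a list of per-target
# entries such as "+o alice" or "-b *!*@host" and hand them to utils.unsplit_modes,
# while utils.split_modes does the reverse for incoming MODE events. Those helpers
# are not defined in this file; the pair below is a self-contained mirror of what
# they appear to do, based only on how they are called above (the real versions may
# handle server mode limits and mode types differently).
def split_modes(args):
    """Expand a raw MODE argument list like ['+ov', 'alice', 'bob']
    into one entry per change: ['+o alice', '+v bob']."""
    modes = []
    if not args:
        return modes
    flags, params = args[0], list(args[1:])
    sign = "+"
    for char in flags:
        if char in "+-":
            sign = char
        elif params:
            modes.append("{}{} {}".format(sign, char, params.pop(0)))
        else:
            modes.append("{}{}".format(sign, char))
    return modes

def unsplit_modes(modes, max_modes=4):
    """Recombine per-change entries into as few MODE strings as possible,
    capped at max_modes changes per string (a common server limit)."""
    combined = []
    for start in range(0, len(modes), max_modes):
        chunk = modes[start:start + max_modes]
        flags = "".join(entry.split()[0] for entry in chunk)
        params = [entry.split()[1] for entry in chunk if " " in entry]
        combined.append(" ".join([flags] + params))
    return combined

# For example, unsplit_modes(["+o alice", "-v bob"]) yields ["+o-v alice bob"].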
| 34.009317
| 138
| 0.491888
| 3,648
| 32,853
| 4.367873
| 0.060855
| 0.019455
| 0.035396
| 0.024601
| 0.832497
| 0.811096
| 0.789883
| 0.789883
| 0.75932
| 0.745513
| 0
| 0.007078
| 0.38931
| 32,853
| 965
| 139
| 34.04456
| 0.787121
| 0.123489
| 0
| 0.826772
| 0
| 0
| 0.030709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035433
| false
| 0.007874
| 0.007874
| 0
| 0.06168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2622ce1ff16b823a3027c9619a2b3abba4ed4214
| 137
|
py
|
Python
|
data_loader/__init__.py
|
yuanlinping/deep_colormap_extraction
|
46ed2673d561ac91349bd47c6559df64b16e6131
|
[
"MIT"
] | 1
|
2022-03-16T11:07:41.000Z
|
2022-03-16T11:07:41.000Z
|
data_loader/__init__.py
|
yuanlinping/deep_colormap_extraction
|
46ed2673d561ac91349bd47c6559df64b16e6131
|
[
"MIT"
] | 1
|
2021-06-16T03:38:35.000Z
|
2021-06-16T03:38:35.000Z
|
data_loader/__init__.py
|
yuanlinping/deep_colormap_extraction
|
46ed2673d561ac91349bd47c6559df64b16e6131
|
[
"MIT"
] | 1
|
2022-03-16T11:12:49.000Z
|
2022-03-16T11:12:49.000Z
|
from .CSV_PNG_Dataset import CSV_PNG_Dataset
from .CSV_PNG_Dataset import CSV_PNG_Dataset_2D
from .PNG_PNG_Dataset import PNG_PNG_Dataset
| 45.666667
| 47
| 0.89781
| 25
| 137
| 4.4
| 0.24
| 0.545455
| 0.472727
| 0.309091
| 0.654545
| 0.654545
| 0.654545
| 0.654545
| 0
| 0
| 0
| 0.007937
| 0.080292
| 137
| 3
| 48
| 45.666667
| 0.865079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
26587be026734a2e5e6152b867bd59e1accc2de9
| 24,296
|
py
|
Python
|
aspose_barcode_cloud/api/file_api.py
|
aspose-barcode-cloud/aspose-barcode-cloud-python
|
f57e27be63104533d9cfd4d835e67ed22f808a3e
|
[
"MIT"
] | 4
|
2020-06-29T07:21:00.000Z
|
2022-03-23T09:46:30.000Z
|
aspose_barcode_cloud/api/file_api.py
|
aspose-barcode-cloud/aspose-barcode-cloud-python
|
f57e27be63104533d9cfd4d835e67ed22f808a3e
|
[
"MIT"
] | null | null | null |
aspose_barcode_cloud/api/file_api.py
|
aspose-barcode-cloud/aspose-barcode-cloud-python
|
f57e27be63104533d9cfd4d835e67ed22f808a3e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright (c) 2021 Aspose.BarCode for Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from aspose_barcode_cloud.api_client import ApiClient
class FileApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def copy_file(
self,
src_path,
dest_path,
src_storage_name=None,
dest_storage_name=None,
version_id=None,
async_req=False,
**kwargs
):
"""Copy file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().copy_file(src_path, dest_path, async_req=True)
>>> result = thread.get()
:param str src_path: Source file path e.g. '/folder/file.ext' # noqa: E501
:param str dest_path: Destination file path # noqa: E501
:param str src_storage_name: Source storage name # noqa: E501
:param str dest_storage_name: Destination storage name # noqa: E501
:param str version_id: File version ID to copy # noqa: E501
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.copy_file_with_http_info(
src_path,
dest_path,
src_storage_name=src_storage_name,
dest_storage_name=dest_storage_name,
version_id=version_id,
**kwargs
)
else:
(data) = self.copy_file_with_http_info(
src_path,
dest_path,
src_storage_name=src_storage_name,
dest_storage_name=dest_storage_name,
version_id=version_id,
**kwargs
)
return data
def copy_file_with_http_info(self, src_path, dest_path, **kwargs):
"""Copy file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().copy_file_with_http_info(src_path, dest_path, async_req=True)
>>> result = thread.get()
:param str src_path: Source file path e.g. '/folder/file.ext' # noqa: E501
:param str dest_path: Destination file path # noqa: E501
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = {"src_path", "dest_path", "src_storage_name", "dest_storage_name", "version_id"}
all_params.add("async_req")
all_params.add("_return_http_data_only")
all_params.add("_preload_content")
all_params.add("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'" " to method copy_file" % key)
if val is None:
continue
params[key] = val
del params["kwargs"]
# verify the required parameter "src_path" is set
if "src_path" not in params or params["src_path"] is None:
raise ValueError("Missing the required parameter 'src_path' when calling 'copy_file'")
# verify the required parameter "dest_path" is set
if "dest_path" not in params or params["dest_path"] is None:
raise ValueError("Missing the required parameter 'dest_path' when calling 'copy_file'")
collection_formats = {}
path_params = {}
if "src_path" in params:
path_params["srcPath"] = params["src_path"]
query_params = []
if "dest_path" in params:
query_params.append(("destPath", params["dest_path"]))
if "src_storage_name" in params:
query_params.append(("srcStorageName", params["src_storage_name"]))
if "dest_storage_name" in params:
query_params.append(("destStorageName", params["dest_storage_name"]))
if "version_id" in params:
query_params.append(("versionId", params["version_id"]))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header "Accept"
header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
# HTTP header "Content-Type"
header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
# Authentication setting
auth_settings = ["JWT"]
return self.api_client.call_api(
"/barcode/storage/file/copy/{srcPath}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def delete_file(self, path, storage_name=None, version_id=None, async_req=False, **kwargs):
"""Delete file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().delete_file(path, async_req=True)
>>> result = thread.get()
:param str path: File path e.g. '/folder/file.ext' # noqa: E501
:param str storage_name: Storage name # noqa: E501
:param str version_id: File version ID to delete # noqa: E501
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.delete_file_with_http_info(path, storage_name=storage_name, version_id=version_id, **kwargs)
else:
(data) = self.delete_file_with_http_info(path, storage_name=storage_name, version_id=version_id, **kwargs)
return data
def delete_file_with_http_info(self, path, **kwargs):
"""Delete file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().delete_file_with_http_info(path, async_req=True)
>>> result = thread.get()
:param str path: File path e.g. '/folder/file.ext' # noqa: E501
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = {"path", "storage_name", "version_id"}
all_params.add("async_req")
all_params.add("_return_http_data_only")
all_params.add("_preload_content")
all_params.add("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'" " to method delete_file" % key)
if val is None:
continue
params[key] = val
del params["kwargs"]
# verify the required parameter "path" is set
if "path" not in params or params["path"] is None:
raise ValueError("Missing the required parameter 'path' when calling 'delete_file'")
collection_formats = {}
path_params = {}
if "path" in params:
path_params["path"] = params["path"]
query_params = []
if "storage_name" in params:
query_params.append(("storageName", params["storage_name"]))
if "version_id" in params:
query_params.append(("versionId", params["version_id"]))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header "Accept"
header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
# HTTP header "Content-Type"
header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
# Authentication setting
auth_settings = ["JWT"]
return self.api_client.call_api(
"/barcode/storage/file/{path}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def download_file(self, path, storage_name=None, version_id=None, async_req=False, **kwargs):
"""Download file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().download_file(path, async_req=True)
>>> result = thread.get()
:param str path: File path e.g. '/folder/file.ext' # noqa: E501
:param str storage_name: Storage name # noqa: E501
:param str version_id: File version ID to download # noqa: E501
:param async_req bool
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.download_file_with_http_info(path, storage_name=storage_name, version_id=version_id, **kwargs)
else:
(data) = self.download_file_with_http_info(path, storage_name=storage_name, version_id=version_id, **kwargs)
return data
def download_file_with_http_info(self, path, **kwargs):
"""Download file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().download_file_with_http_info(path, async_req=True)
>>> result = thread.get()
:param str path: File path e.g. '/folder/file.ext' # noqa: E501
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = {"path", "storage_name", "version_id"}
all_params.add("async_req")
all_params.add("_return_http_data_only")
all_params.add("_preload_content")
all_params.add("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'" " to method download_file" % key)
if val is None:
continue
params[key] = val
del params["kwargs"]
# verify the required parameter "path" is set
if "path" not in params or params["path"] is None:
raise ValueError("Missing the required parameter 'path' when calling 'download_file'")
collection_formats = {}
path_params = {}
if "path" in params:
path_params["path"] = params["path"]
query_params = []
if "storage_name" in params:
query_params.append(("storageName", params["storage_name"]))
if "version_id" in params:
query_params.append(("versionId", params["version_id"]))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header "Accept"
header_params["Accept"] = self.api_client.select_header_accept(["multipart/form-data"])
# HTTP header "Content-Type"
header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
# Authentication setting
auth_settings = ["JWT"]
return self.api_client.call_api(
"/barcode/storage/file/{path}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="file",
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", False),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def move_file(
self,
src_path,
dest_path,
src_storage_name=None,
dest_storage_name=None,
version_id=None,
async_req=False,
**kwargs
):
"""Move file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().move_file(src_path, dest_path, async_req=True)
>>> result = thread.get()
:param str src_path: Source file path e.g. '/src.ext' # noqa: E501
:param str dest_path: Destination file path e.g. '/dest.ext' # noqa: E501
:param str src_storage_name: Source storage name # noqa: E501
:param str dest_storage_name: Destination storage name # noqa: E501
:param str version_id: File version ID to move # noqa: E501
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.move_file_with_http_info(
src_path,
dest_path,
src_storage_name=src_storage_name,
dest_storage_name=dest_storage_name,
version_id=version_id,
**kwargs
)
else:
(data) = self.move_file_with_http_info(
src_path,
dest_path,
src_storage_name=src_storage_name,
dest_storage_name=dest_storage_name,
version_id=version_id,
**kwargs
)
return data
def move_file_with_http_info(self, src_path, dest_path, **kwargs):
"""Move file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().move_file_with_http_info(src_path, dest_path, async_req=True)
>>> result = thread.get()
:param str src_path: Source file path e.g. '/src.ext' # noqa: E501
:param str dest_path: Destination file path e.g. '/dest.ext' # noqa: E501
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = {"src_path", "dest_path", "src_storage_name", "dest_storage_name", "version_id"}
all_params.add("async_req")
all_params.add("_return_http_data_only")
all_params.add("_preload_content")
all_params.add("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'" " to method move_file" % key)
if val is None:
continue
params[key] = val
del params["kwargs"]
# verify the required parameter "src_path" is set
if "src_path" not in params or params["src_path"] is None:
raise ValueError("Missing the required parameter 'src_path' when calling 'move_file'")
# verify the required parameter "dest_path" is set
if "dest_path" not in params or params["dest_path"] is None:
raise ValueError("Missing the required parameter 'dest_path' when calling 'move_file'")
collection_formats = {}
path_params = {}
if "src_path" in params:
path_params["srcPath"] = params["src_path"]
query_params = []
if "dest_path" in params:
query_params.append(("destPath", params["dest_path"]))
if "src_storage_name" in params:
query_params.append(("srcStorageName", params["src_storage_name"]))
if "dest_storage_name" in params:
query_params.append(("destStorageName", params["dest_storage_name"]))
if "version_id" in params:
query_params.append(("versionId", params["version_id"]))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header "Accept"
header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
# HTTP header "Content-Type"
header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
# Authentication setting
auth_settings = ["JWT"]
return self.api_client.call_api(
"/barcode/storage/file/move/{srcPath}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def upload_file(self, path, file, storage_name=None, async_req=False, **kwargs):
"""Upload file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().upload_file(path, file, async_req=True)
>>> result = thread.get()
:param str path: Path to upload to, including filename and extension, e.g. /file.ext or /Folder 1/file.ext. If the content is multipart and the path does not contain the file name, it tries to get it from the filename parameter of the Content-Disposition header. # noqa: E501
:param file file: File to upload # noqa: E501
:param str storage_name: Storage name # noqa: E501
:param async_req bool
:return: FilesUploadResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.upload_file_with_http_info(path, file, storage_name=storage_name, **kwargs)
else:
(data) = self.upload_file_with_http_info(path, file, storage_name=storage_name, **kwargs)
return data
def upload_file_with_http_info(self, path, file, **kwargs):
"""Upload file
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FileApi().upload_file_with_http_info(path, file, async_req=True)
>>> result = thread.get()
:param str path: Path to upload to, including filename and extension, e.g. /file.ext or /Folder 1/file.ext. If the content is multipart and the path does not contain the file name, it tries to get it from the filename parameter of the Content-Disposition header. # noqa: E501
:param file file: File to upload # noqa: E501
:return: FilesUploadResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = {"path", "file", "storage_name"}
all_params.add("async_req")
all_params.add("_return_http_data_only")
all_params.add("_preload_content")
all_params.add("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'" " to method upload_file" % key)
if val is None:
continue
params[key] = val
del params["kwargs"]
# verify the required parameter "path" is set
if "path" not in params or params["path"] is None:
raise ValueError("Missing the required parameter 'path' when calling 'upload_file'")
# verify the required parameter "file" is set
if "file" not in params or params["file"] is None:
raise ValueError("Missing the required parameter 'file' when calling 'upload_file'")
collection_formats = {}
path_params = {}
if "path" in params:
path_params["path"] = params["path"]
query_params = []
if "storage_name" in params:
query_params.append(("storageName", params["storage_name"]))
header_params = {}
form_params = []
local_var_files = {}
if "file" in params:
local_var_files["File"] = params["file"]
body_params = None
# HTTP header "Accept"
header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
# HTTP header "Content-Type"
header_params["Content-Type"] = self.api_client.select_header_content_type(["multipart/form-data"])
# Authentication setting
auth_settings = ["JWT"]
return self.api_client.call_api(
"/barcode/storage/file/{path}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="FilesUploadResult",
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
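# Hedged usage sketch based on the docstrings above (not shipped with the package):
# it assumes an ApiClient already configured with credentials elsewhere; the storage
# path and local file name are placeholders.
from aspose_barcode_cloud.api_client import ApiClient
from aspose_barcode_cloud.api.file_api import FileApi

api = FileApi(ApiClient())

# Synchronous call: the response data (a FilesUploadResult here) is returned directly.
with open("barcode.png", "rb") as local_file:
    upload_result = api.upload_file("/folder/barcode.png", local_file)

# Asynchronous call: per the docstrings, async_req=True returns a thread-like
# object whose .get() blocks until the result is available.
thread = api.download_file("/folder/barcode.png", async_req=True)
file_data = thread.get()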
| 38.998395
| 277
| 0.619155
| 2,945
| 24,296
| 4.863497
| 0.085908
| 0.05376
| 0.019968
| 0.025134
| 0.890526
| 0.882846
| 0.88117
| 0.871954
| 0.868603
| 0.868603
| 0
| 0.005391
| 0.289924
| 24,296
| 622
| 278
| 39.061093
| 0.824832
| 0.312068
| 0
| 0.816619
| 0
| 0
| 0.18752
| 0.030977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031519
| false
| 0
| 0.011461
| 0
| 0.088825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
268674dcd08ab99216ccb4bf1e8d0e76dbdef2c4
| 2,461
|
py
|
Python
|
km_api/know_me/profile/tests/filters/test_profile_filter_backend.py
|
knowmetools/km-api
|
e4b72484c42e88a6c0087c9b1d5fef240e66cbb0
|
[
"Apache-2.0"
] | 4
|
2017-08-03T00:46:31.000Z
|
2018-11-06T03:32:32.000Z
|
km_api/know_me/profile/tests/filters/test_profile_filter_backend.py
|
knowmetools/km-api
|
e4b72484c42e88a6c0087c9b1d5fef240e66cbb0
|
[
"Apache-2.0"
] | 526
|
2017-06-27T18:13:59.000Z
|
2021-06-10T18:00:21.000Z
|
km_api/know_me/profile/tests/filters/test_profile_filter_backend.py
|
knowmetools/km-api
|
e4b72484c42e88a6c0087c9b1d5fef240e66cbb0
|
[
"Apache-2.0"
] | 1
|
2017-07-10T19:46:27.000Z
|
2017-07-10T19:46:27.000Z
|
from unittest import mock
from know_me.profile import filters, models
def test_filter_queryset_owner(api_rf, km_user_factory, profile_factory):
"""
All profiles should be included for requests by the owner.
"""
km_user = km_user_factory()
profile_factory(is_private=False, km_user=km_user)
profile_factory(is_private=True, km_user=km_user)
api_rf.user = km_user.user
request = api_rf.get("/")
view = mock.Mock(name="Mock View")
view.kwargs = {"pk": km_user.pk}
backend = filters.ProfileFilterBackend()
result = backend.filter_queryset(
request, models.Profile.objects.all(), view
)
expected = km_user.profiles.all()
assert list(result) == list(expected)
def test_filter_queryset_shared_admin(
api_rf, km_user_accessor_factory, km_user_factory, profile_factory
):
"""
If the shared user is an admin, they should be able to see private
profiles.
"""
km_user = km_user_factory()
accessor = km_user_accessor_factory(
is_accepted=True, is_admin=True, km_user=km_user
)
profile_factory(is_private=False, km_user=km_user)
profile_factory(is_private=True, km_user=km_user)
api_rf.user = accessor.user_with_access
request = api_rf.get("/")
view = mock.Mock(name="Mock View")
view.kwargs = {"pk": km_user.pk}
backend = filters.ProfileFilterBackend()
result = backend.filter_queryset(
request, models.Profile.objects.all(), view
)
expected = km_user.profiles.all()
assert list(result) == list(expected)
def test_filter_queryset_shared_non_admin(
api_rf, km_user_accessor_factory, km_user_factory, profile_factory
):
"""
If the shared user does not have admin permissions, private profiles
should not be included in the results.
"""
km_user = km_user_factory()
accessor = km_user_accessor_factory(
is_accepted=True, is_admin=False, km_user=km_user
)
profile_factory(is_private=False, km_user=km_user)
profile_factory(is_private=True, km_user=km_user)
api_rf.user = accessor.user_with_access
request = api_rf.get("/")
view = mock.Mock(name="Mock View")
view.kwargs = {"pk": km_user.pk}
backend = filters.ProfileFilterBackend()
result = backend.filter_queryset(
request, models.Profile.objects.all(), view
)
expected = km_user.profiles.filter(is_private=False)
assert list(result) == list(expected)
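# Hedged sketch, an assumption rather than the repository's actual implementation:
# the three tests above pin down that the km_user's owner and accepted admin
# accessors see every profile, while other accessors only see public ones. A
# backend with that behaviour might look roughly like this; the accessor related
# name ("km_user_accessors") is guessed from the factory names used in the tests.
class ProfileFilterBackendSketch:
    def filter_queryset(self, request, queryset, view):
        # restrict to the km_user named in the URL kwargs
        queryset = queryset.filter(km_user__pk=view.kwargs["pk"])
        is_owner = queryset.filter(km_user__user=request.user).exists()
        is_admin_accessor = queryset.filter(
            km_user__km_user_accessors__user_with_access=request.user,
            km_user__km_user_accessors__is_accepted=True,
            km_user__km_user_accessors__is_admin=True,
        ).exists()
        if is_owner or is_admin_accessor:
            return queryset
        return queryset.filter(is_private=False)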
| 27.344444
| 73
| 0.702966
| 340
| 2,461
| 4.797059
| 0.188235
| 0.132434
| 0.073574
| 0.080932
| 0.823421
| 0.775598
| 0.775598
| 0.775598
| 0.775598
| 0.772532
| 0
| 0
| 0.195043
| 2,461
| 89
| 74
| 27.651685
| 0.823322
| 0.09874
| 0
| 0.740741
| 0
| 0
| 0.016636
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.055556
| false
| 0
| 0.037037
| 0
| 0.092593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cd1f573b247de22326a258791022ed41cce64b84
| 61,678
|
py
|
Python
|
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/pppoxclient.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/pppoxclient.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/pppoxclient.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Pppoxclient(Base):
"""The Pppoxclient class encapsulates a user managed pppoxclient node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Pppoxclient property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'pppoxclient'
def __init__(self, parent):
super(Pppoxclient, self).__init__(parent)
@property
def Bfdv4Interface(self):
"""An instance of the Bfdv4Interface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface.Bfdv4Interface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface import Bfdv4Interface
return Bfdv4Interface(self)
@property
def Bfdv6Interface(self):
"""An instance of the Bfdv6Interface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface.Bfdv6Interface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bfdv6interface import Bfdv6Interface
return Bfdv6Interface(self)
@property
def BgpIpv4Peer(self):
"""An instance of the BgpIpv4Peer class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer.BgpIpv4Peer)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer import BgpIpv4Peer
return BgpIpv4Peer(self)
@property
def BgpIpv6Peer(self):
"""An instance of the BgpIpv6Peer class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer.BgpIpv6Peer)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6peer import BgpIpv6Peer
return BgpIpv6Peer(self)
@property
def Connector(self):
"""An instance of the Connector class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector.Connector)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector import Connector
return Connector(self)
@property
def Dhcpv6client(self):
"""An instance of the Dhcpv6client class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6client.Dhcpv6client)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.dhcpv6client import Dhcpv6client
return Dhcpv6client(self)
@property
def Geneve(self):
"""An instance of the Geneve class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.geneve.Geneve)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.geneve import Geneve
return Geneve(self)
@property
def IgmpHost(self):
"""An instance of the IgmpHost class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.igmphost.IgmpHost)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.igmphost import IgmpHost
return IgmpHost(self)
@property
def IgmpQuerier(self):
"""An instance of the IgmpQuerier class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier.IgmpQuerier)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier import IgmpQuerier
return IgmpQuerier(self)
@property
def MldHost(self):
"""An instance of the MldHost class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldhost.MldHost)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldhost import MldHost
return MldHost(self)
@property
def MldQuerier(self):
"""An instance of the MldQuerier class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldquerier.MldQuerier)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mldquerier import MldQuerier
return MldQuerier(self)
@property
def MplsOam(self):
"""An instance of the MplsOam class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplsoam.MplsOam)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mplsoam import MplsOam
return MplsOam(self)
@property
def NetconfClient(self):
"""An instance of the NetconfClient class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.netconfclient.NetconfClient)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.netconfclient import NetconfClient
return NetconfClient(self)
@property
def NetconfServer(self):
"""An instance of the NetconfServer class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.netconfserver.NetconfServer)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.netconfserver import NetconfServer
return NetconfServer(self)
@property
def Ospfv2(self):
"""An instance of the Ospfv2 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv2.Ospfv2)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv2 import Ospfv2
return Ospfv2(self)
@property
def Ospfv3(self):
"""An instance of the Ospfv3 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv3.Ospfv3)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv3 import Ospfv3
return Ospfv3(self)
@property
def Pcc(self):
"""An instance of the Pcc class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcc.Pcc)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcc import Pcc
return Pcc(self)
@property
def Pce(self):
"""An instance of the Pce class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pce.Pce)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pce import Pce
return Pce(self)
@property
def PimV4Interface(self):
"""An instance of the PimV4Interface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface.PimV4Interface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface import PimV4Interface
return PimV4Interface(self)
@property
def PimV6Interface(self):
"""An instance of the PimV6Interface class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface.PimV6Interface)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pimv6interface import PimV6Interface
return PimV6Interface(self)
@property
def Tag(self):
"""An instance of the Tag class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag.Tag)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag import Tag
return Tag(self)
@property
def Vxlan(self):
"""An instance of the Vxlan class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlan.Vxlan)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.vxlan import Vxlan
return Vxlan(self)
@property
def AcMatchMac(self):
"""?
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('acMatchMac')
@property
def AcMatchName(self):
"""?
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('acMatchName')
@property
def AcOptions(self):
"""Indicates PPPoE AC retrieval mode
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('acOptions')
@property
def ActualRateDownstream(self):
"""This parameter specifies the value to be included in the vendor specific PPPoE tag. It is the actual downstream data rate (sub-option 0x81), in kbps.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('actualRateDownstream')
@property
def ActualRateUpstream(self):
"""This parameter specifies the value to be included in the vendor specific PPPoE tag. It is the actual upstream data rate (sub-option 0x82), in kbps.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('actualRateUpstream')
@property
def AgentAccessAggregationCircuitId(self):
"""The value to be inserted into the Agent Access-Aggregation-Circuit-ID-ASCII-Value field of the PPPoX tag.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('agentAccessAggregationCircuitId')
@property
def AgentCircuitId(self):
"""The value to be inserted into the Agent Circuit ID field of the PPPoX tag.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('agentCircuitId')
@property
def AgentRemoteId(self):
"""The value to be inserted into the Agent Remote ID field of the PPPoX tag.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('agentRemoteId')
@property
def AuthRetries(self):
"""Number of PPP authentication retries
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('authRetries')
@property
def AuthTimeout(self):
"""Timeout for PPP authentication, in seconds.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('authTimeout')
@property
def AuthType(self):
"""The authentication type to use during link setup.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('authType')
@property
def ChapName(self):
"""User name when CHAP Authentication is being used
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('chapName')
@property
def ChapSecret(self):
"""Secret when CHAP Authentication is being used
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('chapSecret')
@property
def ClientDnsOptions(self):
"""The client DNS options.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientDnsOptions')
@property
def ClientLocalIp(self):
"""The requested IPv4 address.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientLocalIp')
@property
def ClientLocalIpv6Iid(self):
"""The requested IPv6 Interface Identifier (IID).
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientLocalIpv6Iid')
@property
def ClientNcpOptions(self):
"""The NCP configuration mode for IPv4 addressing.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientNcpOptions')
@property
def ClientNetmask(self):
"""The netmask that the client will use with the assigned IP address.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientNetmask')
@property
def ClientNetmaskOptions(self):
"""The client netmask option.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientNetmaskOptions')
@property
def ClientPrimaryDnsAddress(self):
"""This is the primary DNS server address that the client requests from the server when the value of the Client DNS Options field is set to 'Request Primary only' or 'Request Primary and Secondary'.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientPrimaryDnsAddress')
@property
def ClientSecondaryDnsAddress(self):
"""This is the secondary DNS server address that the client requests from the server when the value of the Client DNS Options field is set to 'Request Primary and Secondary'.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientSecondaryDnsAddress')
@property
def ClientSignalIWF(self):
"""This parameter enables or disables the insertion of sub-option 0xFE (signaling of interworked sessions) into the DSL tag in PADI and PADR packets.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientSignalIWF')
@property
def ClientSignalLoopChar(self):
"""This parameter enables or disables the insertion of sub-options 0x81 and 0x82 into the DSL tag in PADI and PADR packets.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientSignalLoopChar')
@property
def ClientSignalLoopEncapsulation(self):
"""This parameter enables or disables the insertion of sub-option 0x90 into the DSL tag in PADI and PADR packets.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientSignalLoopEncapsulation')
@property
def ClientSignalLoopId(self):
"""This parameter enables or disables the insertion of sub-options 0x01 , 0x02, 0x03 (Remote ID,Circuit ID and Access Aggregation Circuit ID) into the DSL tag in PADI and PADR packets.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientSignalLoopId')
@property
def ClientV6NcpOptions(self):
"""The NCP configuration mode for IPv6 addressing.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientV6NcpOptions')
@property
def ClientWinsOptions(self):
"""Specifies the mode in which WINS host addresses are configured.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientWinsOptions')
@property
def ClientWinsPrimaryAddress(self):
"""Specifies the primary WINS address.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientWinsPrimaryAddress')
@property
def ClientWinsSecondaryAddress(self):
"""Specifies the secondary WINS address.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('clientWinsSecondaryAddress')
@property
def ConnectedVia(self):
"""List of layers this layer used to connect to the wire
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('connectedVia')
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute('connectedVia', value)
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DataLink(self):
"""A one-byte field included with sub-option 0x90.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('dataLink')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def DiscoveredIpv4Addresses(self):
"""The discovered IPv4 addresses.
Returns:
list(str)
"""
return self._get_attribute('discoveredIpv4Addresses')
@property
def DiscoveredIpv6Addresses(self):
"""The discovered IPv6 addresses.
Returns:
list(str)
"""
return self._get_attribute('discoveredIpv6Addresses')
@property
def DiscoveredMacs(self):
"""The discovered remote MAC address.
Returns:
list(str)
"""
return self._get_attribute('discoveredMacs')
@property
def DiscoveredRemoteSessionIds(self):
"""Remote session ID.
Returns:
list(number)
"""
return self._get_attribute('discoveredRemoteSessionIds')
@property
def DiscoveredRemoteTunnelIds(self):
"""Remote tunnel ID.
Returns:
list(number)
"""
return self._get_attribute('discoveredRemoteTunnelIds')
@property
def DiscoveredSessionIds(self):
"""The negotiated session ID.
Returns:
list(number)
"""
return self._get_attribute('discoveredSessionIds')
@property
def DiscoveredTunnelIPs(self):
"""The discovered remote tunnel IP.
Returns:
list(str)
"""
return self._get_attribute('discoveredTunnelIPs')
@property
def DiscoveredTunnelIds(self):
"""The negotiated tunnel ID.
Returns:
list(number)
"""
return self._get_attribute('discoveredTunnelIds')
@property
def DomainList(self):
"""Configure domain group settings
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('domainList')
@property
def DslTypeTlv(self):
"""DSL Type to be advertised in PPPoE VSA Tag. For undefined DSL type user has to select User-defined DSL Type.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('dslTypeTlv')
@property
def EchoReqInterval(self):
"""Keep alive interval, in seconds
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('echoReqInterval')
@property
def EnableDomainGroups(self):
"""Enable domain groups
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableDomainGroups')
@property
def EnableEchoReq(self):
"""?
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableEchoReq')
@property
def EnableEchoRsp(self):
"""?
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableEchoRsp')
@property
def EnableHostUniq(self):
"""Enables PPPoE Host-Uniq tag
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableHostUniq')
@property
def EnableMaxPayload(self):
"""Enables PPPoE Max Payload tag
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableMaxPayload')
@property
def EnableRedial(self):
"""If checked, PPPoE redial is enabled
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableRedial')
@property
def Encaps1(self):
"""A one-byte field included with sub-option 0x90.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('encaps1')
@property
def Encaps2(self):
"""A one-byte field included with sub-option 0x90.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('encaps2')
@property
def Errors(self):
"""A list of errors that have occurred
Returns:
list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))
"""
return self._get_attribute('errors')
@property
def HostUniq(self):
"""Indicates Host-Uniq Tag
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('hostUniq')
@property
def HostUniqLength(self):
"""Host-Uniq Length, in bytes
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('hostUniqLength')
@property
def LcpAccm(self):
"""Async-Control-Character-Map
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpAccm')
@property
def LcpEnableAccm(self):
"""Enable Async-Control-Character-Map
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpEnableAccm')
@property
def LcpMaxFailure(self):
"""Number of Configure-Nak packets sent without sending a Configure-Ack before assuming that configuration is not converging. Any further Configure-Nak packets for peer requested options are converted to Configure-Reject packets
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpMaxFailure')
@property
def LcpRetries(self):
"""Number of LCP retries
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpRetries')
@property
def LcpStartDelay(self):
"""Delay time in milliseconds to wait before sending LCP Config Request packet
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpStartDelay')
@property
def LcpTermRetries(self):
"""Number of LCP Termination Retries
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpTermRetries')
@property
def LcpTimeout(self):
"""Timeout for LCP phase, in seconds
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('lcpTimeout')
@property
def MaxPayload(self):
"""Max Payload
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('maxPayload')
@property
def MruNegotiation(self):
"""Enable MRU Negotiation
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('mruNegotiation')
@property
def Mtu(self):
"""Max Transmit Unit for PPP
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('mtu')
@property
def Multiplier(self):
"""Number of layer instances per parent instance (multiplier)
Returns:
number
"""
return self._get_attribute('multiplier')
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute('multiplier', value)
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def NcpRetries(self):
"""Number of NCP retries
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ncpRetries')
@property
def NcpTimeout(self):
"""Timeout for NCP phase, in seconds
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ncpTimeout')
@property
def NcpType(self):
"""IP address type (IPv4 or IPv6) for Network Control Protocol
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ncpType')
@property
def PadiRetries(self):
"""Number of PADI Retries
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('padiRetries')
@property
def PadiTimeout(self):
"""Timeout for PADI no response, in seconds
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('padiTimeout')
@property
def PadrRetries(self):
"""Number of PADR Retries
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('padrRetries')
@property
def PadrTimeout(self):
"""Timeout for PADR no response, in seconds
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('padrTimeout')
@property
def PapPassword(self):
"""Password when PAP Authentication is being used
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('papPassword')
@property
def PapUser(self):
"""User name when PAP Authentication is being used
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('papUser')
@property
def PonTypeTlv(self):
"""PON Type to be advertised in PPPoE VSA Tag. For undefined PON type user has to select User-defined PON Type.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ponTypeTlv')
@property
def RedialMax(self):
"""Maximum number of PPPoE redials
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('redialMax')
@property
def RedialTimeout(self):
"""PPPoE redial timeout, in seconds
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('redialTimeout')
@property
def ServiceName(self):
"""Access Concentrator Service Name - this option is only available for PPP servers.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('serviceName')
@property
def ServiceOptions(self):
"""Indicates PPPoE service retrieval mode
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('serviceOptions')
@property
def SessionInfo(self):
"""Logs additional information about the session state
Returns:
list(str[cLS_CFG_REJ_AUTH|cLS_CHAP_PEER_DET_FAIL|cLS_CHAP_PEER_RESP_BAD|cLS_CODE_REJ_IPCP|cLS_CODE_REJ_IPV6CP|cLS_CODE_REJ_LCP|cLS_ERR_PPP_NO_BUF|cLS_ERR_PPP_SEND_PKT|cLS_LINK_DISABLE|cLS_LOC_IPADDR_BROADCAST|cLS_LOC_IPADDR_CLASS_E|cLS_LOC_IPADDR_INVAL_ACKS_0|cLS_LOC_IPADDR_INVAL_ACKS_DIFF|cLS_LOC_IPADDR_LOOPBACK|cLS_LOC_IPADDR_PEER_MATCH_LOC|cLS_LOC_IPADDR_PEER_NO_GIVE|cLS_LOC_IPADDR_PEER_NO_HELP|cLS_LOC_IPADDR_PEER_NO_TAKE|cLS_LOC_IPADDR_PEER_REJ|cLS_LOOPBACK_DETECT|cLS_NO_NCP|cLS_NONE|cLS_PAP_BAD_PASSWD|cLS_PEER_DISCONNECTED|cLS_PEER_DISCONNECTED_NEGO|cLS_PEER_IPADDR_MATCH_LOC|cLS_PEER_IPADDR_PEER_NO_SET|cLS_PPOE_AC_SYSTEM_ERROR|cLS_PPOE_GENERIC_ERROR|cLS_PPP_DISABLE|cLS_PPPOE_NO_HOST_UNIQ|cLS_PPPOE_PADI_TIMEOUT|cLS_PPPOE_PADO_TIMEOUT|cLS_PPPOE_PADR_TIMEOUT|cLS_PROTO_REJ_IPCP|cLS_PROTO_REJ_IPv6CP|cLS_TIMEOUT_CHAP_CHAL|cLS_TIMEOUT_CHAP_RESP|cLS_TIMEOUT_IPCP_CFG_REQ|cLS_TIMEOUT_IPV6CP_CFG_REQ|cLS_TIMEOUT_IPV6CP_RA|cLS_TIMEOUT_LCP_CFG_REQ|cLS_TIMEOUT_LCP_ECHO_REQ|cLS_TIMEOUT_PAP_AUTH_REQ|cLS_TUN_AUTH_FAILED|cLS_TUN_NO_RESOURCES|cLS_TUN_TIMEOUT_ICRQ|cLS_TUN_TIMEOUT_SCCRQ|cLS_TUN_VENDOR_SPECIFIC_ERR])
"""
return self._get_attribute('sessionInfo')
@property
def SessionStatus(self):
"""Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
Returns:
list(str[down|notStarted|up])
"""
return self._get_attribute('sessionStatus')
@property
def StackedLayers(self):
"""List of secondary (many to one) child layer protocols
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('stackedLayers')
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute('stackedLayers', value)
@property
def StateCounts(self):
"""A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Returns:
dict(total:number,notStarted:number,down:number,up:number)
"""
return self._get_attribute('stateCounts')
@property
def Status(self):
"""Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
str(configured|error|mixed|notStarted|started|starting|stopping)
"""
return self._get_attribute('status')
@property
def UnlimitedRedialAttempts(self):
"""If checked, PPPoE unlimited redial attempts is enabled
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('unlimitedRedialAttempts')
@property
def UserDefinedDslType(self):
"""User Defined DSL-Type Value.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('userDefinedDslType')
@property
def UserDefinedPonType(self):
"""User Defined PON-Type Value.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('userDefinedPonType')
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new pppoxclient node on the server and retrieves it in this instance.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
Returns:
self: This instance with all currently retrieved pppoxclient data using find and the newly added pppoxclient data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the pppoxclient data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, DiscoveredIpv4Addresses=None, DiscoveredIpv6Addresses=None, DiscoveredMacs=None, DiscoveredRemoteSessionIds=None, DiscoveredRemoteTunnelIds=None, DiscoveredSessionIds=None, DiscoveredTunnelIPs=None, DiscoveredTunnelIds=None, Errors=None, Multiplier=None, Name=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves pppoxclient data from the server.
All named parameters support regex and can be used to selectively retrieve pppoxclient data from the server.
By default the find method takes no parameters and will retrieve all pppoxclient data from the server.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context
DiscoveredIpv4Addresses (list(str)): The discovered IPv4 addresses.
DiscoveredIpv6Addresses (list(str)): The discovered IPv6 addresses.
DiscoveredMacs (list(str)): The discovered remote MAC address.
DiscoveredRemoteSessionIds (list(number)): Remote session ID.
DiscoveredRemoteTunnelIds (list(number)): Remote tunnel ID.
DiscoveredSessionIds (list(number)): The negotiated session ID.
DiscoveredTunnelIPs (list(str)): The discovered remote tunnel IP.
DiscoveredTunnelIds (list(number)): The negotiated tunnel ID.
Errors (list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))): A list of errors that have occurred
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
SessionInfo (list(str[cLS_CFG_REJ_AUTH|cLS_CHAP_PEER_DET_FAIL|cLS_CHAP_PEER_RESP_BAD|cLS_CODE_REJ_IPCP|cLS_CODE_REJ_IPV6CP|cLS_CODE_REJ_LCP|cLS_ERR_PPP_NO_BUF|cLS_ERR_PPP_SEND_PKT|cLS_LINK_DISABLE|cLS_LOC_IPADDR_BROADCAST|cLS_LOC_IPADDR_CLASS_E|cLS_LOC_IPADDR_INVAL_ACKS_0|cLS_LOC_IPADDR_INVAL_ACKS_DIFF|cLS_LOC_IPADDR_LOOPBACK|cLS_LOC_IPADDR_PEER_MATCH_LOC|cLS_LOC_IPADDR_PEER_NO_GIVE|cLS_LOC_IPADDR_PEER_NO_HELP|cLS_LOC_IPADDR_PEER_NO_TAKE|cLS_LOC_IPADDR_PEER_REJ|cLS_LOOPBACK_DETECT|cLS_NO_NCP|cLS_NONE|cLS_PAP_BAD_PASSWD|cLS_PEER_DISCONNECTED|cLS_PEER_DISCONNECTED_NEGO|cLS_PEER_IPADDR_MATCH_LOC|cLS_PEER_IPADDR_PEER_NO_SET|cLS_PPOE_AC_SYSTEM_ERROR|cLS_PPOE_GENERIC_ERROR|cLS_PPP_DISABLE|cLS_PPPOE_NO_HOST_UNIQ|cLS_PPPOE_PADI_TIMEOUT|cLS_PPPOE_PADO_TIMEOUT|cLS_PPPOE_PADR_TIMEOUT|cLS_PROTO_REJ_IPCP|cLS_PROTO_REJ_IPv6CP|cLS_TIMEOUT_CHAP_CHAL|cLS_TIMEOUT_CHAP_RESP|cLS_TIMEOUT_IPCP_CFG_REQ|cLS_TIMEOUT_IPV6CP_CFG_REQ|cLS_TIMEOUT_IPV6CP_RA|cLS_TIMEOUT_LCP_CFG_REQ|cLS_TIMEOUT_LCP_ECHO_REQ|cLS_TIMEOUT_PAP_AUTH_REQ|cLS_TUN_AUTH_FAILED|cLS_TUN_NO_RESOURCES|cLS_TUN_TIMEOUT_ICRQ|cLS_TUN_TIMEOUT_SCCRQ|cLS_TUN_VENDOR_SPECIFIC_ERR])): Logs additional information about the session state
SessionStatus (list(str[down|notStarted|up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Status (str(configured|error|mixed|notStarted|started|starting|stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
self: This instance with matching pppoxclient data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
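    # Illustrative note (not part of the generated source): because the named
    # parameters of find() support regex, a call such as
    #
    #     clients = parent.Pppoxclient.find(Name='^pppox')
    #
    # would retrieve every pppoxclient whose Name starts with "pppox", where
    # `parent` is assumed to be the parent NGPF instance that exposes the
    # Pppoxclient property.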
def read(self, href):
"""Retrieves a single instance of pppoxclient data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the pppoxclient data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CloseIpcp(self):
"""Executes the closeIpcp operation on the server.
Close IPCP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('CloseIpcp', payload=locals(), response_object=None)
def CloseIpcp(self, SessionIndices):
"""Executes the closeIpcp operation on the server.
Close IPCP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('CloseIpcp', payload=locals(), response_object=None)
def CloseIpcp(self, SessionIndices):
"""Executes the closeIpcp operation on the server.
Close IPCP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('CloseIpcp', payload=locals(), response_object=None)
def CloseIpv6cp(self):
"""Executes the closeIpv6cp operation on the server.
Close IPv6CP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('CloseIpv6cp', payload=locals(), response_object=None)
def CloseIpv6cp(self, SessionIndices):
"""Executes the closeIpv6cp operation on the server.
Close IPv6CP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('CloseIpv6cp', payload=locals(), response_object=None)
def CloseIpv6cp(self, SessionIndices):
"""Executes the closeIpv6cp operation on the server.
Close IPv6CP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('CloseIpv6cp', payload=locals(), response_object=None)
def OpenIpcp(self):
"""Executes the openIpcp operation on the server.
Open IPCP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('OpenIpcp', payload=locals(), response_object=None)
def OpenIpcp(self, SessionIndices):
"""Executes the openIpcp operation on the server.
Open IPCP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('OpenIpcp', payload=locals(), response_object=None)
def OpenIpcp(self, SessionIndices):
"""Executes the openIpcp operation on the server.
Open IPCP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('OpenIpcp', payload=locals(), response_object=None)
def OpenIpv6cp(self):
"""Executes the openIpv6cp operation on the server.
Open IPv6CP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('OpenIpv6cp', payload=locals(), response_object=None)
def OpenIpv6cp(self, SessionIndices):
"""Executes the openIpv6cp operation on the server.
Open IPv6CP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('OpenIpv6cp', payload=locals(), response_object=None)
def OpenIpv6cp(self, SessionIndices):
"""Executes the openIpv6cp operation on the server.
Open IPv6CP for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('OpenIpv6cp', payload=locals(), response_object=None)
def RestartDown(self):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def RestartDown(self, SessionIndices):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def RestartDown(self, SessionIndices):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def SendPing(self, DestIp):
"""Executes the sendPing operation on the server.
Send Ping IPv4 for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIp (str): This parameter requires a destIp of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing', payload=locals(), response_object=None)
def SendPing(self, DestIp, SessionIndices):
"""Executes the sendPing operation on the server.
Send Ping IPv4 for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIp (str): This parameter requires a destIp of type kString
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing', payload=locals(), response_object=None)
def SendPing(self, SessionIndices, DestIp):
"""Executes the sendPing operation on the server.
Send Ping IPv4 for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
DestIp (str): This parameter requires a destIp of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing', payload=locals(), response_object=None)
def SendPing6(self, DestIp):
"""Executes the sendPing6 operation on the server.
Send Ping IPv6 for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIp (str): This parameter requires a destIp of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing6', payload=locals(), response_object=None)
def SendPing6(self, DestIp, SessionIndices):
"""Executes the sendPing6 operation on the server.
Send Ping IPv6 for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
DestIp (str): This parameter requires a destIp of type kString
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing6', payload=locals(), response_object=None)
def SendPing6(self, SessionIndices, DestIp):
"""Executes the sendPing6 operation on the server.
Send Ping IPv6 for selected PPPoX items.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
DestIp (str): This parameter requires a destIp of type kString
Returns:
list(dict(port:str[None|/api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('SendPing6', payload=locals(), response_object=None)
def Start(self):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
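# Hedged usage sketch for the Start/Stop execs above (not part of the generated file).
# The session setup (SessionAssistant, the IP address) is an assumption about the
# surrounding ixnetwork_restpy package; only the Start/Stop calls mirror the methods
# defined in this file. Because the overloads above share a name, the definition Python
# keeps is the last one (the range-string form), so SessionIndices is passed explicitly.
from ixnetwork_restpy import SessionAssistant

session = SessionAssistant(IpAddress='127.0.0.1')      # placeholder API-server address
ixnetwork = session.Ixnetwork
topology = ixnetwork.Topology.find()                   # the /topology node these execs act on
topology.Start(SessionIndices='1-4;6;7-12')            # start selected protocols on those sessions
topology.Stop(SessionIndices='1-4;6;7-12')             # stop them again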
| 35.901048
| 1,187
| 0.749392
| 7,956
| 61,678
| 5.720588
| 0.078054
| 0.027882
| 0.038407
| 0.050535
| 0.784347
| 0.763298
| 0.757234
| 0.742293
| 0.735175
| 0.693274
| 0
| 0.009022
| 0.171504
| 61,678
| 1,717
| 1,188
| 35.921957
| 0.881644
| 0.709556
| 0
| 0.387446
| 0
| 0
| 0.093925
| 0.017269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.318182
| false
| 0.004329
| 0.051948
| 0
| 0.679654
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
26c512fdbb0b4d15fe8aa1267d2ef097a60a7aef
| 21,888
|
py
|
Python
|
Tracklete_Analysis_nogui.py
|
PaulHofma/tracklete-analysis
|
46ee50d9aba11992d321e33cd23a5a44b044a3cb
|
[
"BSD-3-Clause"
] | 1
|
2019-01-14T09:46:21.000Z
|
2019-01-14T09:46:21.000Z
|
Tracklete_Analysis_nogui.py
|
PaulHofma/tracklete-analysis
|
46ee50d9aba11992d321e33cd23a5a44b044a3cb
|
[
"BSD-3-Clause"
] | 3
|
2018-12-20T11:35:31.000Z
|
2019-01-28T11:12:06.000Z
|
Tracklete_Analysis_nogui.py
|
PaulHofma/tracklete-analysis
|
46ee50d9aba11992d321e33cd23a5a44b044a3cb
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Created on 19 Nov 2018
@author: Paul Hofma
@version: 1.0.1 (GUILESS VERSION)
@disclaimer:
You're advised to take any and all trends it reproduces with a good dose of salt and
common sense. These are simply the trends visible in the data, and while the graph extrapolates for
another 2 weeks by default, these results are (obviously) not guaranteed. In addition, 1st and 2nd order
interpolations are by their nature rather simplistic, and the weight graph in particular may display
something more like exponential or slightly sinusoidal behaviour; none of this will be modelled
(though perhaps it may be in a future version).
TL;DR - human physiology is complicated. These graphs are simple. Draw your conclusions with some caution.
'''
##################
## INSTRUCTIONS ##
##################
"""
The current version takes in an Excel file with Tracklete bodystats, and returns (and saves) graphs for weight,
heartrate, and mood. It also draws trend lines in all of these, both 1st (linear) and 2nd (quadratic) order.
HOW TO USE:
1. Export bodystats as Excel (either individual or group) from Tracklete.io
2. Set parameters in OPTIONAL PARAMS below as desired.
3. Run the script by dragging the bodystat Excel file onto the program (or from the command line, as in the example below).
"""
######################################################
## OPTIONAL PARAMS - SET THESE YOURSELF IF YOU WANT ##
######################################################
"""
Want the trend line extrapolated further (or less far) into the future? As the name implies, this is measured in days.
A shorter or longer window may be appropriate, especially if you have either very little or a lot of data.
"""
additional_days = 7
"""
Want to use only the last N days of data? The default is to use all available data; leave this at 0 in that case.
Useful if you want to compare recent trends against the global ones.
Note that using fewer data points will give less reliable results.
"""
N_days_used = 30
"""
Which plots do you want to see?
Set to False to disable plot, or True to enable plot.
Default plots weight, heartrate, and rating (how they're feeling), and omits hours slept.
Note that more than three graphs will likely get a bit crowded (depending on your monitor).
"""
PLOT_WEIGHT = True
PLOT_HEARTRATE = True
PLOT_RATING = True
PLOT_SLEEP = False
"""
Do you want additional weight lines? Mostly useful for lightweight coaches.
If so, enter average weights (summer) below, and set 'WEIGHT_LINES' to True.
Plots will then also draw lines (when relevant) for:
- max weight (winter)
- max weight (summer)
- avg weight (summer)
"""
WEIGHT_LINES = True
AVG_WEIGHT = 57
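# A hedged example of an alternative configuration (values are only illustrative;
# AVG_WEIGHT must still be 57 or 70 to pass the check in the SETUP block below):
# additional_days = 14     # extrapolate the trend two weeks ahead
# N_days_used = 0          # 0 means: use every available entry
# PLOT_SLEEP = True        # also plot hours slept
# AVG_WEIGHT = 70          # the other supported average weight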
##########################
## ##
## -- SETUP -- ##
## ##
##########################
print("Starting Tracklete Analysis (no GUI version)")
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
import sys
TESTING = False
if not TESTING:
assert len(sys.argv) > 1, "You did not provide a file to run."
excel_file_name = sys.argv[1]
assert excel_file_name.endswith(('.xlsx', '.xls')), "You did not provide an Excel file."
else:
excel_file_name = 'download.xlsx'
# excel_file_name = 'download singular.xlsx'
# excel_file_name = 'download (2).xlsx'
bodystats_db = pd.ExcelFile(excel_file_name)
if len(bodystats_db.sheet_names) == 1:
# if singular bodystat export
name_list = bodystats_db.sheet_names
else:
# if team export, first 2 sheets contain attendance/ergo
name_list = bodystats_db.sheet_names[2:]
if TESTING: print(name_list)
if WEIGHT_LINES:
assert(AVG_WEIGHT == 57 or AVG_WEIGHT == 70), "The average athlete weight provided doesn't match either 57 or 70 kg."
if AVG_WEIGHT == 57:
MAX_WEIGHT = AVG_WEIGHT + 2.0
WINT_MAX = MAX_WEIGHT + 2.5
elif AVG_WEIGHT == 70:
MAX_WEIGHT = AVG_WEIGHT + 2.5
WINT_MAX = MAX_WEIGHT + 2.5
################
## PLOT STUFF ##
################
PLOTS = [PLOT_WEIGHT, PLOT_HEARTRATE, PLOT_RATING, PLOT_SLEEP]
assert sum(PLOTS) != 0, "You have not enabled any plots! See Optional Parameters."
for name in name_list:
""" For each in name in name_list, make a plot """
print("Creating plots for {}".format(name))
fig, axes = plt.subplots(sum(PLOTS),1,sharex=True,figsize=(12,12))
if sum(PLOTS) == 1:
axes = [axes]
input_file = bodystats_db.parse(name)
if N_days_used != 0:
""" Optional Param: N_days_used implementation. """
if TESTING: print("using input_file[{}:]".format(N_days_used))
if N_days_used <= len(input_file):
# print((len(input_file)-N_days_used), len(input_file))
input_file = input_file[:N_days_used]
# print(input_file)
else:
print("Data for athlete {} has fewer than N_days_used ({}) data entries, using all available data ({}) instead.".format(name, N_days_used, len(input_file)))
plot_counter = 0
if PLOTS[0]:
#################
## WEIGHT PLOT ##
#################
# print input_file
input_file['Date'] = pd.to_datetime(input_file['Date'], dayfirst=True)
x, y = input_file['Date'], input_file['Weight [kg]']
if y.isnull().all() == False:
x = mdates.date2num(x)
idx = np.isfinite(x) & np.isfinite(y)
axes[plot_counter].plot(x[idx],y[idx],label="data")
xx = np.linspace(x.min(), x.max()+additional_days, 100)
dd = mdates.num2date(xx)
z1 = np.polyfit(x[idx], y[idx], 1)
p1 = np.poly1d(z1)
axes[plot_counter].plot(dd,p1(xx),"r--",label=r"Trend, 1st order [$\Delta$={}{:.2}/w]".format(("+" if z1[0]>0 else ""), z1[0]*7))
z2 = np.polyfit(x[idx], y[idx], 2)
p2 = np.poly1d(z2)
axes[plot_counter].plot(dd,p2(xx),"g--",lw=0.8,label="Trend, 2nd order".format("a"))
if WEIGHT_LINES:
""" Weight lines implementation. """
w = input_file['Weight [kg]']
if np.max(w) >= WINT_MAX-1.:
axes[plot_counter].axhline(WINT_MAX,color="black",lw=0.8, ls=':')
axes[plot_counter].axhline(MAX_WEIGHT,color="black",ls="--",lw=0.8)
if np.min(w) < MAX_WEIGHT-1.:
axes[plot_counter].axhline(AVG_WEIGHT,color="black",ls="-.",lw=0.8)
axes[plot_counter].legend()
axes[plot_counter].set(ylabel="Weight [kg]",xlim=(x.min(), x.max()+additional_days))
else:
print("WARNING: No weight data found for athlete {}.\nThis plot will be empty.".format(name))
plot_counter+=1
if PLOTS[1]:
####################
## HEARTRATE PLOT ##
####################
x, y = input_file['Date'], input_file['Heartrate [bpm]']
if y.isnull().all() == False:
x = mdates.date2num(x)
idx = np.isfinite(x) & np.isfinite(y)
axes[plot_counter].plot(x[idx],y[idx],label="data")
xx = np.linspace(x.min(), x.max()+additional_days, 100)
dd = mdates.num2date(xx)
z1 = np.polyfit(x[idx], y[idx], 1)
p1 = np.poly1d(z1)
axes[plot_counter].plot(dd,p1(xx),"r--",label="trend, 1st order")
z2 = np.polyfit(x[idx], y[idx], 2)
p2 = np.poly1d(z2)
axes[plot_counter].plot(dd,p2(xx),"g--",lw=0.8,label="trend, 2nd order")
axes[plot_counter].axhline(np.mean(input_file['Heartrate [bpm]']),label="avg",color="black",lw=0.8,ls="-.")
axes[plot_counter].legend(ncol=4)
axes[plot_counter].set(ylabel="Heartrate [bpm]",xlim=(x.min(), x.max()+additional_days))
else:
print("WARNING: No heartrate data found for athlete {}\nThis plot will be empty.".format(name))
plot_counter+=1
if PLOTS[2]:
################
## FEELY PLOT ##
################
x, y = input_file['Date'], input_file['Rating [1:10]']
if y.isnull().all() == False:
x = mdates.date2num(x)
idx = np.isfinite(x) & np.isfinite(y)
axes[plot_counter].plot(x[idx],y[idx],label="data")
xx = np.linspace(x.min(), x.max()+additional_days, 100)
dd = mdates.num2date(xx)
z1 = np.polyfit(x[idx], y[idx], 1)
p1 = np.poly1d(z1)
axes[plot_counter].plot(dd,p1(xx),"r--",label="trend, 1st order")
z2 = np.polyfit(x[idx], y[idx], 2)
p2 = np.poly1d(z2)
axes[plot_counter].plot(dd,p2(xx),"g--",lw=0.8,label="trend, 2nd order")
axes[plot_counter].axhline(np.mean(input_file['Rating [1:10]']),label="avg", color="black",lw=0.8,ls="-.")
axes[plot_counter].legend(ncol=4)
axes[plot_counter].set(ylabel="Rating [1:10]",ylim=(-0.5,10.5),xlim=(x.min(), x.max()+additional_days))
else:
print("WARNING: No rating data found for athlete {}\nThis plot will be empty.".format(name))
plot_counter+=1
if PLOTS[3]:
################
## SLEEP PLOT ##
################
x, y = input_file['Date'], input_file['Sleep [h]']
if y.isnull().all() == False:
x = mdates.date2num(x)
idx = np.isfinite(x) & np.isfinite(y)
axes[plot_counter].plot(x[idx],y[idx],label="data")
xx = np.linspace(x.min(), x.max()+additional_days, 100)
dd = mdates.num2date(xx)
z1 = np.polyfit(x[idx], y[idx], 1)
p1 = np.poly1d(z1)
axes[plot_counter].plot(dd,p1(xx),"r--",label="trend, 1st order")
z2 = np.polyfit(x[idx], y[idx], 2)
p2 = np.poly1d(z2)
axes[plot_counter].plot(dd,p2(xx),"g--",lw=0.8,label="trend, 2nd order")
axes[plot_counter].axhline(np.mean(input_file['Sleep [h]']),label="avg",color="black",lw=0.8,ls="-.")
axes[plot_counter].legend(ncol=4)
axes[plot_counter].set(ylabel="Sleep [h]",xlim=(x.min(), x.max()+additional_days))
else:
print("WARNING: No sleep data found for athlete {}\nThis plot will be empty.".format(name))
plot_counter+=1
#######################
## PLOTTING BUSINESS ##
#######################
axes[-1].xaxis.set_major_formatter(mdates.DateFormatter("%d-%m-%Y"))
axes[-1].set_xlabel("Date")
axes[0].set_title("{}: Trends".format(name))
fig.savefig("Tracklete_Trends_{}.png".format(name))
if TESTING: plt.show()
input("Finished! Press enter to close.")
| 38
| 168
| 0.568713
| 2,987
| 21,888
| 4.06997
| 0.117509
| 0.056099
| 0.064161
| 0.037509
| 0.987744
| 0.987744
| 0.987744
| 0.987744
| 0.987744
| 0.987744
| 0
| 0.025465
| 0.260828
| 21,888
| 575
| 169
| 38.066087
| 0.725941
| 0.045687
| 0
| 0.97931
| 0
| 0
| 0.149473
| 0.002992
| 0
| 0
| 0
| 0
| 0.027586
| 0
| null | null | 0
| 0.034483
| null | null | 0.058621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
26c826ee61f3bb213c6fe05fa5daef80974b2455
| 84
|
py
|
Python
|
lang/Python/file-size.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | 5
|
2021-01-29T20:08:05.000Z
|
2022-03-22T06:16:05.000Z
|
lang/Python/file-size.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/file-size.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | 1
|
2021-04-13T04:19:31.000Z
|
2021-04-13T04:19:31.000Z
|
import os
size = os.path.getsize('input.txt')    # size of input.txt in the current working directory
size = os.path.getsize('/input.txt')   # size of input.txt in the filesystem root
| 16.8
| 36
| 0.690476
| 14
| 84
| 4.142857
| 0.5
| 0.206897
| 0.344828
| 0.586207
| 0.862069
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 84
| 4
| 37
| 21
| 0.773333
| 0
| 0
| 0
| 0
| 0
| 0.22619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
26c9b3d132890b23fadacc7a62aa67fc4fc5d57c
| 8,383
|
py
|
Python
|
app.py
|
MedCabinet/medicalcabinet_api
|
a3d72e426bfe9eeda166eada964ff84c6bf86484
|
[
"MIT"
] | 1
|
2020-02-01T17:06:01.000Z
|
2020-02-01T17:06:01.000Z
|
app.py
|
skredenmathias/medicalcabinet_api
|
a3d72e426bfe9eeda166eada964ff84c6bf86484
|
[
"MIT"
] | 4
|
2021-06-02T01:15:11.000Z
|
2022-01-13T02:07:15.000Z
|
app.py
|
skredenmathias/medicalcabinet_api
|
a3d72e426bfe9eeda166eada964ff84c6bf86484
|
[
"MIT"
] | 4
|
2020-01-07T16:51:21.000Z
|
2020-02-01T13:18:59.000Z
|
"""Code for our api app"""
from flask import Flask, jsonify, request
import basilica
import numpy as np
import pandas as pd
from scipy import spatial
app = Flask(__name__)
@app.route('/')
def root():
return "We have the best API!"
@app.route('/strains', methods=['Post'])
def strains():
""" a route, expects json object with 1 key """
# receive input
lines = request.get_json(force=True)
# get data from json
text = lines['input'] # json keys to be determined
# validate input (optional)
assert isinstance(text, str)
# predict
output = predict(text)
# give output to sender.
return output
@app.route('/symptom', methods=['Post'])
def symptom():
""" a route, expects json object with 1 key """
# receive input
lines = request.get_json(force=True)
# get data from json
text = lines['input'] # json keys to be determined
# validate input (optional)
assert isinstance(text, str)
# predict
output = predict_symptoms(text)
# give output to sender.
return output
@app.route('/general', methods=['Post'])
def general():
""" a route, expects json object with 1 key """
# receive input
lines = request.get_json(force=True)
# get data from json
text = lines['input'] # json keys to be determined
# validate input (optional)
assert isinstance(text, str)
# predict
output = predict_all(text)
# give output to sender.
return output
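# Hypothetical client-side helper (not part of the original app): it illustrates the
# JSON shape the /strains, /symptom and /general routes above expect. The base URL is
# a placeholder and `requests` is assumed to be installed in the environment.
def example_client_call(base_url="http://localhost:5000"):
    import requests
    payload = {"input": "Relaxed, Happy, Creative"}    # the single 'input' key the routes read
    response = requests.post(base_url + "/strains", json=payload)
    return response.json()                             # top-5 matches as a JSON object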
# 4 spaced symptoms json version
# user input
user_input_symp = "multiple sclerosis, epilepsy, pain, "
def predict_symptoms(user_input_symp):
#unpickling file of embedded cultivar symptoms diseases
unpickled_df_test = pd.read_pickle("./symptommedembedv8.pkl")
# getting data
df = pd.read_csv('symptoms8_medcab3.csv')
# Part 1
# a function to calculate_user_text_embedding
# to save the embedding value in session memory
user_input_embedding = 0
def calculate_user_text_embedding(input, user_input_embedding):
# setting a string of two sentences for the algo to compare
sentences = [input]
# calculating embedding for both user_entered_text and for features
with basilica.Connection('36a370e3-becb-99f5-93a0-a92344e78eab') as c:
user_input_embedding = list(c.embed_sentences(sentences))
return user_input_embedding
# run the function to save the embedding value in session memory
user_input_embedding = calculate_user_text_embedding(user_input_symp, user_input_embedding)
# part 2
score = 0
def score_user_input_from_stored_embedding_from_stored_values(input, score, row1, user_input_embedding):
# obtains pre-calculated values from a pickled dataframe of arrays
embedding_stored = unpickled_df_test.loc[row1, 0]
# calculates the similarity of user_text vs. product description
score = 1 - spatial.distance.cosine(embedding_stored, user_input_embedding)
# returns a variable that can be used outside of the function
return score
# Part 3
for i in range(2351):
# calls the function to set the value of 'score'
# which is the score of the user input
score = score_user_input_from_stored_embedding_from_stored_values(user_input_symp, score, i, user_input_embedding)
#stores the score in the dataframe
df.loc[i,'score'] = score
# Part 4: returns all data for the top 5 results as a json obj
df_big_json = df.sort_values(by='score', ascending=False)
df_big_json = df_big_json.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1)
df_big_json = df_big_json[:5]
df_big_json = df_big_json.to_json(orient='columns')
# Part 5: output
return df_big_json
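# Minimal illustration (not part of the API) of the scoring rule used above:
# similarity = 1 - cosine distance, so identical vectors score 1.0 and orthogonal ones 0.0.
example_score = 1 - spatial.distance.cosine([1.0, 0.0, 1.0], [1.0, 0.0, 1.0])   # -> 1.0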
# 4 spaced effect json version
# user input
user_input = "text, Relaxed, Violet, Aroused, Creative, Happy, Energetic, Flowery, Diesel"
def predict(user_input):
# getting data
df = pd.read_csv('symptoms8_medcab3.csv')
#effcts unpickling file of embedded cultivar descriptions
unpickled_df_test = pd.read_pickle("./medembedv2.pkl")
# Part 1
# a function to calculate_user_text_embedding
# to save the embedding value in session memory
user_input_embedding = 0
def calculate_user_text_embedding(input, user_input_embedding):
# setting a string of two sentences for the algo to compare
sentences = [input]
# calculating embedding for both user_entered_text and for features
with basilica.Connection('36a370e3-becb-99f5-93a0-a92344e78eab') as c:
user_input_embedding = list(c.embed_sentences(sentences))
return user_input_embedding
# run the function to save the embedding value in session memory
user_input_embedding = calculate_user_text_embedding(user_input, user_input_embedding)
# part 2
score = 0
def score_user_input_from_stored_embedding_from_stored_values(input, score, row1, user_input_embedding):
# obtains pre-calculated values from a pickled dataframe of arrays
embedding_stored = unpickled_df_test.loc[row1, 0]
# calculates the similarity of user_text vs. product description
score = 1 - spatial.distance.cosine(embedding_stored, user_input_embedding)
# returns a variable that can be used outside of the function
return score
# Part 3
for i in range(2351):
# calls the function to set the value of 'score'
# which is the score of the user input
score = score_user_input_from_stored_embedding_from_stored_values(user_input, score, i, user_input_embedding)
#stores the score in the dataframe
df.loc[i,'score'] = score
# Part 4: returns all data for the top 5 results as a json obj
df_big_json = df.sort_values(by='score', ascending=False)
df_big_json = df_big_json.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1)
df_big_json = df_big_json[:5]
df_big_json = df_big_json.to_json(orient='columns')
# Part 5: output
return df_big_json
# user input
user_input = "multiple sclerosis, epilepsy, pain, "
def predict_all(user_input_all):
#unpickling file of embedded cultivar symptoms diseases
unpickled_df_test = pd.read_pickle("./all_text_medembedv8.pkl")
# getting data
df = pd.read_csv('symptoms8_medcab3.csv')
# Part 1
# a function to calculate_user_text_embedding
# to save the embedding value in session memory
user_input_embedding = 0
def calculate_user_text_embedding(input, user_input_embedding):
# setting a string of two sentences for the algo to compare
sentences = [input]
# calculating embedding for both user_entered_text and for features
with basilica.Connection('36a370e3-becb-99f5-93a0-a92344e78eab') as c:
user_input_embedding = list(c.embed_sentences(sentences))
return user_input_embedding
# run the function to save the embedding value in session memory
user_input_embedding = calculate_user_text_embedding(user_input_all, user_input_embedding)
# part 2
score = 0
def score_user_input_from_stored_embedding_from_stored_values(input, score, row1, user_input_embedding):
# obtains pre-calculated values from a pickled dataframe of arrays
embedding_stored = unpickled_df_test.loc[row1, 0]
# calculates the similarity of user_text vs. product description
score = 1 - spatial.distance.cosine(embedding_stored, user_input_embedding)
# returns a variable that can be used outside of the function
return score
# Part 3
for i in range(2351):
# calls the function to set the value of 'score'
# which is the score of the user input
score = score_user_input_from_stored_embedding_from_stored_values(user_input_all, score, i, user_input_embedding)
#stores the score in the dataframe
df.loc[i,'score'] = score
# Part 4: returns all data for the top 5 results as a json obj
df_big_json = df.sort_values(by='score', ascending=False)
df_big_json = df_big_json.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1)
df_big_json = df_big_json[:5]
df_big_json = df_big_json.to_json(orient='columns')
# Part 5: output
return df_big_json
| 29.517606
| 122
| 0.695932
| 1,174
| 8,383
| 4.754685
| 0.153322
| 0.082229
| 0.087066
| 0.023647
| 0.909531
| 0.903798
| 0.874597
| 0.868506
| 0.868506
| 0.846113
| 0
| 0.020756
| 0.22987
| 8,383
| 284
| 123
| 29.517606
| 0.843866
| 0.337349
| 0
| 0.715686
| 0
| 0
| 0.104839
| 0.040139
| 0
| 0
| 0
| 0
| 0.029412
| 1
| 0.127451
| false
| 0
| 0.04902
| 0.009804
| 0.303922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
26caa8066973f299cf03ef4be179bcc48f93f33b
| 13,417
|
py
|
Python
|
complaintdatabase/tests.py
|
cfpb/complaint
|
f643ba3b23d497d7c26b9e6f5af0353db65f914b
|
[
"CC0-1.0"
] | 6
|
2017-02-28T20:02:18.000Z
|
2019-04-21T12:07:14.000Z
|
complaintdatabase/tests.py
|
DalavanCloud/complaint
|
b8344f73351af49c38eebfeb7e15a5c0ed9c4635
|
[
"CC0-1.0"
] | 18
|
2016-07-01T16:16:00.000Z
|
2018-07-02T22:10:29.000Z
|
complaintdatabase/tests.py
|
DalavanCloud/complaint
|
b8344f73351af49c38eebfeb7e15a5c0ed9c4635
|
[
"CC0-1.0"
] | 14
|
2016-08-26T00:26:41.000Z
|
2021-02-20T10:37:03.000Z
|
import collections
from datetime import datetime
from StringIO import StringIO
from unittest import skipIf
from mock import patch, Mock, MagicMock, mock_open
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import RequestFactory, TestCase
from django.test import Client
from requests.exceptions import ConnectionError
from .views import (LandingView, DocsView, get_narratives_json, get_stats,
is_data_not_updated)
MOCK_404 = ConnectionError(Mock(return_value={'status': 404}), 'not found')
client = Client()
class LandingViewTest(TestCase):
def setUp(self):
"""Every test needs access to the request factory."""
self.factory = RequestFactory()
def test_get_context_data_exist(self):
"""Create an instance of a GET request."""
request = self.factory.get('/')
response = LandingView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertTrue('base_template' in response.context_data.keys())
self.assertTrue('stats' in response.context_data.keys())
@skipIf(True, "not running with feature flags")
def test_demo_json(self):
"""Test demo version of landing page"""
response = client.get(reverse("ccdb-demo",
kwargs={'demo_json': 'demo.json'}))
self.assertEqual(response.status_code, 200)
self.assertTrue('base_template' in response.context_data.keys())
self.assertTrue('stats' in response.context_data.keys())
class NarrativeJsonTest(TestCase):
@patch('complaintdatabase.views.requests.get')
def test_get_narratives_json(self, mock_get):
mock_return = MagicMock()
mock_return.json.return_value = {}
mock_get.return_value = mock_return
res_json = get_narratives_json()
self.assertEqual(res_json, {})
self.assertTrue(mock_get.call_count == 1)
@patch('complaintdatabase.views.requests.get')
def test_get_demo_narratives_json(self, mock_get):
mock_return = MagicMock()
mock_return.json.return_value = {}
mock_get.return_value = mock_return
m = mock_open(read_data='{"mock_data": ""}')
with patch("__builtin__.open", m, create=True):
res_json = get_narratives_json(demo_json='/fake/path')
self.assertEqual(res_json, {"mock_data": ""})
@patch('complaintdatabase.views.requests.get')
def test_request_exception_get_narratives_json(self, mock_requests_get):
mock_requests_get.side_effect = MOCK_404
with patch('sys.stdout', new=StringIO()) as fakeOutput:
res_json = get_narratives_json()
self.assertEqual(res_json, {})
self.assertIn('requests.exceptions.RequestException',
fakeOutput.getvalue().strip())
@patch('complaintdatabase.views.requests.get')
def test_incorrect_text_get_narratives_json(self, mock_get):
mock_return = MagicMock()
mock_return.json.return_value = {}
mock_get.return_value = mock_return
with patch('sys.stdout', new=StringIO('ValueError')) as fakeOutput:
res_json = get_narratives_json()
self.assertEqual(res_json, {})
self.assertIn('ValueError', fakeOutput.getvalue())
self.assertTrue(mock_get.call_count == 1)
class GetStatsTest(TestCase):
def test_get_stats(self):
input_json = {'stats': {'test': 1}}
res = get_stats(input_json)
self.assertEqual({'test': 1}, res)
def test_no_key_get_stats(self):
with patch('sys.stdout', new=StringIO()) as fakeOutput:
res = get_stats({})
self.assertEqual({}, res)
self.assertIn('KeyError', fakeOutput.getvalue().strip())
class DataUpdatedTest(TestCase):
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_monday_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 21, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-14",
'last_updated_narratives': "2015-12-14"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertTrue(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_monday_up(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 21, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-15",
'last_updated_narratives': "2015-12-15"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_monday_narratives_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 21, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-15",
'last_updated_narratives': "2015-12-14"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertTrue(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_tuesday_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 22, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-15",
'last_updated_narratives': "2015-12-15"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertTrue(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_tuesday_up(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 22, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-16",
'last_updated_narratives': "2015-12-16"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_tuesday_narratives_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 22, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-16",
'last_updated_narratives': "2015-12-15"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertTrue(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_wednesday_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 23, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-16",
'last_updated_narratives': "2015-12-16"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertTrue(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_wednesday_up(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 23, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-17",
'last_updated_narratives': "2015-12-17"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_wednesday_narratives_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 23, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-17",
'last_updated_narratives': "2015-12-16"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertTrue(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_thursday_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 24, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-17",
'last_updated_narratives': "2015-12-17"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertTrue(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_thursday_up(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 24, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-18",
'last_updated_narratives': "2015-12-18"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_thursday_narratives_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 24, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-18",
'last_updated_narratives': "2015-12-17"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertTrue(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_friday_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 25, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-18",
'last_updated_narratives': "2015-12-18"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertTrue(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_friday_up(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 25, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-21",
'last_updated_narratives': "2015-12-21"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_friday_narratives_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 25, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-21",
'last_updated_narratives': "2015-12-18"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertTrue(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_saturday_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 27, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-18",
'last_updated_narratives': "2015-12-18"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertTrue(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_saturday_up(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 27, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-21",
'last_updated_narratives': "2015-12-21"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_data_not_updated_saturday_narratives_down(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 27, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated': "2015-12-21",
'last_updated_narratives': "2015-12-18"}}
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertTrue(narratives_down)
@patch('complaintdatabase.views.get_now')
def test_incorrect_json_data_not_updated_saturday(self, mock_get_now):
mock_get_now.return_value = datetime(2015, 12, 27, 19, 20, 10, 975427)
input_json = {'stats': {'last_updated_narratives': "2015-12-21"}}
with patch('sys.stdout', new=StringIO()) as fakeOutput:
data_down, narratives_down = is_data_not_updated(input_json)
self.assertFalse(data_down)
self.assertFalse(narratives_down)
self.assertIn('KeyError', fakeOutput.getvalue().strip())
class DocsViewTest(TestCase):
def setUp(self):
# Every test needs access to the request factory.
self.factory = RequestFactory()
def test_get_context_data_exist(self):
# Create an instance of a GET request.
request = self.factory.get('/technical-documentation')
response = DocsView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertTrue('base_template' in response.context_data.keys())
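# A standalone sketch (not one of the test cases above) of the pattern the DataUpdatedTest
# methods repeat: pin "now" by patching complaintdatabase.views.get_now, then feed
# is_data_not_updated a stats dict. The dates are illustrative; every name used here is
# already imported at the top of this module.
@patch('complaintdatabase.views.get_now')
def check_freshness(mock_get_now):
    mock_get_now.return_value = datetime(2015, 12, 21, 19, 20, 10)   # pin "now" to a Monday
    stats = {'stats': {'last_updated': "2015-12-14",
                       'last_updated_narratives': "2015-12-14"}}
    return is_data_not_updated(stats)   # -> (data_down, narratives_down)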
| 46.586806
| 78
| 0.667959
| 1,685
| 13,417
| 4.986944
| 0.084866
| 0.0407
| 0.064977
| 0.038082
| 0.857551
| 0.85172
| 0.846126
| 0.817089
| 0.801142
| 0.792455
| 0
| 0.066609
| 0.218976
| 13,417
| 287
| 79
| 46.749129
| 0.73528
| 0.015205
| 0
| 0.714286
| 0
| 0
| 0.164747
| 0.09321
| 0
| 0
| 0
| 0
| 0.243697
| 1
| 0.12605
| false
| 0
| 0.046218
| 0
| 0.193277
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
26f8d590cfc72ab299e7a3d646cbd7877a96a730
| 18,449
|
py
|
Python
|
cloudmersive_validate_api_client/api/domain_api.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | null | null | null |
cloudmersive_validate_api_client/api/domain_api.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | null | null | null |
cloudmersive_validate_api_client/api/domain_api.py
|
doc22940/cloudmersive.apiclient.python
|
8646291f45ebd7c6572a296e30f693693a6782c4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_validate_api_client.api_client import ApiClient
class DomainApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def domain_check(self, domain, **kwargs): # noqa: E501
"""Validate a domain name # noqa: E501
Check whether a domain name is valid or not. API performs a live validation by contacting DNS services to validate the existence of the domain name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_check(domain, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str domain: Domain name to check, for example \"cloudmersive.com\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: CheckResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.domain_check_with_http_info(domain, **kwargs) # noqa: E501
else:
(data) = self.domain_check_with_http_info(domain, **kwargs) # noqa: E501
return data
def domain_check_with_http_info(self, domain, **kwargs): # noqa: E501
"""Validate a domain name # noqa: E501
Check whether a domain name is valid or not. API performs a live validation by contacting DNS services to validate the existence of the domain name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_check_with_http_info(domain, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str domain: Domain name to check, for example \"cloudmersive.com\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: CheckResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['domain'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method domain_check" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'domain' is set
if ('domain' not in params or
params['domain'] is None):
raise ValueError("Missing the required parameter `domain` when calling `domain_check`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'domain' in params:
body_params = params['domain']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/domain/check', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CheckResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def domain_post(self, domain, **kwargs): # noqa: E501
"""Get WHOIS information for a domain # noqa: E501
Validate whether a domain name exists, and also return the full WHOIS record for that domain name. WHOIS records include all the registration details of the domain name, such as information about the domain's owners. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_post(domain, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str domain: Domain name to check, for example \"cloudmersive.com\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: WhoisResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.domain_post_with_http_info(domain, **kwargs) # noqa: E501
else:
(data) = self.domain_post_with_http_info(domain, **kwargs) # noqa: E501
return data
def domain_post_with_http_info(self, domain, **kwargs): # noqa: E501
"""Get WHOIS information for a domain # noqa: E501
Validate whether a domain name exists, and also return the full WHOIS record for that domain name. WHOIS records include all the registration details of the domain name, such as information about the domain's owners. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_post_with_http_info(domain, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str domain: Domain name to check, for example \"cloudmersive.com\". The input is a string so be sure to enclose it in double-quotes. (required)
:return: WhoisResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['domain'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method domain_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'domain' is set
if ('domain' not in params or
params['domain'] is None):
raise ValueError("Missing the required parameter `domain` when calling `domain_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'domain' in params:
body_params = params['domain']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/domain/whois', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WhoisResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def domain_url_full(self, request, **kwargs): # noqa: E501
"""Validate a URL fully # noqa: E501
Validate whether a URL is syntactically valid (does not check endpoint for validity), whether it exists, and whether the endpoint is up and passes virus scan checks. Accepts various types of input and produces a well-formed URL as output. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_url_full(request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateUrlRequestFull request: (required)
:return: ValidateUrlResponseFull
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.domain_url_full_with_http_info(request, **kwargs) # noqa: E501
else:
(data) = self.domain_url_full_with_http_info(request, **kwargs) # noqa: E501
return data
def domain_url_full_with_http_info(self, request, **kwargs): # noqa: E501
"""Validate a URL fully # noqa: E501
Validate whether a URL is syntactically valid (does not check endpoint for validity), whether it exists, and whether the endpoint is up and passes virus scan checks. Accepts various types of input and produces a well-formed URL as output. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_url_full_with_http_info(request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateUrlRequestFull request: (required)
:return: ValidateUrlResponseFull
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method domain_url_full" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'request' is set
if ('request' not in params or
params['request'] is None):
raise ValueError("Missing the required parameter `request` when calling `domain_url_full`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in params:
body_params = params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/domain/url/full', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ValidateUrlResponseFull', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def domain_url_syntax_only(self, request, **kwargs): # noqa: E501
"""Validate a URL syntactically # noqa: E501
Validate whether a URL is syntactically valid (does not check endpoint for validity). Accepts various types of input and produces a well-formed URL as output. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_url_syntax_only(request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateUrlRequestSyntaxOnly request: (required)
:return: ValidateUrlResponseSyntaxOnly
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.domain_url_syntax_only_with_http_info(request, **kwargs) # noqa: E501
else:
(data) = self.domain_url_syntax_only_with_http_info(request, **kwargs) # noqa: E501
return data
def domain_url_syntax_only_with_http_info(self, request, **kwargs): # noqa: E501
"""Validate a URL syntactically # noqa: E501
Validate whether a URL is syntactically valid (does not check endpoint for validity). Accepts various types of input and produces a well-formed URL as output. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.domain_url_syntax_only_with_http_info(request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateUrlRequestSyntaxOnly request: (required)
:return: ValidateUrlResponseSyntaxOnly
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method domain_url_syntax_only" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'request' is set
if ('request' not in params or
params['request'] is None):
raise ValueError("Missing the required parameter `request` when calling `domain_url_syntax_only`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in params:
body_params = params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/domain/url/syntax-only', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ValidateUrlResponseSyntaxOnly', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
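# Minimal usage sketch for the generated /validate/domain/url/* methods above.
# Hedged assumptions: the package name `cloudmersive_validate_api_client`, the
# `Configuration`/`ApiClient` helpers, the `DomainApi` wrapper class and the
# request-model constructor argument are assumed from the standard
# swagger-codegen layout; only the method names, the request/response model
# names and the 'Apikey' auth setting come from the code above.
#
#   import cloudmersive_validate_api_client as validate_client
#
#   config = validate_client.Configuration()
#   config.api_key['Apikey'] = 'YOUR-API-KEY'
#   api = validate_client.DomainApi(validate_client.ApiClient(config))
#
#   request = validate_client.ValidateUrlRequestSyntaxOnly(url='www.example.com/page?x=1')
#
#   # Synchronous call: returns a ValidateUrlResponseSyntaxOnly instance.
#   result = api.domain_url_syntax_only(request)
#
#   # Asynchronous call: returns the request thread; .get() blocks for the result.
#   thread = api.domain_url_syntax_only(request, async_req=True)
#   result = thread.get()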
| 42.805104
| 261
| 0.628869
| 2,202
| 18,449
| 5.071753
| 0.101726
| 0.043696
| 0.020057
| 0.025788
| 0.933381
| 0.93168
| 0.931322
| 0.923621
| 0.921472
| 0.921472
| 0
| 0.014359
| 0.28278
| 18,449
| 430
| 262
| 42.904651
| 0.829655
| 0.386688
| 0
| 0.8
| 0
| 0
| 0.206279
| 0.056954
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.017778
| 0
| 0.115556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
26fb98b48dfb1f097b48d4f89b5c33143c4900ad
| 3,650
|
py
|
Python
|
envio1/discreta.py
|
leonheld/INE5118
|
356ac119275237f6291efd2e8d5df0cc9cb69c52
|
[
"WTFPL"
] | null | null | null |
envio1/discreta.py
|
leonheld/INE5118
|
356ac119275237f6291efd2e8d5df0cc9cb69c52
|
[
"WTFPL"
] | null | null | null |
envio1/discreta.py
|
leonheld/INE5118
|
356ac119275237f6291efd2e8d5df0cc9cb69c52
|
[
"WTFPL"
] | null | null | null |
# Discrete distribution of the generation of a given Pokemon. Range: 1, 2 ... n
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import collections
from collections import Counter
# settings for using LaTeX in the plot labels
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
sns.set(style = "darkgrid", context = "paper")
# again no pandas here, just a regex and some vim macros
var = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]
var_counted = Counter(var)  # builds a dict of the form {value: number of occurrences}
var_sorted_dict = dict(sorted(var_counted.items()))  # sorts the dict by key
sum_of_values = sum(var_sorted_dict.values())
for key in var_sorted_dict:
var_sorted_dict[key] /= sum_of_values  # for each key in the dict, divide by the total to get the relative frequency
dist = 1/(np.amax(var) - np.amin(var))  # 1/(b - a)
x = var_sorted_dict.keys()  # use the keys themselves as x, better than building a vector x = [1, 2, 3, ...]
sns.distplot(var, kde=True, norm_hist=1, color = "#3dc1ff")
# plt.subplot(211)
# plt.bar(x, dist, color = "#9b59b6")
# plt.ylabel(r'Relative frequency')
# plt.subplot(212)
# plt.bar(list(var_sorted_dict.keys()), var_sorted_dict.values(), color = "#e74c3c")
# plt.xlabel(r'Pokemon generation')
# plt.ylabel(r'Relative frequency')
plt.show()
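# A minimal self-contained check of the relative-frequency computation above
# (not part of the original script): after dividing each count by the total,
# the values must sum to 1. The small sample below is illustrative only.
from collections import Counter  # already imported above; repeated so the snippet stands alone
_sample = [1, 1, 2, 3, 3, 3]
_counts = Counter(_sample)                                    # {1: 2, 2: 1, 3: 3}
_total = sum(_counts.values())
_rel_freq = {k: v / _total for k, v in sorted(_counts.items())}
assert abs(sum(_rel_freq.values()) - 1.0) < 1e-9              # relative frequencies sum to 1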
| 101.388889
| 2,406
| 0.467397
| 1,006
| 3,650
| 1.67495
| 0.114314
| 0.195846
| 0.291988
| 0.386944
| 0.511573
| 0.511573
| 0.474777
| 0.474777
| 0.474777
| 0.474777
| 0
| 0.307549
| 0.266849
| 3,650
| 36
| 2,407
| 101.388889
| 0.322123
| 0.175068
| 0
| 0
| 0
| 0
| 0.011018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.277778
| 0
| 0.277778
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f80a727fff1370f402314f76478efe7dcb215acc
| 98
|
py
|
Python
|
abeja/train/__init__.py
|
abeja-inc/abeja-platform-sdk
|
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
|
[
"Apache-2.0"
] | 2
|
2020-10-20T18:38:16.000Z
|
2020-10-20T20:12:35.000Z
|
abeja/train/__init__.py
|
abeja-inc/abeja-platform-sdk
|
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
|
[
"Apache-2.0"
] | 30
|
2020-04-07T01:15:47.000Z
|
2020-11-18T03:25:19.000Z
|
abeja/train/__init__.py
|
abeja-inc/abeja-platform-sdk
|
97cfc99b11ffc1fccb3f527435277bc89e18b8c3
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
from abeja.train.client import Client
from abeja.train.api.client import APIClient
| 24.5
| 44
| 0.816327
| 15
| 98
| 5.333333
| 0.6
| 0.225
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.112245
| 98
| 3
| 45
| 32.666667
| 0.908046
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f8396bfde6041f9cc017ab4da665e40f244e437c
| 60,259
|
py
|
Python
|
tests/test_responses.py
|
livebungalow/certn-python-public
|
aa411626a2918e37c3bbe26023b1b97014860414
|
[
"MIT"
] | null | null | null |
tests/test_responses.py
|
livebungalow/certn-python-public
|
aa411626a2918e37c3bbe26023b1b97014860414
|
[
"MIT"
] | null | null | null |
tests/test_responses.py
|
livebungalow/certn-python-public
|
aa411626a2918e37c3bbe26023b1b97014860414
|
[
"MIT"
] | 1
|
2019-07-04T00:19:15.000Z
|
2019-07-04T00:19:15.000Z
|
APPLICATION_GOOD_BODY = {
'information': {
'first_name': 'Andrew James',
'last_name': 'McLeod',
'date_of_birth': '1987-03-04',
'addresses': [
{
'address': '3023 BODEGA ROAD',
'city': 'VICTORIA',
'province_state': 'BC',
'country': 'CA',
}
],
}
}
AUTH_RESPONSE = {
'token': 'e5e4c777acb3c2a4e4234a282a8ac507c0be24708e6dfe121de563dda397784b',
'user_id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
}
AUTH_RESPONSE_FAIL = {'detail': 'Invalid username/password.'}
AUTH_RESPONSE_LIST_LOGINS = [
{'created': '2019-03-01T18:56:34.702679Z', 'expires': None, 'token_key': 'e21e9b59'}
]
LISTING_RESPONSE = {
'id': '86dd2316-f31d-4f5e-86a1-3a5e91a75a8c',
'created': '2019-03-01T14:34:32.753995Z',
'modified': '2019-03-01T14:34:32.949006Z',
'last_updated': '2019-03-01T14:34:32.754044Z',
'name': None,
'unit': None,
'move_in_date': None,
'move_in_immediately': False,
'rent': '1000.00',
'rent_range': None,
'security_deposit_amount': None,
'pet_deposit': False,
'pet_deposit_amount': None,
'storage_locker': False,
'property_manager_terms': None,
'is_active': True,
'is_public': False,
'url_code': '86dd2316-f31d-4f5e-86a1-3a5e91a75a8c',
'is_placeholder': False,
'owner': {
'id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
'email': '***********************',
'team': {
'id': 'c7105cf6-0ae6-4d0b-9373-45f3c25b83f5',
'name': 'Bungalow',
'country': 'CA',
'industry': '',
'team_type': 'PM',
'app_url': 'https://demo-app.certn.co/',
'settings_config': {
'get_org_name': 'Bungalow',
'org_name': 'Bungalow',
'org_logo_link': None,
'org_primary_color': None,
'behavioural_test_req': False,
'emergency_contact_req': False,
'personal_ref_req': False,
'education_req': False,
'tenancy_years_amount_req': 2,
'tenancy_ref_amount_req': 1,
'tenancy_ref_email_req': False,
'tenancy_ref_phone_req': True,
'employer_records_amount_req': 1,
'employer_years_amount_req': 0,
'employer_ref_req': True,
'employer_ref_email_req': False,
'employer_ref_phone_req': False,
'document_required': False,
'cover_letter_req': False,
'government_id_req': True,
'proof_of_income_req': True,
'resume_req': False,
'personal_ref_amount_req': 1,
'request_base': True,
'request_behavioural': True,
'request_softcheck': True,
'request_equifax': False,
'request_identity_verification': False,
'request_enhanced_identity_verification': False,
'request_criminal_record_check': False,
'request_enhanced_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_education_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'request_employer_references': True,
'request_address_references': True,
'exclude_softcheck_possible_matches': False,
},
'billing_plan': {
'pm_softcheck_price': '9.99',
'hr_softcheck_price': '9.99',
'pm_equifax_price': '14.99',
'hr_equifax_price': '14.99',
'pm_identity_verification_price': '1.99',
'hr_identity_verification_price': '1.99',
'pm_enhanced_identity_verification_price': '4.99',
'hr_enhanced_identity_verification_price': '4.99',
'pm_criminal_record_check_price': '29.99',
'hr_criminal_record_check_price': '29.99',
'pm_motor_vehicle_records_price': '24.99',
'hr_motor_vehicle_records_price': '24.99',
'pm_us_criminal_softcheck_price': '14.99',
'hr_us_criminal_softcheck_price': '14.99',
'pm_us_ssn_verification_price': '4.99',
'hr_us_ssn_verification_price': '4.99',
'pm_education_verification_price': '14.99',
'hr_education_verification_price': '14.99',
'pm_credential_verification_price': '14.99',
'hr_credential_verification_price': '14.99',
'pm_employment_verification_3_yrs_price': '14.99',
'hr_employment_verification_3_yrs_price': '14.99',
'pm_employment_verification_5_yrs_price': '19.99',
'hr_employment_verification_5_yrs_price': '19.99',
'pm_employment_verification_7_yrs_price': '22.99',
'hr_employment_verification_7_yrs_price': '22.99',
'pm_us_criminal_record_check_tier_1_price': '15.00',
'hr_us_criminal_record_check_tier_1_price': '15.00',
'pm_us_criminal_record_check_tier_2_price': '30.00',
'hr_us_criminal_record_check_tier_2_price': '30.00',
'pm_us_criminal_record_check_tier_3_price': '40.00',
'hr_us_criminal_record_check_tier_3_price': '40.00',
'pm_employer_references_price': '4.99',
'pm_address_references_price': '4.99',
'pm_education_references_price': '4.99',
'pm_credential_references_price': '4.99',
'hr_employer_references_price': '4.99',
'hr_address_references_price': '4.99',
'hr_education_references_price': '4.99',
'hr_credential_references_price': '4.99',
},
},
},
'property': {
'id': 'ef834890-ce60-4b95-bee9-69a12c59d4f8',
'status': 'N',
'get_status_display': 'No Vacancy',
'created': '2019-03-01T14:34:32.601691Z',
'modified': '2019-03-01T14:34:32.601720Z',
'last_updated': '2019-03-01T14:34:32.601755Z',
'building': None,
'building_code': None,
'address': '123 Fakestreet',
'city': 'Victoria',
'province_state': 'BC',
'country': 'N',
'postal_code': None,
'is_active': True,
'owner': {
'id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
'email': '************************',
'team': {
'id': 'c7105cf6-0ae6-4d0b-9373-45f3c25b83f5',
'name': 'Bungalow',
'country': 'CA',
'industry': '',
'team_type': 'PM',
'app_url': 'https://demo-app.certn.co/',
'settings_config': {
'get_org_name': 'Bungalow',
'org_name': 'Bungalow',
'org_logo_link': None,
'org_primary_color': None,
'behavioural_test_req': False,
'emergency_contact_req': False,
'personal_ref_req': False,
'education_req': False,
'tenancy_years_amount_req': 2,
'tenancy_ref_amount_req': 1,
'tenancy_ref_email_req': False,
'tenancy_ref_phone_req': True,
'employer_records_amount_req': 1,
'employer_years_amount_req': 0,
'employer_ref_req': True,
'employer_ref_email_req': False,
'employer_ref_phone_req': False,
'document_required': False,
'cover_letter_req': False,
'government_id_req': True,
'proof_of_income_req': True,
'resume_req': False,
'personal_ref_amount_req': 1,
'request_base': True,
'request_behavioural': True,
'request_softcheck': True,
'request_equifax': False,
'request_identity_verification': False,
'request_enhanced_identity_verification': False,
'request_criminal_record_check': False,
'request_enhanced_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_education_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'request_employer_references': True,
'request_address_references': True,
'exclude_softcheck_possible_matches': False,
},
'billing_plan': {
'pm_softcheck_price': '9.99',
'hr_softcheck_price': '9.99',
'pm_equifax_price': '14.99',
'hr_equifax_price': '14.99',
'pm_identity_verification_price': '1.99',
'hr_identity_verification_price': '1.99',
'pm_enhanced_identity_verification_price': '4.99',
'hr_enhanced_identity_verification_price': '4.99',
'pm_criminal_record_check_price': '29.99',
'hr_criminal_record_check_price': '29.99',
'pm_motor_vehicle_records_price': '24.99',
'hr_motor_vehicle_records_price': '24.99',
'pm_us_criminal_softcheck_price': '14.99',
'hr_us_criminal_softcheck_price': '14.99',
'pm_us_ssn_verification_price': '4.99',
'hr_us_ssn_verification_price': '4.99',
'pm_education_verification_price': '14.99',
'hr_education_verification_price': '14.99',
'pm_credential_verification_price': '14.99',
'hr_credential_verification_price': '14.99',
'pm_employment_verification_3_yrs_price': '14.99',
'hr_employment_verification_3_yrs_price': '14.99',
'pm_employment_verification_5_yrs_price': '19.99',
'hr_employment_verification_5_yrs_price': '19.99',
'pm_employment_verification_7_yrs_price': '22.99',
'hr_employment_verification_7_yrs_price': '22.99',
'pm_us_criminal_record_check_tier_1_price': '15.00',
'hr_us_criminal_record_check_tier_1_price': '15.00',
'pm_us_criminal_record_check_tier_2_price': '30.00',
'hr_us_criminal_record_check_tier_2_price': '30.00',
'pm_us_criminal_record_check_tier_3_price': '40.00',
'hr_us_criminal_record_check_tier_3_price': '40.00',
'pm_employer_references_price': '4.99',
'pm_address_references_price': '4.99',
'pm_education_references_price': '4.99',
'pm_credential_references_price': '4.99',
'hr_employer_references_price': '4.99',
'hr_address_references_price': '4.99',
'hr_education_references_price': '4.99',
'hr_credential_references_price': '4.99',
},
},
},
'listing_count': 1,
'full_address': '123 Fakestreet Victoria BC N ',
'url_code': 'ef834890-ce60-4b95-bee9-69a12c59d4f8',
},
'applicant_count': 0,
'new_applicant_count': 0,
'is_psychometric_required': True,
'notification_list': [],
'selected_application': None,
'use_team_link': False,
'length_of_lease': None,
}
LISTINGS_LIST_RESPONSE = {
'count': 1,
'next': 'http://demo-api.certn.co/api/v2/listings/?page=2',
'previous': None,
'results': [LISTING_RESPONSE],
}
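# A minimal pagination-walking sketch (hypothetical, not taken from this file):
# LISTINGS_LIST_RESPONSE mirrors a DRF-style paginated payload with
# count / next / previous / results, so a client would typically follow the
# 'next' links until they run out. The `requests` usage and the Authorization
# header format below are assumptions.
import requests

def _iter_paginated(url, token):
    """Yield every item from a paginated endpoint, following 'next' links."""
    while url:
        page = requests.get(url, headers={'Authorization': 'Token %s' % token}).json()
        for item in page['results']:
            yield item
        url = page['next']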
INVITE_BODY = {'email': 'fake@fake.com', 'email_applicants': False}
QUICK_RESPONSE = {
'notes': None,
'is_favourite': False,
'has_viewed_listing': None,
'is_viewed': False,
'id': 'db13d65c-4311-46e7-8d4d-97feb693e113',
'created': '2019-03-04T14:51:24.323137Z',
'modified': '2019-03-04T14:51:24.582378Z',
'submitted_time': '2019-03-04T14:51:24.468514Z',
'last_updated': '2019-03-04T14:51:24.323304Z',
'status': 'Analyzing',
'applicant_type': 'Quick Screen',
'monthly_cost': None,
'is_equifax_eligible': True,
'certn_score_label': 'NONE',
'is_submitted': True,
'softcheck_discounted': False,
'equifax_discounted': False,
'is_cosigner': False,
'email_references': False,
'tenancy_verified': None,
'employment_verified': None,
'employment_verification': 'NONE',
'certn_score': None,
'late_rent_potential': None,
'damage_to_property': None,
'eviction_potential': None,
'tenancy_length': None,
'late_rent_potential_label': 'None',
'damage_to_property_label': 'None',
'eviction_potential_label': 'None',
'tenancy_length_label': 'None',
'applicant_account': {'id': 'bf5080d0-7f73-4c8f-9cbd-afcf54450962', 'email': None},
'application': {
'id': 'd799e68f-3491-4645-95c7-8f42b3c2976d',
'created': '2019-03-04T14:51:24.318595Z',
'modified': '2019-03-04T14:51:24.591766Z',
'owner': {
'id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
'email': '*******************',
'team': {
'id': 'c7105cf6-0ae6-4d0b-9373-45f3c25b83f5',
'name': 'Bungalow',
'country': 'CA',
'industry': '',
'team_type': 'PM',
'app_url': 'https://demo-app.certn.co/',
'settings_config': {
'get_org_name': 'Bungalow',
'org_name': 'Bungalow',
'org_logo_link': None,
'org_primary_color': None,
'behavioural_test_req': False,
'emergency_contact_req': False,
'personal_ref_req': False,
'education_req': False,
'tenancy_years_amount_req': 2,
'tenancy_ref_amount_req': 1,
'tenancy_ref_email_req': False,
'tenancy_ref_phone_req': True,
'employer_records_amount_req': 1,
'employer_years_amount_req': 0,
'employer_ref_req': True,
'employer_ref_email_req': False,
'employer_ref_phone_req': False,
'document_required': False,
'cover_letter_req': False,
'government_id_req': True,
'proof_of_income_req': True,
'resume_req': False,
'personal_ref_amount_req': 1,
'request_base': True,
'request_behavioural': True,
'request_softcheck': True,
'request_equifax': False,
'request_identity_verification': False,
'request_enhanced_identity_verification': False,
'request_criminal_record_check': False,
'request_enhanced_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_education_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'request_employer_references': True,
'request_address_references': True,
'exclude_softcheck_possible_matches': False,
},
'billing_plan': {
'pm_softcheck_price': '9.99',
'hr_softcheck_price': '9.99',
'pm_equifax_price': '14.99',
'hr_equifax_price': '14.99',
'pm_identity_verification_price': '1.99',
'hr_identity_verification_price': '1.99',
'pm_enhanced_identity_verification_price': '4.99',
'hr_enhanced_identity_verification_price': '4.99',
'pm_criminal_record_check_price': '29.99',
'hr_criminal_record_check_price': '29.99',
'pm_motor_vehicle_records_price': '24.99',
'hr_motor_vehicle_records_price': '24.99',
'pm_us_criminal_softcheck_price': '14.99',
'hr_us_criminal_softcheck_price': '14.99',
'pm_us_ssn_verification_price': '4.99',
'hr_us_ssn_verification_price': '4.99',
'pm_education_verification_price': '14.99',
'hr_education_verification_price': '14.99',
'pm_credential_verification_price': '14.99',
'hr_credential_verification_price': '14.99',
'pm_employment_verification_3_yrs_price': '14.99',
'hr_employment_verification_3_yrs_price': '14.99',
'pm_employment_verification_5_yrs_price': '19.99',
'hr_employment_verification_5_yrs_price': '19.99',
'pm_employment_verification_7_yrs_price': '22.99',
'hr_employment_verification_7_yrs_price': '22.99',
'pm_us_criminal_record_check_tier_1_price': '15.00',
'hr_us_criminal_record_check_tier_1_price': '15.00',
'pm_us_criminal_record_check_tier_2_price': '30.00',
'hr_us_criminal_record_check_tier_2_price': '30.00',
'pm_us_criminal_record_check_tier_3_price': '40.00',
'hr_us_criminal_record_check_tier_3_price': '40.00',
'pm_employer_references_price': '4.99',
'pm_address_references_price': '4.99',
'pm_education_references_price': '4.99',
'pm_credential_references_price': '4.99',
'hr_employer_references_price': '4.99',
'hr_address_references_price': '4.99',
'hr_education_references_price': '4.99',
'hr_credential_references_price': '4.99',
},
},
},
'listing': None,
'property': None,
'status': 'Complete',
'applicants': [
{
'id': '72af8e90-832f-4aee-89e4-3bcf090aa758',
'status': 'Analyzing',
'first_name': 'Andrew James',
'last_name': 'McLeod',
'email': None,
'certn_score': None,
'share_of_rent': None,
'is_cosigner': False,
'application_url': None,
'report_url': (
'https://demo-app.certn.co/pm/applications/'
'72af8e90-832f-4aee-89e4-3bcf090aa758/'
),
}
],
'is_active': True,
'is_selected': False,
'applicant_status': 'N',
'get_applicant_status_display': 'None',
},
'behavioural_result_summary': (
'The Behavioural score is determined by '
'analysing psychometric personality tests,'
' social media analysis, and more.'
),
'information': {
'id': '9de408e3-6fa8-4520-b772-236c674a885d',
'created': '2019-03-04T14:51:24.302393Z',
'modified': '2019-03-04T14:51:24.335917Z',
'id_url': None,
'id_file_name': None,
'first_name': 'Andrew James',
'last_name': 'McLeod',
'middle_name': None,
'employers': [],
'applicant_created': False,
'applicant_verified': False,
'educations': [],
'cover_letter': None,
'addresses': [
{
'id': '15d7a997-9b4a-4825-b853-2d8276f2892b',
'created': '2019-03-04T14:51:24.307633Z',
'current': True,
'address': '3023 BODEGA ROAD',
'unit': None,
'rent_or_own': 'R',
'city': 'VICTORIA',
'province_state': 'BC',
'country': 'CA',
'postal_code': None,
'cost': None,
'start_date': None,
'end_date': None,
'reason_for_leaving': 'N',
'landlords_first_name': None,
'landlords_last_name': None,
'landlords_phone': None,
'landlords_email': None,
'reference': None,
'full_address': ' 3023 BODEGA ROAD VICTORIA BC CA',
'information': {'first_name': 'Andrew James', 'last_name': 'McLeod'},
'address_reference': None,
'other_reason_for_leaving': None,
'auto_address': None,
'place_id': None,
'verification': None,
'consistency': None,
'rent_or_own_label': 'Rent',
'reference_verified': False,
'other_province_state': None,
'county': None,
}
],
'date_of_birth': '1987-03-04',
'phone_number': None,
'co_signer': None,
'applicant_account': {'id': 'bf5080d0-7f73-4c8f-9cbd-afcf54450962', 'email': None},
'co_signer_living_with_applicant': None,
'co_signer_association': 'N',
'co_signer_first_name': None,
'co_signer_last_name': None,
'co_signer_email': None,
'co_signer_phone': None,
'car': None,
'car_make': None,
'car_model': None,
'car_year': None,
'car_color': None,
'health_insurance_label': 'No Coverage',
'car_prov_state': None,
'car_plate': None,
'smoke': None,
'conviction_explanation': None,
'personal_reference_association_label': 'None',
'bankruptcy_explanation': None,
'eviction_explanation': None,
'status': 'C',
'rent_refusal_explanation': None,
'health_insurance': 'NC',
'feedback': None,
'pets': None,
'license_number': None,
'license_valid': None,
'license_prov_state': None,
'pets_type': None,
'emergency_contact': False,
'emergency_contact_first_name': None,
'emergency_contact_last_name': None,
'emergency_contact_email': None,
'expected_tenancy_length': None,
'personal_reference': False,
'emergency_contact_phone': None,
'personal_reference_first_name': None,
'personal_reference_last_name': None,
'personal_reference_phone': None,
'personal_reference_email': None,
'personal_reference_association': 'N',
'occupants': [],
'sin_ssn': None,
'facebook_link': None,
'twitter_link': None,
'linkedin_link': None,
'googleplus_link': None,
'desired_move_in_date': None,
'skills': [],
'documents': [],
'pet_details': [],
'rent_refusals': [],
'bankruptcies': [],
'evictions': [],
'convictions': [],
'co_signer_association_label': 'None',
'former_names': None,
'last_name_at_birth': None,
'alias': None,
'gender': None,
'birth_city': None,
'birth_province_state': None,
'birth_country': None,
'birth_other_province_state': None,
'personal_references': [],
'terms_accepted': False,
'rcmp_consent_given': False,
'co_signer_report_url': None,
'phone': None,
},
'psychometric_test': None,
'facebook': None,
'linkedin': None,
'informational_result': None,
'behavioural_result': None,
'risk_result': {
'id': 'dfec7632-e148-4e5c-a6f9-e018a802d75b',
'status': 'NONE',
'overall_score': 'NONE',
'risk_evaluations': [],
'red_flags': None,
'green_flags': None,
'description': (
'The social score is based on self-provided information from the '
'applicant and an analysis of the applicants information available '
'to Certn. This includes a criminal identity scan, social profile '
'scan, as well as other important informational data points. *Note '
'that when criminal identities are discovered the overall Certn '
'score is not impacted by the results found unless the match '
'similarity is above our confidence threshold of 95%.'
),
'risk_description': (
'The criminal identity risk is an assessment of the risk posed to '
'assets given the results of the criminal identity scan. Please '
'review any risk relevant information from our negative news and '
'criminal database analysis below.'
),
'similarity_description': (
'The criminal identity similarity percentage is a comparison between '
'the applicants self-provided information and the corresponding '
'information we find in our databases. As a general guideline, if '
'the results of our criminal identity scan has a similarity '
'percentage of above 95%, Certn can confidently predict that the '
'information presented below corresponds correctly to the applicant '
'being screened. However, if the criminal identity scan returns results '
'with a similarity percentage below 95%, the onus falls to the client '
'to accurately verify the results.'
),
'match_description': (
'The criminal identity Match Score is a comparison between the '
'applicants self-provided information and the corresponding '
'information we find in our databases. As a general guideline, if '
'the results of our criminal identity scan finds a "Likely Match", '
'Certn can confidently predict that the information presented '
'below corresponds correctly to the applicant being screened. '
'However, if the criminal identity scan returns reasults with a '
'Match Score of "Possible Match", the onus falls on the client to '
'accurately verify the results.'
),
},
'equifax_result': None,
'identity_verification': None,
'enhanced_identity_verification': None,
'manual_id_verification': None,
'late_rent_potential_description': (
'Late rent potential risk is assessed using an analysis of an '
'applicants financial stability and / or behavioural credibility '
'characteristics that predicts the likelihood of late or missed payments.'
),
'damage_to_property_description': (
'Damage to property risk is assessed using an analysis of an applicants '
'personal history and / or behavioural cleanliness and neighbour '
'characteristics that predicts the likelihood of causing damage to property.'
),
'eviction_potential_description': (
'Early vacancy risk if assessed using an analysis of an applicants '
'tenancy history and / or behavioural stability characteristics that '
'predicts the likelihood of breaking a lease.'
),
'applicant_result_description': (
'Certn Rating and Certn Score is a summary assessment of the applicants '
'unique characteristics and personal information. Andrew James McLeod '
'has received a Certn rating of "NONE" which indicates an analysis with '
'no potential issues. Although the score is a summary assessment, Certn '
'still recommends you carefully review each section of this report to '
'determine if the applicant meets your specific requirements.'
),
'applicant_result_summary': (
'The Applicant score is determined by analysing tenancy history, '
'employment history, and more.'
),
'social_result_summary': (
'The Social score is determined by analysing criminal identity, negative '
'news, social profile scans, and more.'
),
'financial_result_summary': (
'The Financial score is determined by analysing an Equifax credit check, '
'and more.'
),
'identity_verified': None,
'identity_verified_summary': (
'Upgrade Certn Report to verify Andrew James McLeods identity.'
),
'request_equifax': False,
'request_softcheck': True,
'request_identity_verification': False,
'request_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_behavioural': True,
'request_enhanced_identity_verification': False,
'request_education_verification': False,
'request_credential_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'can_upgrade': True,
'reference_result': None,
'tag': None,
'comments': [],
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'country': 'CA',
'request_employer_references': True,
'request_address_references': True,
'request_us_criminal_record_check_tier_1': False,
'request_us_criminal_record_check_tier_2': False,
'request_us_criminal_record_check_tier_3': False,
'us_criminal_record_check_result': None,
}
APPLICANT_GET_RESPONSE = {
'notes': None,
'is_favourite': False,
'has_viewed_listing': None,
'is_viewed': False,
'id': 'db13d65c-4311-46e7-8d4d-97feb693e113',
'created': '2019-03-04T17:00:38.684380Z',
'modified': '2019-03-04T17:00:38.919122Z',
'submitted_time': '2019-03-04T17:00:38.837847Z',
'last_updated': '2019-03-04T17:00:38.684535Z',
'status': 'Analyzing',
'applicant_type': 'Quick Screen',
'monthly_cost': None,
'is_equifax_eligible': True,
'certn_score_label': 'NONE',
'is_submitted': True,
'softcheck_discounted': False,
'equifax_discounted': False,
'is_cosigner': False,
'email_references': False,
'tenancy_verified': None,
'employment_verified': None,
'employment_verification': 'NONE',
'certn_score': None,
'late_rent_potential': None,
'damage_to_property': None,
'eviction_potential': None,
'tenancy_length': None,
'late_rent_potential_label': 'None',
'damage_to_property_label': 'None',
'eviction_potential_label': 'None',
'tenancy_length_label': 'None',
'applicant_account': {'id': 'e8d7dd67-7b3f-4edb-acb9-8c28c2f10dbe', 'email': None},
'application': {
'id': '10bfda79-c571-49f5-8630-132ccef86592',
'created': '2019-03-04T17:00:38.679697Z',
'modified': '2019-03-04T17:00:38.928674Z',
'owner': {
'id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
'email': '****************',
'team': {
'id': 'c7105cf6-0ae6-4d0b-9373-45f3c25b83f5',
'name': 'Bungalow',
'country': 'CA',
'industry': '',
'team_type': 'PM',
'app_url': 'https://demo-app.certn.co/',
'settings_config': {
'get_org_name': 'Bungalow',
'org_name': 'Bungalow',
'org_logo_link': None,
'org_primary_color': None,
'behavioural_test_req': False,
'emergency_contact_req': False,
'personal_ref_req': False,
'education_req': False,
'tenancy_years_amount_req': 2,
'tenancy_ref_amount_req': 1,
'tenancy_ref_email_req': False,
'tenancy_ref_phone_req': True,
'employer_records_amount_req': 1,
'employer_years_amount_req': 0,
'employer_ref_req': True,
'employer_ref_email_req': False,
'employer_ref_phone_req': False,
'document_required': False,
'cover_letter_req': False,
'government_id_req': True,
'proof_of_income_req': True,
'resume_req': False,
'personal_ref_amount_req': 1,
'request_base': True,
'request_behavioural': True,
'request_softcheck': True,
'request_equifax': False,
'request_identity_verification': False,
'request_enhanced_identity_verification': False,
'request_criminal_record_check': False,
'request_enhanced_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_education_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'request_employer_references': True,
'request_address_references': True,
'exclude_softcheck_possible_matches': False,
},
'billing_plan': {
'pm_softcheck_price': '9.99',
'hr_softcheck_price': '9.99',
'pm_equifax_price': '14.99',
'hr_equifax_price': '14.99',
'pm_identity_verification_price': '1.99',
'hr_identity_verification_price': '1.99',
'pm_enhanced_identity_verification_price': '4.99',
'hr_enhanced_identity_verification_price': '4.99',
'pm_criminal_record_check_price': '29.99',
'hr_criminal_record_check_price': '29.99',
'pm_motor_vehicle_records_price': '24.99',
'hr_motor_vehicle_records_price': '24.99',
'pm_us_criminal_softcheck_price': '14.99',
'hr_us_criminal_softcheck_price': '14.99',
'pm_us_ssn_verification_price': '4.99',
'hr_us_ssn_verification_price': '4.99',
'pm_education_verification_price': '14.99',
'hr_education_verification_price': '14.99',
'pm_credential_verification_price': '14.99',
'hr_credential_verification_price': '14.99',
'pm_employment_verification_3_yrs_price': '14.99',
'hr_employment_verification_3_yrs_price': '14.99',
'pm_employment_verification_5_yrs_price': '19.99',
'hr_employment_verification_5_yrs_price': '19.99',
'pm_employment_verification_7_yrs_price': '22.99',
'hr_employment_verification_7_yrs_price': '22.99',
'pm_us_criminal_record_check_tier_1_price': '15.00',
'hr_us_criminal_record_check_tier_1_price': '15.00',
'pm_us_criminal_record_check_tier_2_price': '30.00',
'hr_us_criminal_record_check_tier_2_price': '30.00',
'pm_us_criminal_record_check_tier_3_price': '40.00',
'hr_us_criminal_record_check_tier_3_price': '40.00',
'pm_employer_references_price': '4.99',
'pm_address_references_price': '4.99',
'pm_education_references_price': '4.99',
'pm_credential_references_price': '4.99',
'hr_employer_references_price': '4.99',
'hr_address_references_price': '4.99',
'hr_education_references_price': '4.99',
'hr_credential_references_price': '4.99',
},
},
},
'listing': None,
'property': None,
'status': 'Complete',
'applicants': [
{
'id': 'db13d65c-4311-46e7-8d4d-97feb693e113',
'status': 'Analyzing',
'first_name': 'Andrew James',
'last_name': 'McLeod',
'email': None,
'certn_score': None,
'share_of_rent': None,
'is_cosigner': False,
'application_url': None,
'report_url': (
'https://demo-app.certn.co/pm/applications/'
'db13d65c-4311-46e7-8d4d-97feb693e113/'
),
}
],
'is_active': True,
'is_selected': False,
'applicant_status': 'N',
'get_applicant_status_display': 'None',
},
'behavioural_result_summary': (
'The Behavioural score is determined by analysing psychometric '
'personality tests, social media analysis, and more.'
),
'information': {
'id': '00db959b-eb37-4e7c-83bf-8d68a6abcecd',
'created': '2019-03-04T17:00:38.663344Z',
'modified': '2019-03-04T17:00:38.707965Z',
'id_url': None,
'id_file_name': None,
'first_name': 'Andrew James',
'last_name': 'McLeod',
'middle_name': None,
'employers': [],
'applicant_created': False,
'applicant_verified': False,
'educations': [],
'cover_letter': None,
'addresses': [
{
'id': '0a1e0897-8239-42c3-bba4-52839d975fd3',
'created': '2019-03-04T17:00:38.668610Z',
'current': True,
'address': '3023 BODEGA ROAD',
'unit': None,
'rent_or_own': 'R',
'city': 'VICTORIA',
'province_state': 'BC',
'country': 'CA',
'postal_code': None,
'cost': None,
'start_date': None,
'end_date': None,
'reason_for_leaving': 'N',
'landlords_first_name': None,
'landlords_last_name': None,
'landlords_phone': None,
'landlords_email': None,
'reference': None,
'full_address': ' 3023 BODEGA ROAD VICTORIA BC CA',
'information': {'first_name': 'Andrew James', 'last_name': 'McLeod'},
'address_reference': None,
'other_reason_for_leaving': None,
'auto_address': None,
'place_id': None,
'verification': None,
'consistency': None,
'rent_or_own_label': 'Rent',
'reference_verified': False,
'other_province_state': None,
'county': None,
}
],
'date_of_birth': '1987-03-04',
'phone_number': None,
'co_signer': None,
'applicant_account': {'id': 'e8d7dd67-7b3f-4edb-acb9-8c28c2f10dbe', 'email': None},
'co_signer_living_with_applicant': None,
'co_signer_association': 'N',
'co_signer_first_name': None,
'co_signer_last_name': None,
'co_signer_email': None,
'co_signer_phone': None,
'car': None,
'car_make': None,
'car_model': None,
'car_year': None,
'car_color': None,
'health_insurance_label': 'No Coverage',
'car_prov_state': None,
'car_plate': None,
'smoke': None,
'conviction_explanation': None,
'personal_reference_association_label': 'None',
'bankruptcy_explanation': None,
'eviction_explanation': None,
'status': 'C',
'rent_refusal_explanation': None,
'health_insurance': 'NC',
'feedback': None,
'pets': None,
'license_number': None,
'license_valid': None,
'license_prov_state': None,
'pets_type': None,
'emergency_contact': False,
'emergency_contact_first_name': None,
'emergency_contact_last_name': None,
'emergency_contact_email': None,
'expected_tenancy_length': None,
'personal_reference': False,
'emergency_contact_phone': None,
'personal_reference_first_name': None,
'personal_reference_last_name': None,
'personal_reference_phone': None,
'personal_reference_email': None,
'personal_reference_association': 'N',
'occupants': [],
'sin_ssn': None,
'facebook_link': None,
'twitter_link': None,
'linkedin_link': None,
'googleplus_link': None,
'desired_move_in_date': None,
'skills': [],
'documents': [],
'pet_details': [],
'rent_refusals': [],
'bankruptcies': [],
'evictions': [],
'convictions': [],
'co_signer_association_label': 'None',
'former_names': None,
'last_name_at_birth': None,
'alias': None,
'gender': None,
'birth_city': None,
'birth_province_state': None,
'birth_country': None,
'birth_other_province_state': None,
'personal_references': [],
'terms_accepted': False,
'rcmp_consent_given': False,
'co_signer_report_url': None,
'phone': None,
},
'psychometric_test': None,
'facebook': None,
'linkedin': None,
'informational_result': None,
'behavioural_result': None,
'risk_result': {
'id': '71c4d042-c19e-46e1-a81e-3779b992ae03',
'status': 'NONE',
'overall_score': 'NONE',
'risk_evaluations': [],
'red_flags': None,
'green_flags': None,
'description': (
'The social score is based on self-provided information from the '
'applicant and an analysis of the applicants information available '
'to Certn. This includes a criminal identity scan, social profile '
'scan, as well as other important informational data points. *Note '
'that when criminal identities are discovered the overall Certn '
'score is not impacted by the results found unless the match '
'similarity is above our confidence threshold of 95%.'
),
'risk_description': (
'The criminal identity risk is an assessment of the risk posed to '
'assets given the results of the criminal identity scan. Please '
'review any risk relevant information from our negative news and '
'criminal database analysis below.'
),
'similarity_description': (
'The criminal identity similarity percentage is a comparison between '
'the applicants self-provided information and the corresponding '
'information we find in our databases. As a general guideline, if '
'the results of our criminal identity scan has a similarity percentage '
'of above 95%, Certn can confidently predict that the information '
'presented below corresponds correctly to the applicant being '
'screened. However, if the criminal identity scan returns results '
'with a similarity percentage below 95%, the onus falls to the '
'client to accurately verify the results.'
),
'match_description': (
'The criminal identity Match Score is a comparison between the '
'applicants self-provided information and the corresponding '
'information we find in our databases. As a general guideline, if '
'the results of our criminal identity scan finds a "Likely Match", '
'Certn can confidently predict that the information presented below '
'corresponds correctly to the applicant being screened. However, if '
'the criminal identity scan returns reasults with a Match Score of '
'"Possible Match:", the onus falls on the client to accurately verify '
'the results.'
),
},
'equifax_result': None,
'identity_verification': None,
'enhanced_identity_verification': None,
'manual_id_verification': None,
'late_rent_potential_description': (
'Late rent potential risk is assessed using an analysis of an applicants '
'financial stability and / or behavioural credibility characteristics '
'that predicts the likelihood of late or missed payments.'
),
'damage_to_property_description': (
'Damage to property risk is assessed using an analysis of an applicants '
'personal history and / or behavioural cleanliness and neighbour '
'characteristics that predicts the likelihood of causing damage to property.'
),
'eviction_potential_description': (
'Early vacancy risk if assessed using an analysis of an applicants '
'tenancy history and / or behavioural stability characteristics that '
'predicts the likelihood of breaking a lease.'
),
'applicant_result_description': (
'Certn Rating and Certn Score is a summary assessment of the applicants '
'unique characteristics and personal information. Andrew James McLeod has '
'received a Certn rating of "NONE" which indicates an analysis with no '
'potential issues. Although the score is a summary assessment, Certn '
'still recommends you carefully review each section of this report to '
'determine if the applicant meets your specific requirements.'
),
'applicant_result_summary': (
'The Applicant score is determined by analysing tenancy history, '
'employment history, and more.'
),
'social_result_summary': (
'The Social score is determined by analysing criminal identity, negative '
'news, social profile scans, and more.'
),
'financial_result_summary': (
'The Financial score is determined by analysing an Equifax credit check, '
'and more.'
),
'identity_verified': None,
'identity_verified_summary': (
'Upgrade Certn Report to verify Andrew James McLeods identity.'
),
'request_equifax': False,
'request_softcheck': True,
'request_identity_verification': False,
'request_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_behavioural': True,
'request_enhanced_identity_verification': False,
'request_education_verification': False,
'request_credential_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'can_upgrade': True,
'reference_result': None,
'tag': None,
'comments': [],
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'country': 'CA',
'request_employer_references': True,
'request_address_references': True,
'request_us_criminal_record_check_tier_1': False,
'request_us_criminal_record_check_tier_2': False,
'request_us_criminal_record_check_tier_3': False,
'us_criminal_record_check_result': None,
}
INVITE_RESPONSE = {
'id': '1be22b51-9772-4b40-b2ee-09d3595c9b72',
'created': '2019-03-04T15:13:56.691298Z',
'modified': '2019-03-04T15:13:56.691326Z',
'owner': {
'id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
'email': '******************',
'team': {
'id': 'c7105cf6-0ae6-4d0b-9373-45f3c25b83f5',
'name': 'Bungalow',
'country': 'CA',
'industry': '',
'team_type': 'PM',
'app_url': 'https://demo-app.certn.co/',
'settings_config': {
'get_org_name': 'Bungalow',
'org_name': 'Bungalow',
'org_logo_link': None,
'org_primary_color': None,
'behavioural_test_req': False,
'emergency_contact_req': False,
'personal_ref_req': False,
'education_req': False,
'tenancy_years_amount_req': 2,
'tenancy_ref_amount_req': 1,
'tenancy_ref_email_req': False,
'tenancy_ref_phone_req': True,
'employer_records_amount_req': 1,
'employer_years_amount_req': 0,
'employer_ref_req': True,
'employer_ref_email_req': False,
'employer_ref_phone_req': False,
'document_required': False,
'cover_letter_req': False,
'government_id_req': True,
'proof_of_income_req': True,
'resume_req': False,
'personal_ref_amount_req': 1,
'request_base': True,
'request_behavioural': True,
'request_softcheck': True,
'request_equifax': False,
'request_identity_verification': False,
'request_enhanced_identity_verification': False,
'request_criminal_record_check': False,
'request_enhanced_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_education_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'request_employer_references': True,
'request_address_references': True,
'exclude_softcheck_possible_matches': False,
},
'billing_plan': {
'pm_softcheck_price': '9.99',
'hr_softcheck_price': '9.99',
'pm_equifax_price': '14.99',
'hr_equifax_price': '14.99',
'pm_identity_verification_price': '1.99',
'hr_identity_verification_price': '1.99',
'pm_enhanced_identity_verification_price': '4.99',
'hr_enhanced_identity_verification_price': '4.99',
'pm_criminal_record_check_price': '29.99',
'hr_criminal_record_check_price': '29.99',
'pm_motor_vehicle_records_price': '24.99',
'hr_motor_vehicle_records_price': '24.99',
'pm_us_criminal_softcheck_price': '14.99',
'hr_us_criminal_softcheck_price': '14.99',
'pm_us_ssn_verification_price': '4.99',
'hr_us_ssn_verification_price': '4.99',
'pm_education_verification_price': '14.99',
'hr_education_verification_price': '14.99',
'pm_credential_verification_price': '14.99',
'hr_credential_verification_price': '14.99',
'pm_employment_verification_3_yrs_price': '14.99',
'hr_employment_verification_3_yrs_price': '14.99',
'pm_employment_verification_5_yrs_price': '19.99',
'hr_employment_verification_5_yrs_price': '19.99',
'pm_employment_verification_7_yrs_price': '22.99',
'hr_employment_verification_7_yrs_price': '22.99',
'pm_us_criminal_record_check_tier_1_price': '15.00',
'hr_us_criminal_record_check_tier_1_price': '15.00',
'pm_us_criminal_record_check_tier_2_price': '30.00',
'hr_us_criminal_record_check_tier_2_price': '30.00',
'pm_us_criminal_record_check_tier_3_price': '40.00',
'hr_us_criminal_record_check_tier_3_price': '40.00',
'pm_employer_references_price': '4.99',
'pm_address_references_price': '4.99',
'pm_education_references_price': '4.99',
'pm_credential_references_price': '4.99',
'hr_employer_references_price': '4.99',
'hr_address_references_price': '4.99',
'hr_education_references_price': '4.99',
'hr_credential_references_price': '4.99',
},
},
},
'listing': None,
'property': None,
'status': 'Incomplete',
'applicants': [
{
'id': 'a65edc29-ab29-490c-a50f-c45df9342531',
'status': 'Pending',
'first_name': '',
'last_name': '',
'email': 'fake@fake.com',
'certn_score': None,
'share_of_rent': None,
'is_cosigner': False,
'application_url': (
'https://demo-app.certn.co/welcome/submit?'
'&session=6018922c-f89a-4ceb-be12-06312f9519a2'
'&token=bb54742b-8e7d-4103-bf19-78602411340b'
),
'report_url': (
'https://demo-app.certn.co/pm/applications'
'/a65edc29-ab29-490c-a50f-c45df9342531/'
),
}
],
'is_active': True,
'is_selected': False,
'applicant_status': 'N',
'get_applicant_status_display': 'None',
}
PROPERTY_GET_RESULT = {
'id': 'f55abccb-ed01-4e2d-8ac8-564640306961',
'status': 'N',
'get_status_display': 'No Vacancy',
'created': '2019-03-04T21:02:03.616679Z',
'modified': '2019-03-04T21:02:03.734211Z',
'last_updated': '2019-03-04T21:02:03.616744Z',
'building': None,
'building_code': None,
'address': '123 Fakestreet',
'city': 'Abotsford',
'province_state': 'BC',
'country': 'N',
'postal_code': None,
'is_active': True,
'owner': {
'id': 'b8959d81-89aa-4e3e-9a5a-66f46bad591b',
'email': 'fake@fake.com',
'team': {
'id': 'c7105cf6-0ae6-4d0b-9373-45f3c25b83f5',
'name': 'Bungalow',
'country': 'CA',
'industry': '',
'team_type': 'PM',
'app_url': 'https://demo-app.certn.co/',
'settings_config': {
'get_org_name': 'Bungalow',
'org_name': 'Bungalow',
'org_logo_link': None,
'org_primary_color': None,
'behavioural_test_req': False,
'emergency_contact_req': False,
'personal_ref_req': False,
'education_req': False,
'tenancy_years_amount_req': 2,
'tenancy_ref_amount_req': 1,
'tenancy_ref_email_req': False,
'tenancy_ref_phone_req': True,
'employer_records_amount_req': 1,
'employer_years_amount_req': 0,
'employer_ref_req': True,
'employer_ref_email_req': False,
'employer_ref_phone_req': False,
'document_required': False,
'cover_letter_req': False,
'government_id_req': True,
'proof_of_income_req': True,
'resume_req': False,
'personal_ref_amount_req': 1,
'request_base': True,
'request_behavioural': True,
'request_softcheck': True,
'request_equifax': False,
'request_identity_verification': False,
'request_enhanced_identity_verification': False,
'request_criminal_record_check': False,
'request_enhanced_criminal_record_check': False,
'request_motor_vehicle_records': False,
'request_education_verification': False,
'request_employment_verification_3_yrs': False,
'request_employment_verification_5_yrs': False,
'request_employment_verification_7_yrs': False,
'request_us_criminal_softcheck': False,
'request_us_ssn_verification': False,
'request_employer_references': True,
'request_address_references': True,
'exclude_softcheck_possible_matches': False,
},
'billing_plan': {
'pm_softcheck_price': '9.99',
'hr_softcheck_price': '9.99',
'pm_equifax_price': '14.99',
'hr_equifax_price': '14.99',
'pm_identity_verification_price': '1.99',
'hr_identity_verification_price': '1.99',
'pm_enhanced_identity_verification_price': '4.99',
'hr_enhanced_identity_verification_price': '4.99',
'pm_criminal_record_check_price': '29.99',
'hr_criminal_record_check_price': '29.99',
'pm_motor_vehicle_records_price': '24.99',
'hr_motor_vehicle_records_price': '24.99',
'pm_us_criminal_softcheck_price': '14.99',
'hr_us_criminal_softcheck_price': '14.99',
'pm_us_ssn_verification_price': '4.99',
'hr_us_ssn_verification_price': '4.99',
'pm_education_verification_price': '14.99',
'hr_education_verification_price': '14.99',
'pm_credential_verification_price': '14.99',
'hr_credential_verification_price': '14.99',
'pm_employment_verification_3_yrs_price': '14.99',
'hr_employment_verification_3_yrs_price': '14.99',
'pm_employment_verification_5_yrs_price': '19.99',
'hr_employment_verification_5_yrs_price': '19.99',
'pm_employment_verification_7_yrs_price': '22.99',
'hr_employment_verification_7_yrs_price': '22.99',
'pm_us_criminal_record_check_tier_1_price': '15.00',
'hr_us_criminal_record_check_tier_1_price': '15.00',
'pm_us_criminal_record_check_tier_2_price': '30.00',
'hr_us_criminal_record_check_tier_2_price': '30.00',
'pm_us_criminal_record_check_tier_3_price': '40.00',
'hr_us_criminal_record_check_tier_3_price': '40.00',
'pm_employer_references_price': '4.99',
'pm_address_references_price': '4.99',
'pm_education_references_price': '4.99',
'pm_credential_references_price': '4.99',
'hr_employer_references_price': '4.99',
'hr_address_references_price': '4.99',
'hr_education_references_price': '4.99',
'hr_credential_references_price': '4.99',
},
},
},
'listing_count': 0,
'full_address': '123 Fakestreet Abotsford BC N ',
'url_code': 'f55abccb-ed01-4e2d-8ac8-564640306961',
}
PROPERTIES_LIST_RESPONSE = {
'count': 239,
'next': 'http://demo-api.certn.co/api/v2/properties/?page=2',
'previous': None,
'results': [PROPERTY_GET_RESULT],
}
API_ERROR_SAMPLE_JSON = '''{
"error_type": "INVALID_REQUEST",
"error_message": "This is an invalid request",
"error_code": 400,
"display_message": None
}'''
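# A minimal mocking sketch (hypothetical, not taken from this file): canned
# payloads such as AUTH_RESPONSE can be registered with the `responses`
# library and exercised through plain `requests` calls. The endpoint URL and
# credentials below are guesses modelled on the demo-api.certn.co URLs in the
# fixtures above.
import requests
import responses

@responses.activate
def _demo_auth_mock():
    responses.add(
        responses.POST,
        'https://demo-api.certn.co/api/v2/authenticate/',  # assumed endpoint
        json=AUTH_RESPONSE,
        status=200,
    )
    reply = requests.post(
        'https://demo-api.certn.co/api/v2/authenticate/',
        json={'email': 'fake@fake.com', 'password': 'not-a-real-password'},
    )
    assert reply.json()['token'] == AUTH_RESPONSE['token']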
| 44.603257
| 91
| 0.565459
| 6,094
| 60,259
| 5.218576
| 0.095504
| 0.012829
| 0.018112
| 0.027168
| 0.94296
| 0.926325
| 0.911326
| 0.908559
| 0.905446
| 0.900855
| 0
| 0.062914
| 0.320782
| 60,259
| 1,350
| 92
| 44.636296
| 0.71409
| 0
| 0
| 0.843797
| 0
| 0
| 0.553444
| 0.279261
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.000747
| 0.001495
| 0
| 0.001495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8546bf16ef4e9ed18a8ac759a2b0347da72b267
| 1,938
|
py
|
Python
|
tests/test_ccsa_pspnet.py
|
xingyizhou/mseg-semantic
|
f93f2b21397aa6296a0f33775ae2f9712aa32858
|
[
"MIT"
] | 1
|
2021-01-13T08:39:25.000Z
|
2021-01-13T08:39:25.000Z
|
tests/test_ccsa_pspnet.py
|
xingyizhou/mseg-semantic
|
f93f2b21397aa6296a0f33775ae2f9712aa32858
|
[
"MIT"
] | null | null | null |
tests/test_ccsa_pspnet.py
|
xingyizhou/mseg-semantic
|
f93f2b21397aa6296a0f33775ae2f9712aa32858
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from domain_generalization.ccsa_pspnet import CCSA_PSPNet
def test_CCSA_PSPNet_dims():
""" """
layers = 50
classes = 183
network_name = None
zoom_factor = 8  # zoom factor for the final prediction during training; must be one of [1, 2, 4, 8]
ignore_label = 255
criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
BatchNorm = torch.nn.BatchNorm2d # torch.nn.SyncBatchNorm
model = CCSA_PSPNet(
layers=layers,
classes=classes,
zoom_factor=zoom_factor,
criterion=criterion,
BatchNorm=BatchNorm,
network_name=network_name,
pretrained=False)  # skip loading pretrained weights, unlike at actual training time
x = torch.randint(high=255, size=(4,3,201,201)).type(torch.float32)
y = torch.randint(high=10,size=(4,201,201))
batch_domain_idxs = torch.tensor([0,1,2,1])
out_cache = model(x,y,batch_domain_idxs)
def test_CCSA_PSPNet_dims_cuda():
""" """
layers = 50
classes = 183
network_name = None
zoom_factor = 8  # zoom factor for the final prediction during training; must be one of [1, 2, 4, 8]
ignore_label = 255
criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
BatchNorm = torch.nn.BatchNorm2d # torch.nn.SyncBatchNorm
model = CCSA_PSPNet(
layers=layers,
classes=classes,
zoom_factor=zoom_factor,
criterion=criterion,
BatchNorm=BatchNorm,
network_name=network_name,
pretrained=False)  # skip loading pretrained weights, unlike at actual training time
model = model.cuda()
x = torch.randint(high=255, size=(4,3,201,201)).type(torch.float32)
y = torch.randint(high=10,size=(4,201,201))
batch_domain_idxs = torch.tensor([0,1,2,1])
x = x.cuda()
y = y.cuda()
batch_domain_idxs = batch_domain_idxs.cuda()
out_cache = model(x,y,batch_domain_idxs)
if __name__ == '__main__':
""" """
test_CCSA_PSPNet_dims()
test_CCSA_PSPNet_dims_cuda()
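# Note (hypothetical guard, not in the original tests): test_CCSA_PSPNet_dims_cuda()
# fails outright on machines without a GPU; a common pattern is a pytest skip marker:
#
#   import pytest
#
#   @pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
#   def test_CCSA_PSPNet_dims_cuda():
#       ...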
| 27.685714
| 91
| 0.667183
| 262
| 1,938
| 4.709924
| 0.251908
| 0.06483
| 0.072934
| 0.058347
| 0.865478
| 0.810373
| 0.810373
| 0.810373
| 0.76175
| 0.76175
| 0
| 0.05305
| 0.221878
| 1,938
| 69
| 92
| 28.086957
| 0.765252
| 0.124355
| 0
| 0.76
| 0
| 0
| 0.004802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.06
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e427f5bf2e3963a6d4e1eb88be0713af2ae831e5
| 5,380
|
py
|
Python
|
tests/test_unit_sql_fetch.py
|
IBM/python-itoolk
|
36054a7ebdd8f5556c548d4c315e00e3c8d04904
|
[
"MIT"
] | 11
|
2019-01-09T12:31:04.000Z
|
2021-08-29T05:26:35.000Z
|
tests/test_unit_sql_fetch.py
|
IBM/python-itoolk
|
36054a7ebdd8f5556c548d4c315e00e3c8d04904
|
[
"MIT"
] | 50
|
2018-12-21T18:52:25.000Z
|
2021-05-25T13:38:15.000Z
|
tests/test_unit_sql_fetch.py
|
IBM/python-itoolk
|
36054a7ebdd8f5556c548d4c315e00e3c8d04904
|
[
"MIT"
] | 9
|
2018-12-25T00:02:19.000Z
|
2022-02-22T00:58:13.000Z
|
import xml.etree.ElementTree as ET
from itoolkit import iSqlFetch
def test_sql_fetch():
key = 'mulblnxo'
element = ET.fromstring(iSqlFetch(key).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 3)
assert('error' in element.attrib)
assert(element.attrib['error'] == 'fast')
assert('block' in element.attrib)
assert(element.attrib['block'] == 'all')
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_error_on():
key = 'opaffdjr'
error = 'on'
element = ET.fromstring(iSqlFetch(key, {'error': error}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 3)
assert('error' in element.attrib)
assert(element.attrib['error'] == error)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_error_off():
key = 'ysdifjyx'
error = 'off'
element = ET.fromstring(iSqlFetch(key, {'error': error}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 3)
assert('error' in element.attrib)
assert(element.attrib['error'] == error)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_block_set():
key = 'ojaxupoq'
block = '10'
element = ET.fromstring(iSqlFetch(key, {'block': block}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 3)
assert('block' in element.attrib)
assert(element.attrib['block'] == block)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_desc_on():
key = 'sefufeoq'
describe = 'on'
element = ET.fromstring(iSqlFetch(key, {'desc': describe}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 4)
assert('desc' in element.attrib)
assert(element.attrib['desc'] == describe)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_desc_off():
key = 'jtucgypy'
describe = 'off'
element = ET.fromstring(iSqlFetch(key, {'desc': describe}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 4)
assert('desc' in element.attrib)
assert(element.attrib['desc'] == describe)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_stmt_set():
key = 'slkgfrav'
stmt = 'stmt-label'
element = ET.fromstring(iSqlFetch(key, {'stmt': stmt}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 4)
assert('stmt' in element.attrib)
assert(element.attrib['stmt'] == stmt)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
def test_sql_fetch_rec_set():
key = 'slkgfrav'
records = '10'
element = ET.fromstring(iSqlFetch(key, {'rec': records}).xml_in())
assert(element.tag == 'sql')
assert(len(element.attrib) == 1)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
assert(element.text == '\n')
children = tuple(iter(element))
assert(len(children) == 1)
element = children[0]
assert(element.tag == 'fetch')
assert(len(element.attrib) == 4)
assert('rec' in element.attrib)
assert(element.attrib['rec'] == records)
assert('var' in element.attrib)
assert(element.attrib['var'] == key)
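# The checks above repeat the same <sql><fetch/></sql> structure, so the helper
# below is a refactoring sketch only (not part of the original suite), built from
# the API already exercised above; `expected_attrs` is a hypothetical name.
def _assert_sql_fetch(key, opts=None, expected_attrs=None):
    source = iSqlFetch(key, opts) if opts is not None else iSqlFetch(key)
    element = ET.fromstring(source.xml_in())
    assert element.tag == 'sql'
    assert element.attrib == {'var': key}
    assert element.text == '\n'
    children = tuple(iter(element))
    assert len(children) == 1
    fetch = children[0]
    assert fetch.tag == 'fetch'
    assert fetch.attrib.get('var') == key
    for name, value in (expected_attrs or {}).items():
        assert fetch.attrib.get(name) == value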
| 21.869919
| 72
| 0.621747
| 671
| 5,380
| 4.928465
| 0.077496
| 0.25945
| 0.113396
| 0.158754
| 0.907469
| 0.888721
| 0.845177
| 0.845177
| 0.845177
| 0.816752
| 0
| 0.008401
| 0.203532
| 5,380
| 245
| 73
| 21.959184
| 0.763361
| 0
| 0
| 0.768707
| 0
| 0
| 0.070818
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.054422
| false
| 0
| 0.013605
| 0
| 0.068027
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e46c6d69d21fc9d469f6e9bdd5201385cdb22ed5
| 13,577
|
py
|
Python
|
highlevel/robot/controller/motion/motion_test.py
|
outech-robotic/code
|
b57acba3faae606f4d0c3cf210bc0716d7fef4e7
|
[
"MIT"
] | 7
|
2020-04-15T16:42:56.000Z
|
2021-12-25T10:12:13.000Z
|
highlevel/robot/controller/motion/motion_test.py
|
outech-robotic/code
|
b57acba3faae606f4d0c3cf210bc0716d7fef4e7
|
[
"MIT"
] | 37
|
2020-04-15T15:49:31.000Z
|
2022-02-27T03:53:48.000Z
|
highlevel/robot/controller/motion/motion_test.py
|
outech-robotic/code
|
b57acba3faae606f4d0c3cf210bc0716d7fef4e7
|
[
"MIT"
] | null | null | null |
"""
Tests for the motion controller module.
"""
import asyncio
import dataclasses
import math
import pytest
from pytest import fixture
from highlevel.robot.controller.motion.motion import MotionController, MotionResult
from highlevel.robot.entity.configuration import Configuration
from highlevel.util.type import MillimeterPerSec, mm_to_tick
from highlevel.util.filter.pid import PIDConstants
from highlevel.util.geometry.vector import Vector2
def _mm_to_tick(distance: MillimeterPerSec,
configuration: Configuration) -> MillimeterPerSec:
"""
Converts locally millimeter distance to encoder ticks
"""
return mm_to_tick(distance, configuration.encoder_ticks_per_revolution,
configuration.wheel_radius)
@fixture(name='configuration')
def configuration_stub(configuration_test: Configuration) -> Configuration:
"""
Configuration for tests.
"""
return dataclasses.replace(configuration_test,
distance_between_wheels=2,
encoder_update_rate=1,
max_wheel_speed=5,
max_wheel_acceleration=1,
max_angular_velocity=5,
max_angular_acceleration=1,
wheel_radius=1 / (2 * math.pi),
tolerance_distance=0.1,
tolerance_angle=0.04,
pid_constants_distance=PIDConstants(
0.0, 0.0, 0.0),
pid_constants_angle=PIDConstants(0.0, 0.0, 0.0))
@fixture(name='motion_controller')
def motion_controller_setup(position_controller_mock, motor_gateway_mock,
configuration):
"""
Set up the motion controller to test.
"""
position_controller_mock.angle = 0
position_controller_mock.position = Vector2(0, 0)
position_controller_mock.distance_travelled = 0
position_controller_mock.speed = 0
position_controller_mock.angular_velocity = 0
position_controller_mock.position_left = 0
position_controller_mock.position_right = 0
return MotionController(
position_controller=position_controller_mock,
motor_gateway=motor_gateway_mock,
configuration=configuration,
)
class TestMotionController:
"""
Test the motion controller.
All the tests assume the PIDs have zero coefficients.
"""
@staticmethod
@pytest.mark.asyncio
async def test_translate_and_rotate_zero(motion_controller,
motor_gateway_mock):
"""
Robot translates 0 mm to verify that it does nothing for sufficiently short distances.
"""
result = await motion_controller.translate(0.0)
motor_gateway_mock.set_target_speeds.assert_called_once_with(0, 0)
assert result == MotionResult.OK
motor_gateway_mock.set_target_speeds.reset_mock()
result = await motion_controller.rotate(0.0)
motor_gateway_mock.set_target_speeds.assert_called_once_with(0, 0)
assert result == MotionResult.OK
@staticmethod
@pytest.mark.asyncio
async def test_translate_and_rotate_busy_ignores(motion_controller):
"""
Motion controller should ignore movement requests if it is already moving.
"""
# translate -> rotate
task = asyncio.create_task(motion_controller.translate(100000))
await asyncio.sleep(0)
result = await motion_controller.rotate(21231)
assert result == MotionResult.BUSY
task.cancel()
# rotate -> translate
task = asyncio.create_task(motion_controller.rotate(100000))
await asyncio.sleep(0)
result = await motion_controller.translate(21231)
assert result == MotionResult.BUSY
task.cancel()
# translate -> translate
task = asyncio.create_task(motion_controller.translate(100000))
await asyncio.sleep(0)
result = await motion_controller.translate(21231)
assert result == MotionResult.BUSY
task.cancel()
# rotate -> rotate
task = asyncio.create_task(motion_controller.rotate(100000))
await asyncio.sleep(0)
result = await motion_controller.rotate(21231)
assert result == MotionResult.BUSY
task.cancel()
@staticmethod
@pytest.mark.asyncio
async def test_translate_correct_speed(
motion_controller,
position_controller_mock,
motor_gateway_mock,
):
"""
Robot translates a given distance. Check that the speed increases with
the maximum acceleration.
"""
position_controller_mock.distance_travelled = 0
task = asyncio.create_task(motion_controller.translate(100))
await asyncio.sleep(0)
# Yield once to let the controller run, check that it started the movement
motor_gateway_mock.set_target_speeds.assert_called_once_with(1, 1)
motor_gateway_mock.set_target_speeds.reset_mock()
await asyncio.sleep(0)
# yield again and check that the controller correctly waits for a trigger
motor_gateway_mock.set_target_speeds.assert_not_called()
motion_controller.trigger_update()
await asyncio.sleep(0)
# check that the movement continues
motor_gateway_mock.set_target_speeds.assert_called_once_with(2, 2)
task.cancel()
@staticmethod
@pytest.mark.asyncio
async def test_translate_correct_speed_negative(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot translates a given negative distance. Check that the maximum acceleration is used.
"""
position_controller_mock.distance_travelled = 0
task = asyncio.create_task(motion_controller.translate(-100))
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_once_with(-1, -1)
motor_gateway_mock.set_target_speeds.reset_mock()
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_once_with(-2, -2)
task.cancel()
@staticmethod
@pytest.mark.asyncio
async def test_translate_stops_at_target(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot translates a given distance. Check that the speed is zero at the end.
"""
position_controller_mock.distance_travelled = 0
position_controller_mock.position_left = 0
position_controller_mock.position_right = 0
# Speeds are just increased and decreased with a constant acceleration.
speeds = [1, 2, 3, 4, 5, 4, 3, 2, 1, 0]
target_position = sum(speeds)
task = asyncio.create_task(
motion_controller.translate(target_position))
current = 0
for speed in speeds:
position_controller_mock.position_left = current
position_controller_mock.position_right = current
position_controller_mock.distance_travelled = current
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_with(
speed, speed)
current += speed
result = await task
assert result == MotionResult.OK
@staticmethod
@pytest.mark.asyncio
async def test_translate_stops_at_target_negative(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot translates a given negative distance. Check that the speed is zero at the end.
"""
position_controller_mock.distance_travelled = 0
position_controller_mock.position_left = 0
position_controller_mock.position_right = 0
# Speeds are just increased and decreased with a constant acceleration.
speeds = [1, 2, 3, 4, 5, 4, 3, 2, 1, 0]
speeds = [-s for s in speeds]
target_position = sum(speeds)
task = asyncio.create_task(
motion_controller.translate(target_position))
current = 0
for speed in speeds:
position_controller_mock.position_left = current
position_controller_mock.position_right = current
position_controller_mock.distance_travelled = current
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_with(
speed, speed)
current += speed
result = await task
assert result == MotionResult.OK
@staticmethod
@pytest.mark.asyncio
async def test_rotate_correct_speed(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot rotates for a given relative angle. Check that the maximum acceleration is used.
"""
position_controller_mock.angle = 0
task = asyncio.create_task(motion_controller.rotate(math.pi / 2))
await asyncio.sleep(0)
# a positive relative angle means the left wheel goes backwards, right forwards
motor_gateway_mock.set_target_speeds.assert_called_once_with(-1, 1)
motor_gateway_mock.set_target_speeds.reset_mock()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_not_called()
motor_gateway_mock.set_target_speeds.reset_mock()
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_once_with(-2, 2)
task.cancel()
@staticmethod
@pytest.mark.asyncio
async def test_rotate_correct_speed_negative(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot rotates for a given negative relative angle. Check that the maximum acceleration is used.
"""
position_controller_mock.angle = 0
task = asyncio.create_task(motion_controller.rotate(-math.pi / 2))
await asyncio.sleep(0)
# a positive relative angle means the left wheel goes backwards, right forwards
motor_gateway_mock.set_target_speeds.assert_called_once_with(1, -1)
motor_gateway_mock.set_target_speeds.reset_mock()
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_once_with(2, -2)
task.cancel()
@staticmethod
@pytest.mark.asyncio
async def test_rotate_stops_at_target(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot rotates for a given relative angle. Check that the controller stops at target.
"""
position_controller_mock.angle = 0
position_controller_mock.position_left = 0
position_controller_mock.position_right = 0
# Speeds are just increased and decreased with a constant acceleration.
speeds = [1, 2, 3, 4, 5, 4, 3, 2, 1, 0]
target_position = sum(speeds)
task = asyncio.create_task(motion_controller.rotate(target_position))
current = 0
for speed in speeds:
position_controller_mock.position_left = current
position_controller_mock.position_right = current
position_controller_mock.angle = current
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_with(
-speed, speed)
current += speed
result = await task
assert result == MotionResult.OK
@staticmethod
@pytest.mark.asyncio
async def test_rotate_stops_at_target_negative(motion_controller,
position_controller_mock,
motor_gateway_mock):
"""
Robot rotates for a given negative relative angle. Check for a stop at target.
"""
position_controller_mock.angle = 0
position_controller_mock.position_left = 0
position_controller_mock.position_right = 0
# Speeds are just increased and decreased with a constant acceleration.
speeds = [1, 2, 3, 4, 5, 4, 3, 2, 1, 0]
speeds = [-s for s in speeds]
target_position = sum(speeds)
task = asyncio.create_task(motion_controller.rotate(target_position))
current = 0
for speed in speeds:
position_controller_mock.position_left = current
position_controller_mock.position_right = current
position_controller_mock.angle = current
motion_controller.trigger_update()
await asyncio.sleep(0)
motor_gateway_mock.set_target_speeds.assert_called_with(
-speed, speed)
current += speed
result = await task
assert result == MotionResult.OK
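# Illustrative sketch only (not the controller's actual code): the speeds list
# [1, 2, 3, 4, 5, 4, 3, 2, 1, 0] used in the tests above is a symmetric
# trapezoidal profile for max_wheel_acceleration=1 and max_wheel_speed=5; each
# tick the largest speed is chosen from which braking still fits in the
# remaining distance, so the per-tick speeds sum to the travelled distance (25).
# Assumes integer ticks and a target reachable on that grid.
def trapezoidal_profile(distance, max_speed, max_accel):
    speeds, position, speed = [], 0, 0
    while position < distance:
        for candidate in range(min(speed + max_accel, max_speed), -1, -max_accel):
            # distance covered this tick plus the ramp-down back to zero
            braking = candidate * (candidate + max_accel) // (2 * max_accel)
            if braking <= distance - position:
                speed = candidate
                break
        speeds.append(speed)
        position += speed
    speeds.append(0)  # final tick: command zero speed at the target
    return speeds

assert trapezoidal_profile(25, 5, 1) == [1, 2, 3, 4, 5, 4, 3, 2, 1, 0]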
| 39.126801
| 96
| 0.639243
| 1,488
| 13,577
| 5.552419
| 0.112903
| 0.100218
| 0.119826
| 0.050593
| 0.802348
| 0.789034
| 0.777415
| 0.769426
| 0.757444
| 0.736262
| 0
| 0.020025
| 0.297488
| 13,577
| 346
| 97
| 39.239884
| 0.846194
| 0.068498
| 0
| 0.714286
| 0
| 0
| 0.002614
| 0
| 0
| 0
| 0
| 0
| 0.109244
| 1
| 0.012605
| false
| 0
| 0.042017
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e490ec4bd1224a4eb162f3a82a93da027252c334
| 4,538
|
py
|
Python
|
tests/test_docker.py
|
kfirz/deployster
|
b95fdb9cf150eee765f7ef3dbdee3666119e76f9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_docker.py
|
kfirz/deployster
|
b95fdb9cf150eee765f7ef3dbdee3666119e76f9
|
[
"Apache-2.0"
] | 19
|
2017-12-28T19:39:37.000Z
|
2018-04-18T23:24:45.000Z
|
tests/test_docker.py
|
kfirz/deployster
|
b95fdb9cf150eee765f7ef3dbdee3666119e76f9
|
[
"Apache-2.0"
] | 1
|
2018-04-06T16:50:49.000Z
|
2018-04-06T16:50:49.000Z
|
import json
from pathlib import Path
import pytest
from mock_external_services import MockDockerInvoker
from util import UserError, Logger
def test_docker_invoker_run_json():
with pytest.raises(UserError, match='Docker command terminated with exit code #-1'):
MockDockerInvoker(return_code=-1,
stderr='ERROR!',
stdout='invalid JSON here').run_json(logger=Logger(),
local_work_dir=Path('/'),
container_work_dir='/',
image='some_image',
entrypoint=None,
args=None,
input=None)
with pytest.raises(UserError, match='Docker command terminated with exit code #-1'):
MockDockerInvoker(return_code=-1,
stderr='ERROR!',
stdout='invalid JSON here').run(logger=Logger(),
local_work_dir=Path('/'),
container_work_dir='/',
image='some_image',
entrypoint=None,
args=None,
input=None)
with pytest.raises(UserError, match='Docker command did not provide any JSON back'):
MockDockerInvoker(return_code=0,
stderr='ERROR!',
stdout='').run_json(logger=Logger(),
local_work_dir=Path('/'),
container_work_dir='/',
image='some_image',
entrypoint=None,
args=None,
input=None)
with pytest.raises(UserError, match='Docker command provided invalid JSON'):
MockDockerInvoker(return_code=0,
stderr='ERROR!',
stdout='{invalidate JSON here too').run_json(logger=Logger(),
local_work_dir=Path('/'),
container_work_dir='/',
image='some_image',
entrypoint=None,
args=None,
input=None)
data: dict = {'k1': 'v1'}
result: dict = MockDockerInvoker(return_code=0,
stderr='ERROR!',
stdout=json.dumps(data)).run_json(logger=Logger(),
local_work_dir=Path('/'),
container_work_dir='/',
image='some_image',
entrypoint=None,
args=None,
input=None)
assert data == result
MockDockerInvoker(return_code=0, stderr='ERROR!', stdout=json.dumps(data)).run(logger=Logger(),
local_work_dir=Path('/'),
container_work_dir='/',
image='some_image',
entrypoint=None,
args=None,
input=None)
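# Refactoring sketch only (not part of the original tests): the three failing
# run_json cases above could be table-driven; the tuples below are copied from
# the scenarios exercised above.
@pytest.mark.parametrize("return_code,stdout,message", [
    (-1, 'invalid JSON here', 'Docker command terminated with exit code #-1'),
    (0, '', 'Docker command did not provide any JSON back'),
    (0, '{invalidate JSON here too', 'Docker command provided invalid JSON'),
])
def test_docker_invoker_run_json_failures(return_code, stdout, message):
    with pytest.raises(UserError, match=message):
        MockDockerInvoker(return_code=return_code,
                          stderr='ERROR!',
                          stdout=stdout).run_json(logger=Logger(),
                                                  local_work_dir=Path('/'),
                                                  container_work_dir='/',
                                                  image='some_image',
                                                  entrypoint=None,
                                                  args=None,
                                                  input=None)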
| 61.324324
| 108
| 0.309828
| 257
| 4,538
| 5.291829
| 0.225681
| 0.061765
| 0.119118
| 0.092647
| 0.817647
| 0.817647
| 0.817647
| 0.751471
| 0.751471
| 0.751471
| 0
| 0.005869
| 0.624504
| 4,538
| 73
| 109
| 62.164384
| 0.792254
| 0
| 0
| 0.734375
| 0
| 0
| 0.074703
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 1
| 0.015625
| true
| 0
| 0.078125
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e4e3be51c583563a5ea9749bc733f9afd9b2cf7c
| 73
|
py
|
Python
|
src/poq/__init__.py
|
dzhg/poq
|
c45f591450ff999518073e56012c544531e11326
|
[
"MIT"
] | null | null | null |
src/poq/__init__.py
|
dzhg/poq
|
c45f591450ff999518073e56012c544531e11326
|
[
"MIT"
] | null | null | null |
src/poq/__init__.py
|
dzhg/poq
|
c45f591450ff999518073e56012c544531e11326
|
[
"MIT"
] | null | null | null |
from .poq import _query
def query(path, o):
return _query(path, o)
| 12.166667
| 26
| 0.671233
| 12
| 73
| 3.916667
| 0.666667
| 0.382979
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219178
| 73
| 5
| 27
| 14.6
| 0.824561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
900b6125e653ba354c04555b81e86b3a4c60b777
| 198
|
py
|
Python
|
cupy/binary/packing.py
|
umitanuki/chainer
|
225c56b233e684ff4855451d2af4c2fb66915f21
|
[
"MIT"
] | null | null | null |
cupy/binary/packing.py
|
umitanuki/chainer
|
225c56b233e684ff4855451d2af4c2fb66915f21
|
[
"MIT"
] | null | null | null |
cupy/binary/packing.py
|
umitanuki/chainer
|
225c56b233e684ff4855451d2af4c2fb66915f21
|
[
"MIT"
] | 1
|
2018-11-18T00:36:51.000Z
|
2018-11-18T00:36:51.000Z
|
def packbits(myarray, axis=None):
# TODO(beam2d): Implement it
raise NotImplementedError
def unpackbits(myarray, axis=None):
# TODO(beam2d): Implement it
raise NotImplementedError
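# Reference sketch (illustrative only, not part of this module): the intended
# semantics presumably mirror NumPy's packbits/unpackbits, shown here for the
# default axis=None (flattened) case.
import numpy as np

bits = np.array([1, 0, 1, 1, 0, 0, 0, 1], dtype=np.uint8)
packed = np.packbits(bits)        # array([177], dtype=uint8) -> 0b10110001
restored = np.unpackbits(packed)  # recovers the original eight bits
assert (restored == bits).all()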
| 22
| 35
| 0.727273
| 22
| 198
| 6.545455
| 0.545455
| 0.152778
| 0.208333
| 0.263889
| 0.833333
| 0.833333
| 0.833333
| 0.833333
| 0.833333
| 0
| 0
| 0.012346
| 0.181818
| 198
| 8
| 36
| 24.75
| 0.876543
| 0.267677
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
90265e73210fbcff83c15d0ec1572fde09ff4374
| 2,015
|
py
|
Python
|
clist/migrations/0068_auto_20210703_2308.py
|
horacexd/clist
|
9759dfea97b86514bec9825d2430abc36decacf0
|
[
"Apache-2.0"
] | 166
|
2019-05-16T23:46:08.000Z
|
2022-03-31T05:20:23.000Z
|
clist/migrations/0068_auto_20210703_2308.py
|
horacexd/clist
|
9759dfea97b86514bec9825d2430abc36decacf0
|
[
"Apache-2.0"
] | 92
|
2020-01-18T22:51:53.000Z
|
2022-03-12T01:23:57.000Z
|
clist/migrations/0068_auto_20210703_2308.py
|
horacexd/clist
|
9759dfea97b86514bec9825d2430abc36decacf0
|
[
"Apache-2.0"
] | 23
|
2020-02-09T17:38:43.000Z
|
2021-12-09T14:39:07.000Z
|
# Generated by Django 3.1.12 on 2021-07-03 23:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clist', '0067_auto_20210320_0559'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='banner',
name='modified',
field=models.DateTimeField(auto_now=True, db_index=True),
),
migrations.AlterField(
model_name='problem',
name='created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='problem',
name='modified',
field=models.DateTimeField(auto_now=True, db_index=True),
),
migrations.AlterField(
model_name='problemtag',
name='created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='problemtag',
name='modified',
field=models.DateTimeField(auto_now=True, db_index=True),
),
migrations.AlterField(
model_name='resource',
name='created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='resource',
name='modified',
field=models.DateTimeField(auto_now=True, db_index=True),
),
migrations.AlterField(
model_name='timingcontest',
name='created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='timingcontest',
name='modified',
field=models.DateTimeField(auto_now=True, db_index=True),
),
]
| 31.484375
| 73
| 0.573201
| 194
| 2,015
| 5.757732
| 0.216495
| 0.179051
| 0.223814
| 0.259624
| 0.866607
| 0.866607
| 0.829902
| 0.829902
| 0.829902
| 0.829902
| 0
| 0.023172
| 0.31464
| 2,015
| 63
| 74
| 31.984127
| 0.785663
| 0.022829
| 0
| 0.877193
| 1
| 0
| 0.097102
| 0.011693
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017544
| 0
| 0.070175
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
90491834dd9c5b09c9b2191163a7d57f37a0e3a0
| 15,055
|
py
|
Python
|
python/set_up_Im_matrix.py
|
KGHustad/emi-book-2020-splitting-code
|
4cd8acb47a29212d8c06a18f0f6ff2dde5911904
|
[
"MIT"
] | 1
|
2020-03-12T11:48:26.000Z
|
2020-03-12T11:48:26.000Z
|
python/set_up_Im_matrix.py
|
KGHustad/emi-book-2020-splitting-code
|
4cd8acb47a29212d8c06a18f0f6ff2dde5911904
|
[
"MIT"
] | null | null | null |
python/set_up_Im_matrix.py
|
KGHustad/emi-book-2020-splitting-code
|
4cd8acb47a29212d8c06a18f0f6ff2dde5911904
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.sparse
def set_up_Im_matrix(G, mesh):
"Set up matrix used to extract the membrane current I_m from the intracellular potential"
# TODO: Rewrite this to reuse the arrays better, as is done in set_up_symmetric_extracellular_matrix
# Load parameters
N = G.N
Nx = G.Nx
Ny = G.Ny
dx = G.dx
dy = G.dy
dz = G.dz
sigma_i = G.sigma_i
m_lsw = mesh.m_lsw
m_lse = mesh.m_lse
m_lnw = mesh.m_lnw
m_lne = mesh.m_lne
m_hsw = mesh.m_hsw
m_hse = mesh.m_hse
m_hnw = mesh.m_hnw
m_hne = mesh.m_hne
m_hw = mesh.m_hw
m_he = mesh.m_he
m_hs = mesh.m_hs
m_hn = mesh.m_hn
m_lw = mesh.m_lw
m_le = mesh.m_le
m_ls = mesh.m_ls
m_ln = mesh.m_ln
m_ne = mesh.m_ne
m_sw = mesh.m_sw
m_se = mesh.m_se
m_nw = mesh.m_nw
m_w = mesh.m_w
m_e = mesh.m_e
m_s = mesh.m_s
m_n = mesh.m_n
m_h = mesh.m_h
m_l = mesh.m_l
stride_x = mesh[1, 0, 0] - mesh[0, 0, 0]
stride_y = mesh[0, 1, 0] - mesh[0, 0, 0]
stride_z = mesh[0, 0, 1] - mesh[0, 0, 0]
# A: Planes
# 1a) Set up factors for the low membrane
index = m_l
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
M = scipy.sparse.spdiags(-sigma_i/dz*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(sigma_i/dz*i_vec_qp, stride_z, N, N)
# 2a) Set up factors for the high membrane
index = m_h
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
M = M + scipy.sparse.spdiags(-sigma_i/dz*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(sigma_i/dz*i_vec_qm, -stride_z, N, N)
# 3a) Set up factors for the south membrane
index = m_s
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
M = M + scipy.sparse.spdiags(-sigma_i/dy*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(sigma_i/dy*i_vec_jp, stride_y, N, N)
# 4a) Set up factors for the north membrane
index = m_n
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
M = M + scipy.sparse.spdiags(-sigma_i/dy*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(sigma_i/dy*i_vec_jm, -stride_y, N, N)
# 5a) Set up factors for the left membrane
index = m_w
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-sigma_i/dx*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(sigma_i/dx*i_vec_kp, stride_x, N, N)
# 6a) Set up factors for the right membrane
index = m_e
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-sigma_i/dx*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(sigma_i/dx*i_vec_km, -stride_x, N, N)
# B: Edges
# 1b) Set up factors for the high left membrane
index = m_hw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 2b) Set up factors for the high right membrane
index = m_he
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_km, -stride_x, N, N)
# 3b) Set up factors for the high south membrane
index = m_hs
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dy)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jp, stride_y, N, N)
# 4b) Set up factors for the high north membrane
index = m_hn
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dy)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jm, -stride_y, N, N)
# 5b) Set up factors for the low left membrane
index = m_lw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 6b) Set up factors for the low right membrane
index = m_le
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_km, -stride_x, N, N)
# 7b) Set up factors for the low south membrane
index = m_ls
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dy)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jp, stride_y, N, N)
# 8b) Set up factors for the low north membrane
index = m_ln
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dz+sigma_i/dy)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jm, -stride_y, N, N)
# 9b) Set up factors for the north left membrane
index = m_nw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jm, -stride_y, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 10b) Set up factors for the north right membrane
index = m_ne
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jm, -stride_y, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_km, -stride_x, N, N)
# 11b) Set up factors for the south left membrane
index = m_sw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jp, stride_y, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 12b) Set up factors for the south east membrane
index = m_se
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-0.5*(sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dy*i_vec_jp, stride_y, N, N)
M = M + scipy.sparse.spdiags(0.5*sigma_i/dx*i_vec_km, -stride_x, N, N)
# C: Corners
# 1c) Set up factors for the lower, south, left membrane
index = m_lsw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jp, stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 2c) Set up factors for the lower, south, east membrane
index = m_lse
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jp, stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_km, -stride_x, N, N)
# 3c) Set up factors for the lower, north, left membrane
index = m_lnw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jm, -stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 4c) Set up factors for the lower, north, right membrane
index = m_lne
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qp = np.zeros(N, dtype=np.float64)
i_vec_qp[index+stride_z] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qp, stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jm, -stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_km, -stride_x, N, N)
# 5c) Set up factors for the higher, south, left membrane
index = m_hsw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jp, stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 6c) Set up factors for the higher, south, east membrane
index = m_hse
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_jp = np.zeros(N, dtype=np.float64)
i_vec_jp[index+stride_y] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jp, stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_km, -stride_x, N, N)
# 7c) Set up factors for the higher, north, left membrane
index = m_hnw
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
i_vec_kp = np.zeros(N, dtype=np.float64)
i_vec_kp[index+stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jm, -stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_kp, stride_x, N, N)
# 8c) Set up factors for the higher, north, right membrane
index = m_hne
i_vec = np.zeros(N, dtype=np.float64)
i_vec[index] = 1
i_vec_qm = np.zeros(N, dtype=np.float64)
i_vec_qm[index-stride_z] = 1
i_vec_jm = np.zeros(N, dtype=np.float64)
i_vec_jm[index-stride_y] = 1
i_vec_km = np.zeros(N, dtype=np.float64)
i_vec_km[index-stride_x] = 1
M = M + scipy.sparse.spdiags(-1/3*(sigma_i/dz+sigma_i/dy+sigma_i/dx)*i_vec, 0, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dz*i_vec_qm, -stride_z, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dy*i_vec_jm, -stride_y, N, N)
M = M + scipy.sparse.spdiags(1/3*sigma_i/dx*i_vec_km, -stride_x, N, N)
# Convert to CSR
M = M.tocsr()
# Reshape matrix to the intracellular domain
M = M[mesh.i_all, :]
M = M[:, mesh.i_all]
return M
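# Refactoring sketch only, responding to the TODO at the top of this function:
# the six plane-membrane blocks (1a-6a) repeat one pattern, which a hypothetical
# helper like `_add_face` below could express while reusing two work arrays.
# The edge (b) and corner (c) blocks would follow the same idea with two or
# three neighbour terms and the 0.5 or 1/3 weights used above.
def _add_face(M, index, stride, coeff, N, i_vec, i_vec_nb):
    """Add -coeff on the diagonal at `index` and +coeff at the neighbour offset
    by `stride` (signed), mirroring steps 1a-6a above."""
    i_vec[:] = 0
    i_vec[index] = 1
    i_vec_nb[:] = 0
    i_vec_nb[index + stride] = 1
    M = M + scipy.sparse.spdiags(-coeff * i_vec, 0, N, N)
    M = M + scipy.sparse.spdiags(coeff * i_vec_nb, stride, N, N)
    return M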
| 38.113924
| 112
| 0.64178
| 3,120
| 15,055
| 2.86891
| 0.045192
| 0.107251
| 0.0715
| 0.116188
| 0.888057
| 0.857334
| 0.814769
| 0.789297
| 0.789297
| 0.789297
| 0
| 0.037712
| 0.209166
| 15,055
| 394
| 113
| 38.21066
| 0.714094
| 0.103288
| 0
| 0.748387
| 0
| 0
| 0.006418
| 0
| 0
| 0
| 0
| 0.002538
| 0
| 1
| 0.003226
| false
| 0
| 0.006452
| 0
| 0.012903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5fa5e8bc5e9a45bd6dedc79f5385c47a008c3958
| 4,279
|
py
|
Python
|
eerie/bsplines/utils.py
|
RomeroGuDw/wavelet_networks
|
0fd6871ff9f03a3cb26f1c414728aed89a33b99c
|
[
"MIT"
] | 59
|
2020-06-12T09:16:52.000Z
|
2022-03-10T09:30:58.000Z
|
eerie/bsplines/utils.py
|
RomeroGuDw/wavelet_networks
|
0fd6871ff9f03a3cb26f1c414728aed89a33b99c
|
[
"MIT"
] | 1
|
2020-09-13T01:43:44.000Z
|
2022-02-16T14:33:18.000Z
|
eerie/bsplines/utils.py
|
RomeroGuDw/wavelet_networks
|
0fd6871ff9f03a3cb26f1c414728aed89a33b99c
|
[
"MIT"
] | 1
|
2020-07-31T14:23:43.000Z
|
2020-07-31T14:23:43.000Z
|
import torch
import numpy as np
## Returns the support of the 1D cardinal B-spline in terms of a min-max range
def B_supp(n, s=1, dx=0, intsupp=False):
""" Returns a min and max value of the domain on which the 1D cardinal B-spline of order n is non-zero.
INPUT:
- degree n, an integer
INPUT (optional):
- scale s, a real scalar number. Specifies the support of scaled B-splines via supp( B( . / s) )
- offset dx, a real scalar number. Specifies the support of scaled+shifted B-splines via supp(B( . / s - dx))
- intsupp, a boolean. Specifies whether or not the support should be on an integer grid. E.g. if xMax were
2.3 and we only sample integer positions x, then 2 would still be non-zero, but 3 would evaluate to
zero. In this case the non-zero interval would be [-2,2], whereas in the intsupp=False case it would be
[-2.3,2.3]
OUTPUT:
- (xMin, xMax), the min-max range of the support
"""
xMinMax = s * (n + 1) / 2
xMin = -xMinMax + dx
xMax = xMinMax + dx
if intsupp:
xMax = (int(xMax) - 1 if int(xMax) == xMax else int(xMax))
xMin = (int(xMin) + 1 if int(xMin) == xMin else int(xMin))
return (xMin, xMax)
## Returns the grid (1D torch tensor) with unit gridpoint spacing
def B_supp_grid(n, s=1, dx=0, intsupp=False, device='CPU'):
""" Returns a grid (1D torch tensor) with unit spacing between the grid points (e.g. [xMin,...,-1,0,1,...,xMax]).
The min-max range is computed via B_supp.
INPUT:
- degree n, an integer
INPUT (optional):
- scale s, a real scalar number. Specifies the support of scaled B-splines via supp( B( . / s) )
- offset dx, a real scalar number. Specifies the support of scaled+shifted B-splines via supp(B( . / s - dx))
- intsupp, a boolean. Specifies whether or not the support should be on an integer grid. E.g. if xMax were
2.3 and we only sample integer positions x, then 2 would still be non-zero, but 3 would evaluate to
zero. In this case the non-zero interval would be [-2,2], whereas in the intsupp=False case it would be
[-2.3,2.3]
OUTPUT:
- xx, a 1D torch.tensor of x-values for which B(x) is non-zero
"""
xMin, xMax = B_supp(n, s, dx, intsupp)  # TODO: With intsupp=False, I get [-1, 0, 1, 2], but I think it should be symmetrical. Right?
return torch.arange(xMin, xMax + 1, dtype=torch.int16, device=device)  # TODO: device not required; managed automatically by model.device().
## Returns the grid (1D torch tensor) with unit gridpoint spacing
def B_supp_grid_2(n, s=1, intsupp=False, device='CPU'):
""" Returns a grid (1D torch tensor) with unit spacing between the grid points (e.g. [xMin,...,-1,0,1,...,xMax]).
The min-max range is computed via B_supp.
INPUT:
- degree n, an integer
INPUT (optional):
- scale s, a real scalar number. Specifies the support of scaled B-splines via supp( B( . / s) )
- offset dx, a real scalar number. Specifies the support of scaled+shifted B-splines via supp(B( . / s - dx))
- intsupp, a boolean. Specifies whether or not the support should be on an integer grid. E.g. if xMax were
2.3 and we only sample integer positions x, then 2 would still be non-zero, but 3 would evaluate to
zero. In this case the non-zero interval would be [-2,2], whereas in the intsupp=False case it would be
[-2.3,2.3]
OUTPUT:
- xx, a 1D torch.tensor of x-values for which B(x) is non-zero
"""
xMin, xMax = B_supp(n, s, 0, intsupp)  # TODO: With intsupp=False, I get [-1, 0, 1, 2], but I think it should be symmetrical. Right?
return xMin, xMax, torch.arange(xMin,xMax+1,dtype=torch.int16, device=device)
if __name__ == '__main__':
from eerie.bsplines.b_1d import B
n = 3
s = 1
dx=0.2
Bfunc = B(3)
xlist = B_supp_grid(n, s, dx, True)
print(B_supp(n, s, dx))
print(xlist)
print(Bfunc((xlist - dx) / s))
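# Worked example (illustrative only) of the intsupp clipping described in the
# docstrings above: for the cubic B-spline (n=3, s=1, dx=0) the raw support is
# (-(3+1)/2, (3+1)/2) = (-2.0, 2.0); the endpoints evaluate to zero, so with
# intsupp=True they are pulled in by one grid step, giving (-1, 1).
assert B_supp(3) == (-2.0, 2.0)
assert B_supp(3, intsupp=True) == (-1, 1)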
| 49.183908
| 142
| 0.606216
| 693
| 4,279
| 3.7114
| 0.177489
| 0.042768
| 0.027994
| 0.039658
| 0.828149
| 0.797045
| 0.797045
| 0.787714
| 0.787714
| 0.787714
| 0
| 0.024777
| 0.292592
| 4,279
| 86
| 143
| 49.755814
| 0.824909
| 0.681935
| 0
| 0
| 0
| 0
| 0.01469
| 0
| 0
| 0
| 0
| 0.023256
| 0
| 1
| 0.115385
| false
| 0
| 0.115385
| 0
| 0.346154
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3958d59b573d52b08441d446901d936f0649bc70
| 3,234
|
py
|
Python
|
src/odyssey_tests/cli_parser_tests/argument_tests.py
|
GodwinneLorayne/odyssey
|
b5576818d70bea011772b944a4dd947777a5ac2f
|
[
"MIT"
] | 1
|
2020-06-01T20:52:37.000Z
|
2020-06-01T20:52:37.000Z
|
src/odyssey_tests/cli_parser_tests/argument_tests.py
|
GodwinneLorayne/odyssey
|
b5576818d70bea011772b944a4dd947777a5ac2f
|
[
"MIT"
] | 4
|
2020-06-06T04:50:24.000Z
|
2021-02-03T07:14:49.000Z
|
src/odyssey_tests/cli_parser_tests/argument_tests.py
|
python-odyssey/odyssey
|
b5576818d70bea011772b944a4dd947777a5ac2f
|
[
"MIT"
] | 1
|
2020-05-30T21:59:11.000Z
|
2020-05-30T21:59:11.000Z
|
import odyssey.cli_parser.pattern as pattern
import odyssey.cli_parser.argument as argument
def test_parse():
arguments = [
"dragons",
"and",
"things",
"--first-flag",
"--second-flag",
"value",
"--first-assignment=first",
"--second-assignment=second",
]
style = pattern.PatternStyle.DEFAULT
matched_arguments = pattern.match(style, arguments)
parsed_arguments = argument.make_argument_list(matched_arguments)
assert parsed_arguments[0].kind == argument.ArgumentKind.Positional
assert parsed_arguments[0].value == "dragons"
assert parsed_arguments[1].kind == argument.ArgumentKind.Positional
assert parsed_arguments[1].value == "and"
assert parsed_arguments[2].kind == argument.ArgumentKind.Positional
assert parsed_arguments[2].value == "things"
assert parsed_arguments[3].kind == argument.ArgumentKind.Flag
assert parsed_arguments[3].name == "first-flag"
assert parsed_arguments[4].kind == argument.ArgumentKind.Flag
assert parsed_arguments[4].name == "second-flag"
assert parsed_arguments[5].kind == argument.ArgumentKind.Positional
assert parsed_arguments[5].value == "value"
assert parsed_arguments[6].kind == argument.ArgumentKind.Assignment
assert parsed_arguments[6].name == "first-assignment"
assert parsed_arguments[6].value == "first"
assert parsed_arguments[7].kind == argument.ArgumentKind.Assignment
assert parsed_arguments[7].name == "second-assignment"
assert parsed_arguments[7].value == "second"
def test_parse_with_slashes():
arguments = [
"dragons",
"and",
"things",
"/first-flag",
"/second-flag",
"value",
"/first-assignment:first",
"/second-assignment:second",
]
style = (
pattern.PatternStyle.NAME_LOWERCASE_LETTERS
| pattern.PatternStyle.NAME_DASHES
| pattern.PatternStyle.SINGLE_SLASH_FLAG
| pattern.PatternStyle.COLON_ASSIGNMENT
)
matched_arguments = pattern.match(style, arguments)
parsed_arguments = argument.make_argument_list(matched_arguments)
assert parsed_arguments[0].kind == argument.ArgumentKind.Positional
assert parsed_arguments[0].value == "dragons"
assert parsed_arguments[1].kind == argument.ArgumentKind.Positional
assert parsed_arguments[1].value == "and"
assert parsed_arguments[2].kind == argument.ArgumentKind.Positional
assert parsed_arguments[2].value == "things"
assert parsed_arguments[3].kind == argument.ArgumentKind.Flag
assert parsed_arguments[3].name == "first-flag"
assert parsed_arguments[4].kind == argument.ArgumentKind.Flag
assert parsed_arguments[4].name == "second-flag"
assert parsed_arguments[5].kind == argument.ArgumentKind.Positional
assert parsed_arguments[5].value == "value"
assert parsed_arguments[6].kind == argument.ArgumentKind.Assignment
assert parsed_arguments[6].name == "first-assignment"
assert parsed_arguments[6].value == "first"
assert parsed_arguments[7].kind == argument.ArgumentKind.Assignment
assert parsed_arguments[7].name == "second-assignment"
assert parsed_arguments[7].value == "second"
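# Refactoring sketch only (not part of the original tests): the two tests above
# assert the same expectations against differently styled inputs, so the checks
# could be shared. The `EXPECTED_ARGUMENTS` triples are copied from the asserts
# above; None means "not checked for that argument".
def _check_parsed(parsed_arguments, expected):
    for parsed, (kind, name, value) in zip(parsed_arguments, expected):
        assert parsed.kind == kind
        if name is not None:
            assert parsed.name == name
        if value is not None:
            assert parsed.value == value

EXPECTED_ARGUMENTS = [
    (argument.ArgumentKind.Positional, None, "dragons"),
    (argument.ArgumentKind.Positional, None, "and"),
    (argument.ArgumentKind.Positional, None, "things"),
    (argument.ArgumentKind.Flag, "first-flag", None),
    (argument.ArgumentKind.Flag, "second-flag", None),
    (argument.ArgumentKind.Positional, None, "value"),
    (argument.ArgumentKind.Assignment, "first-assignment", "first"),
    (argument.ArgumentKind.Assignment, "second-assignment", "second"),
]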
| 40.425
| 71
| 0.707174
| 359
| 3,234
| 6.208914
| 0.133705
| 0.25572
| 0.339166
| 0.122028
| 0.893674
| 0.893674
| 0.893674
| 0.893674
| 0.893674
| 0.893674
| 0
| 0.013483
| 0.174397
| 3,234
| 79
| 72
| 40.936709
| 0.821348
| 0
| 0
| 0.704225
| 0
| 0
| 0.111317
| 0.030303
| 0
| 0
| 0
| 0
| 0.507042
| 1
| 0.028169
| false
| 0
| 0.028169
| 0
| 0.056338
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
398ec91ae7a9e1bd86d3680808ffd7cd09acb3ff
| 6,820
|
py
|
Python
|
nyc_data/ppe/migrations/0005_auto_20200410_1635.py
|
nyccto-rapicastillo/nyc-ppe
|
e6d5ba45cf2815f7659298103d3b5bc7210ed8cf
|
[
"MIT"
] | 3
|
2020-04-16T03:24:17.000Z
|
2020-09-11T22:12:31.000Z
|
nyc_data/ppe/migrations/0005_auto_20200410_1635.py
|
nyccto-rapicastillo/nyc-ppe
|
e6d5ba45cf2815f7659298103d3b5bc7210ed8cf
|
[
"MIT"
] | 47
|
2020-04-10T20:02:09.000Z
|
2021-09-08T02:05:09.000Z
|
nyc_data/ppe/migrations/0005_auto_20200410_1635.py
|
nyccto-rapicastillo/nyc-ppe
|
e6d5ba45cf2815f7659298103d3b5bc7210ed8cf
|
[
"MIT"
] | 1
|
2020-04-22T19:10:24.000Z
|
2020-04-22T19:10:24.000Z
|
# Generated by Django 3.0.5 on 2020-04-10 16:35
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
("ppe", "0004_auto_20200409_2232"),
]
operations = [
migrations.CreateModel(
name="Inventory",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"data_source",
models.TextField(
choices=[
("EDC_PPE", "EDC_PPE"),
("EDC_MAKE", "EDC_MAKE"),
("INVENTORY", "INVENTORY"),
],
default=None,
),
),
("replaced", models.BooleanField(default=False)),
(
"item",
models.TextField(
choices=[
("faceshield", "faceshield"),
("gown", "gown"),
("gown_material", "gown_material"),
("coveralls", "coveralls"),
("n95_mask_non_surgical", "n95_mask_non_surgical"),
("n95_mask_surgical", "n95_mask_surgical"),
("kn95_mask", "kn95_mask"),
("surgical_mask", "surgical_mask"),
("mask_other", "mask_other"),
("goggles", "goggles"),
("generic_eyeware", "generic_eyeware"),
("gloves", "gloves"),
("ventilators_full_service", "ventilators_full_service"),
(
"ventilators_non_full_service",
"ventilators_non_full_service",
),
("bipap_machines", "bipap_machines"),
("ppe_other", "ppe_other"),
("unknown", "unknown"),
("body_bags", "body_bags"),
],
default=None,
),
),
("quantity", models.IntegerField()),
("raw_data", django.contrib.postgres.fields.jsonb.JSONField()),
],
options={"abstract": False,},
),
migrations.AlterField(
model_name="delivery",
name="data_source",
field=models.TextField(
choices=[
("EDC_PPE", "EDC_PPE"),
("EDC_MAKE", "EDC_MAKE"),
("INVENTORY", "INVENTORY"),
],
default=None,
),
),
migrations.AlterField(
model_name="hospital",
name="data_source",
field=models.TextField(
choices=[
("EDC_PPE", "EDC_PPE"),
("EDC_MAKE", "EDC_MAKE"),
("INVENTORY", "INVENTORY"),
],
default=None,
),
),
migrations.AlterField(
model_name="need",
name="data_source",
field=models.TextField(
choices=[
("EDC_PPE", "EDC_PPE"),
("EDC_MAKE", "EDC_MAKE"),
("INVENTORY", "INVENTORY"),
],
default=None,
),
),
migrations.AlterField(
model_name="need",
name="item",
field=models.TextField(
choices=[
("faceshield", "faceshield"),
("gown", "gown"),
("gown_material", "gown_material"),
("coveralls", "coveralls"),
("n95_mask_non_surgical", "n95_mask_non_surgical"),
("n95_mask_surgical", "n95_mask_surgical"),
("kn95_mask", "kn95_mask"),
("surgical_mask", "surgical_mask"),
("mask_other", "mask_other"),
("goggles", "goggles"),
("generic_eyeware", "generic_eyeware"),
("gloves", "gloves"),
("ventilators_full_service", "ventilators_full_service"),
("ventilators_non_full_service", "ventilators_non_full_service"),
("bipap_machines", "bipap_machines"),
("ppe_other", "ppe_other"),
("unknown", "unknown"),
("body_bags", "body_bags"),
]
),
),
migrations.AlterField(
model_name="purchase",
name="data_source",
field=models.TextField(
choices=[
("EDC_PPE", "EDC_PPE"),
("EDC_MAKE", "EDC_MAKE"),
("INVENTORY", "INVENTORY"),
],
default=None,
),
),
migrations.AlterField(
model_name="purchase",
name="item",
field=models.TextField(
choices=[
("faceshield", "faceshield"),
("gown", "gown"),
("gown_material", "gown_material"),
("coveralls", "coveralls"),
("n95_mask_non_surgical", "n95_mask_non_surgical"),
("n95_mask_surgical", "n95_mask_surgical"),
("kn95_mask", "kn95_mask"),
("surgical_mask", "surgical_mask"),
("mask_other", "mask_other"),
("goggles", "goggles"),
("generic_eyeware", "generic_eyeware"),
("gloves", "gloves"),
("ventilators_full_service", "ventilators_full_service"),
("ventilators_non_full_service", "ventilators_non_full_service"),
("bipap_machines", "bipap_machines"),
("ppe_other", "ppe_other"),
("unknown", "unknown"),
("body_bags", "body_bags"),
],
default=None,
),
),
]
| 38.531073
| 87
| 0.399853
| 451
| 6,820
| 5.713969
| 0.21286
| 0.032596
| 0.034924
| 0.041909
| 0.817229
| 0.770664
| 0.753201
| 0.753201
| 0.753201
| 0.753201
| 0
| 0.019048
| 0.47654
| 6,820
| 176
| 88
| 38.75
| 0.702801
| 0.006598
| 0
| 0.805882
| 1
| 0
| 0.263694
| 0.068064
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017647
| 0
| 0.035294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
39d50d638a10383644d2ecf7a968445b8518baa0
| 2,977
|
py
|
Python
|
tests/configuration/configuration_test.py
|
martvanrijthoven/creationism
|
510040dc4f3cd622c48006318e3d291a66f5335f
|
[
"MIT"
] | null | null | null |
tests/configuration/configuration_test.py
|
martvanrijthoven/creationism
|
510040dc4f3cd622c48006318e3d291a66f5335f
|
[
"MIT"
] | null | null | null |
tests/configuration/configuration_test.py
|
martvanrijthoven/creationism
|
510040dc4f3cd622c48006318e3d291a66f5335f
|
[
"MIT"
] | null | null | null |
from creationism.configuration.config import ConfigDict
from pathlib import Path
class TestConfig:
def test_create_config_dict(self):
config = {"a": 1, "b": "b", "c": [1, 2, 3]}
config_dict = ConfigDict(config_value=config)
assert isinstance(config_dict, ConfigDict)
def test_create_config_dict_with_replace_is_true_list(self):
config = {"a": 1, "b": "b", "c[replace=true]": [1, 2, 3]}
config_dict = ConfigDict(config_value=config)
assert config_dict["c"].replace is True
def test_create_config_dict_with_replace_is_false_list(self):
config = {"a": 1, "b": "b", "c[replace=false]": [1, 2, 3]}
config_dict = ConfigDict(config_value=config)
assert config_dict["c"].replace is False
def test_create_config_dict_with_replace_is_true_dict(self):
config = {"a": 1, "b": "b", "c[replace=true]": {"c2": "hello"}}
config_dict = ConfigDict(config_value=config)
assert config_dict["c"].replace is True
def test_create_config_dict_with_replace_is_false_dict(self):
config = {"a": 1, "b": "b", "c[replace=false]": {"c2": "hello"}}
config_dict = ConfigDict(config_value=config)
assert config_dict["c"].replace is False
def test_create_config_dict_with_yaml_reference(self):
config = {"a": 1, "b": "b", "c": str(Path(__file__).parent / "test.yml")}
config_dict = ConfigDict(config_value=config).cast()
assert config_dict["c"]["name"] == [1,2,3]
def test_create_config_dict_with_yaml_reference_replace(self):
config = {"a": 1, "b": "b", "c": str(Path(__file__).parent / "test.yml")}
config_dict = ConfigDict(config_value=config)
assert config_dict["c"]["name"].replace is False
def test_merge_replace_dict_true(self):
config = {"a": 1, "b": "b", "c": {'k': {'n': 4}}}
config2 = {"a": 1, "b": "b", "c[replace=true]": {'k': {'l': 5}}}
config_dict = ConfigDict(config_value=config)
config_dict2 = ConfigDict(config_value=config2)
config_dict.merge(config_dict2)
assert config_dict["c"]["k"]['l'].cast() == 5
assert 'n' not in config_dict["c"]["k"]
# def test_merge_replace_dict_false(self):
# config = {"a": 1, "b": "b", "c": str(Path(__file__).parent / "test.yml")}
# config_dict = ConfigDict(config_value=config)
# assert config_dict["c"]["name"].replace is False
# def test_merge_replace_list_true(self):
# config = {"a": 1, "b": "b", "c": str(Path(__file__).parent / "test.yml")}
# config_dict = ConfigDict(config_value=config)
# assert config_dict["c"]["name"].replace is False
# def test_merge_replace_list_false(self):
# config = {"a": 1, "b": "b", "c": str(Path(__file__).parent / "test.yml")}
# config_dict = ConfigDict(config_value=config)
# assert config_dict["c"]["name"].replace is False
# def test_cast_config_dict(self):
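# A guessed completion of the skipped merge test above, kept commented out like
# the originals because the replace=false merge semantics are assumed here, not
# confirmed: presumably a non-replacing merge keeps existing keys.
# def test_merge_replace_dict_false(self):
#     config = {"a": 1, "b": "b", "c": {'k': {'n': 4}}}
#     config2 = {"a": 1, "b": "b", "c[replace=false]": {'k': {'l': 5}}}
#     config_dict = ConfigDict(config_value=config)
#     config_dict2 = ConfigDict(config_value=config2)
#     config_dict.merge(config_dict2)
#     assert config_dict["c"]["k"]['l'].cast() == 5
#     assert 'n' in config_dict["c"]["k"]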
| 42.528571
| 83
| 0.625462
| 413
| 2,977
| 4.205811
| 0.121065
| 0.184226
| 0.020725
| 0.027634
| 0.839378
| 0.800806
| 0.779505
| 0.770294
| 0.715602
| 0.652274
| 0
| 0.013948
| 0.20524
| 2,977
| 69
| 84
| 43.144928
| 0.720203
| 0.234128
| 0
| 0.384615
| 0
| 0
| 0.071492
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.205128
| false
| 0
| 0.051282
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ffda0bea4144681eb4a425d705de86aca24d17e8
| 21,928
|
py
|
Python
|
sdk/python/pulumi_azuredevops/service_endpoint_artifactory.py
|
pulumi/pulumi-azuredevops
|
e6d73d1501335037fb944ae627091a7afc7f0048
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2020-06-28T11:39:32.000Z
|
2022-03-05T13:34:16.000Z
|
sdk/python/pulumi_azuredevops/service_endpoint_artifactory.py
|
pulumi/pulumi-azuredevops
|
e6d73d1501335037fb944ae627091a7afc7f0048
|
[
"ECL-2.0",
"Apache-2.0"
] | 58
|
2020-06-20T14:00:28.000Z
|
2022-03-31T15:20:51.000Z
|
sdk/python/pulumi_azuredevops/service_endpoint_artifactory.py
|
pulumi/pulumi-azuredevops
|
e6d73d1501335037fb944ae627091a7afc7f0048
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-10-21T03:22:01.000Z
|
2021-12-10T18:26:59.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServiceEndpointArtifactoryArgs', 'ServiceEndpointArtifactory']
@pulumi.input_type
class ServiceEndpointArtifactoryArgs:
def __init__(__self__, *,
project_id: pulumi.Input[str],
service_endpoint_name: pulumi.Input[str],
url: pulumi.Input[str],
authentication_basic: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationBasicArgs']] = None,
authentication_token: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationTokenArgs']] = None,
authorization: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ServiceEndpointArtifactory resource.
:param pulumi.Input[str] project_id: The project ID or project name.
:param pulumi.Input[str] service_endpoint_name: The Service Endpoint name.
:param pulumi.Input[str] url: URL of the Artifactory server to connect with.
:param pulumi.Input[str] description: The Service Endpoint description.
"""
pulumi.set(__self__, "project_id", project_id)
pulumi.set(__self__, "service_endpoint_name", service_endpoint_name)
pulumi.set(__self__, "url", url)
if authentication_basic is not None:
pulumi.set(__self__, "authentication_basic", authentication_basic)
if authentication_token is not None:
pulumi.set(__self__, "authentication_token", authentication_token)
if authorization is not None:
pulumi.set(__self__, "authorization", authorization)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
The project ID or project name.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="serviceEndpointName")
def service_endpoint_name(self) -> pulumi.Input[str]:
"""
The Service Endpoint name.
"""
return pulumi.get(self, "service_endpoint_name")
@service_endpoint_name.setter
def service_endpoint_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_endpoint_name", value)
@property
@pulumi.getter
def url(self) -> pulumi.Input[str]:
"""
URL of the Artifactory server to connect with.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: pulumi.Input[str]):
pulumi.set(self, "url", value)
@property
@pulumi.getter(name="authenticationBasic")
def authentication_basic(self) -> Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationBasicArgs']]:
return pulumi.get(self, "authentication_basic")
@authentication_basic.setter
def authentication_basic(self, value: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationBasicArgs']]):
pulumi.set(self, "authentication_basic", value)
@property
@pulumi.getter(name="authenticationToken")
def authentication_token(self) -> Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationTokenArgs']]:
return pulumi.get(self, "authentication_token")
@authentication_token.setter
def authentication_token(self, value: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationTokenArgs']]):
pulumi.set(self, "authentication_token", value)
@property
@pulumi.getter
def authorization(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "authorization")
@authorization.setter
def authorization(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "authorization", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The Service Endpoint description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class _ServiceEndpointArtifactoryState:
def __init__(__self__, *,
authentication_basic: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationBasicArgs']] = None,
authentication_token: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationTokenArgs']] = None,
authorization: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
service_endpoint_name: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ServiceEndpointArtifactory resources.
:param pulumi.Input[str] description: The Service Endpoint description.
:param pulumi.Input[str] project_id: The project ID or project name.
:param pulumi.Input[str] service_endpoint_name: The Service Endpoint name.
:param pulumi.Input[str] url: URL of the Artifactory server to connect with.
"""
if authentication_basic is not None:
pulumi.set(__self__, "authentication_basic", authentication_basic)
if authentication_token is not None:
pulumi.set(__self__, "authentication_token", authentication_token)
if authorization is not None:
pulumi.set(__self__, "authorization", authorization)
if description is not None:
pulumi.set(__self__, "description", description)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if service_endpoint_name is not None:
pulumi.set(__self__, "service_endpoint_name", service_endpoint_name)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="authenticationBasic")
def authentication_basic(self) -> Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationBasicArgs']]:
return pulumi.get(self, "authentication_basic")
@authentication_basic.setter
def authentication_basic(self, value: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationBasicArgs']]):
pulumi.set(self, "authentication_basic", value)
@property
@pulumi.getter(name="authenticationToken")
def authentication_token(self) -> Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationTokenArgs']]:
return pulumi.get(self, "authentication_token")
@authentication_token.setter
def authentication_token(self, value: Optional[pulumi.Input['ServiceEndpointArtifactoryAuthenticationTokenArgs']]):
pulumi.set(self, "authentication_token", value)
@property
@pulumi.getter
def authorization(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "authorization")
@authorization.setter
def authorization(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "authorization", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The Service Endpoint description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project ID or project name.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="serviceEndpointName")
def service_endpoint_name(self) -> Optional[pulumi.Input[str]]:
"""
The Service Endpoint name.
"""
return pulumi.get(self, "service_endpoint_name")
@service_endpoint_name.setter
def service_endpoint_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_endpoint_name", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
URL of the Artifactory server to connect with.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
class ServiceEndpointArtifactory(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authentication_basic: Optional[pulumi.Input[pulumi.InputType['ServiceEndpointArtifactoryAuthenticationBasicArgs']]] = None,
authentication_token: Optional[pulumi.Input[pulumi.InputType['ServiceEndpointArtifactoryAuthenticationTokenArgs']]] = None,
authorization: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
service_endpoint_name: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Artifactory server endpoint within an Azure DevOps organization.
## Example Usage
```python
import pulumi
import pulumi_azuredevops as azuredevops
project = azuredevops.Project("project",
visibility="private",
version_control="Git",
work_item_template="Agile")
serviceendpoint = azuredevops.ServiceEndpointArtifactory("serviceendpoint",
project_id=project.id,
service_endpoint_name="Sample Artifactory",
description="Managed by Terraform",
url="https://artifactory.my.com",
authentication_token=azuredevops.ServiceEndpointArtifactoryAuthenticationTokenArgs(
token="0000000000000000000000000000000000000000",
))
```
Alternatively a username and password may be used.
```python
import pulumi
import pulumi_azuredevops as azuredevops
serviceendpoint = azuredevops.ServiceEndpointArtifactory("serviceendpoint",
project_id=azuredevops_project["project"]["id"],
service_endpoint_name="Sample Artifactory",
description="Managed by Terraform",
url="https://artifactory.my.com",
authentication_basic=azuredevops.ServiceEndpointArtifactoryAuthenticationBasicArgs(
username="sampleuser",
password="0000000000000000000000000000000000000000",
))
```
## Relevant Links
* [Azure DevOps Service Connections](https://docs.microsoft.com/en-us/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml)
* [Artifactory User Token](https://docs.artifactory.org/latest/user-guide/user-token/)
## Import
Azure DevOps Service Endpoint Artifactory can be imported using the **projectID/serviceEndpointID**, e.g.
```sh
$ pulumi import azuredevops:index/serviceEndpointArtifactory:ServiceEndpointArtifactory serviceendpoint 00000000-0000-0000-0000-000000000000/00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The Service Endpoint description.
:param pulumi.Input[str] project_id: The project ID or project name.
:param pulumi.Input[str] service_endpoint_name: The Service Endpoint name.
:param pulumi.Input[str] url: URL of the Artifactory server to connect with.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServiceEndpointArtifactoryArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Artifactory server endpoint within an Azure DevOps organization.
## Example Usage
```python
import pulumi
import pulumi_azuredevops as azuredevops
project = azuredevops.Project("project",
visibility="private",
version_control="Git",
work_item_template="Agile")
serviceendpoint = azuredevops.ServiceEndpointArtifactory("serviceendpoint",
project_id=project.id,
service_endpoint_name="Sample Artifactory",
description="Managed by Terraform",
url="https://artifactory.my.com",
authentication_token=azuredevops.ServiceEndpointArtifactoryAuthenticationTokenArgs(
token="0000000000000000000000000000000000000000",
))
```
Alternatively a username and password may be used.
```python
import pulumi
import pulumi_azuredevops as azuredevops
serviceendpoint = azuredevops.ServiceEndpointArtifactory("serviceendpoint",
project_id=azuredevops_project["project"]["id"],
service_endpoint_name="Sample Artifactory",
description="Managed by Terraform",
url="https://artifactory.my.com",
authentication_basic=azuredevops.ServiceEndpointArtifactoryAuthenticationBasicArgs(
username="sampleuser",
password="0000000000000000000000000000000000000000",
))
```
## Relevant Links
* [Azure DevOps Service Connections](https://docs.microsoft.com/en-us/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml)
* [Artifactory User Token](https://docs.artifactory.org/latest/user-guide/user-token/)
## Import
Azure DevOps Service Endpoint Artifactory can be imported using the **projectID/serviceEndpointID**, e.g.
```sh
$ pulumi import azuredevops:index/serviceEndpointArtifactory:ServiceEndpointArtifactory serviceendpoint 00000000-0000-0000-0000-000000000000/00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param ServiceEndpointArtifactoryArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceEndpointArtifactoryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authentication_basic: Optional[pulumi.Input[pulumi.InputType['ServiceEndpointArtifactoryAuthenticationBasicArgs']]] = None,
authentication_token: Optional[pulumi.Input[pulumi.InputType['ServiceEndpointArtifactoryAuthenticationTokenArgs']]] = None,
authorization: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
service_endpoint_name: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceEndpointArtifactoryArgs.__new__(ServiceEndpointArtifactoryArgs)
__props__.__dict__["authentication_basic"] = authentication_basic
__props__.__dict__["authentication_token"] = authentication_token
__props__.__dict__["authorization"] = authorization
__props__.__dict__["description"] = description
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
if service_endpoint_name is None and not opts.urn:
raise TypeError("Missing required property 'service_endpoint_name'")
__props__.__dict__["service_endpoint_name"] = service_endpoint_name
if url is None and not opts.urn:
raise TypeError("Missing required property 'url'")
__props__.__dict__["url"] = url
super(ServiceEndpointArtifactory, __self__).__init__(
'azuredevops:index/serviceEndpointArtifactory:ServiceEndpointArtifactory',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
authentication_basic: Optional[pulumi.Input[pulumi.InputType['ServiceEndpointArtifactoryAuthenticationBasicArgs']]] = None,
authentication_token: Optional[pulumi.Input[pulumi.InputType['ServiceEndpointArtifactoryAuthenticationTokenArgs']]] = None,
authorization: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
service_endpoint_name: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None) -> 'ServiceEndpointArtifactory':
"""
Get an existing ServiceEndpointArtifactory resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The Service Endpoint description.
:param pulumi.Input[str] project_id: The project ID or project name.
:param pulumi.Input[str] service_endpoint_name: The Service Endpoint name.
:param pulumi.Input[str] url: URL of the Artifactory server to connect with.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServiceEndpointArtifactoryState.__new__(_ServiceEndpointArtifactoryState)
__props__.__dict__["authentication_basic"] = authentication_basic
__props__.__dict__["authentication_token"] = authentication_token
__props__.__dict__["authorization"] = authorization
__props__.__dict__["description"] = description
__props__.__dict__["project_id"] = project_id
__props__.__dict__["service_endpoint_name"] = service_endpoint_name
__props__.__dict__["url"] = url
return ServiceEndpointArtifactory(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authenticationBasic")
def authentication_basic(self) -> pulumi.Output[Optional['outputs.ServiceEndpointArtifactoryAuthenticationBasic']]:
return pulumi.get(self, "authentication_basic")
@property
@pulumi.getter(name="authenticationToken")
def authentication_token(self) -> pulumi.Output[Optional['outputs.ServiceEndpointArtifactoryAuthenticationToken']]:
return pulumi.get(self, "authentication_token")
@property
@pulumi.getter
def authorization(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "authorization")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The Service Endpoint description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The project ID or project name.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="serviceEndpointName")
def service_endpoint_name(self) -> pulumi.Output[str]:
"""
The Service Endpoint name.
"""
return pulumi.get(self, "service_endpoint_name")
@property
@pulumi.getter
def url(self) -> pulumi.Output[str]:
"""
URL of the Artifactory server to connect with.
"""
return pulumi.get(self, "url")
| 44.569106
| 186
| 0.673021
| 2,156
| 21,928
| 6.616883
| 0.09462
| 0.070938
| 0.061825
| 0.041637
| 0.852096
| 0.832889
| 0.810178
| 0.799033
| 0.787957
| 0.750876
| 0
| 0.01708
| 0.228384
| 21,928
| 491
| 187
| 44.659878
| 0.826064
| 0.283792
| 0
| 0.716981
| 1
| 0
| 0.172304
| 0.093401
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158491
| false
| 0.003774
| 0.026415
| 0.033962
| 0.279245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ffff0a9dbfdd0d43378288ab2594adf4d9de24fb
| 31,794
|
py
|
Python
|
tests/test_driver.py
|
michelepagot/pysds011
|
8a0e8cd401fda302b65d69dede224dd4b984e763
|
[
"MIT"
] | null | null | null |
tests/test_driver.py
|
michelepagot/pysds011
|
8a0e8cd401fda302b65d69dede224dd4b984e763
|
[
"MIT"
] | null | null | null |
tests/test_driver.py
|
michelepagot/pysds011
|
8a0e8cd401fda302b65d69dede224dd4b984e763
|
[
"MIT"
] | 1
|
2021-07-10T02:17:09.000Z
|
2021-07-10T02:17:09.000Z
|
from pysds011.driver import SDS011
import logging
class SerialMock(object):
def __init__(self):
self.__write_reg = list()
self.__read_reg = list()
self.timeout = 0
def write(self, data):
self.__write_reg.append(data)
def read(self, size):
if self.__read_reg:
frame = self.__read_reg.pop(0)
assert size == frame['size']
return frame['data']
else:
return None
def test_expect_read(self, data):
self.__read_reg.append({'size': len(data), 'data': data})
def test_get_write(self):
return self.__write_reg
HEAD = b'\xaa'
CMD_ID = b'\xb4'
RSP_ID = b'\xc5'
TAIL = b'\xab'
def compose_write(data, id):
CHECKSUM = bytes([sum(data+id) % 256])
DRIVER_WRITE = HEAD + CMD_ID + data + id + CHECKSUM + TAIL
return DRIVER_WRITE
def compose_response(data, rsp=RSP_ID):
CHECKSUM_RSP = bytes([sum(data) % 256])
return rsp+data+CHECKSUM_RSP+TAIL
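# Illustrative sketch (not part of the original suite): the helpers above reproduce the
# byte layout used in the datasheet examples further down; the checksum byte is simply
# sum(data + id) % 256. Defined as a plain helper so pytest does not collect it.
def _example_datasheet_sleep_frame():
    data = b'\x06\x01\x00' + b'\x00' * 10
    sensor_id = b'\xa1\x60'
    frame = compose_write(data, sensor_id)
    # AA B4 06 01 00 00 00 00 00 00 00 00 00 00 00 A1 60 08 AB
    assert frame == bytes.fromhex('aab40601' + '00' * 11 + 'a16008ab')
    return frame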
def test_create():
d = SDS011(None, None)
assert d is not None
def test_cmd_set_mode():
"""
Test set data reporting mode: 'active mode'
"""
##################
# EXPECTATION
##################
# create an artificial Serial object for test purposes
# it lets the test code:
# - check what the driver writes to the sensor
# - decide (simulate) what the sensor replies to the driver
log = logging.getLogger("SDS011")
sm = SerialMock()
# this is what the driver (code under test) is expected to send to the sensor,
# prepared here but checked later
DATA = b'\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
# this is to simulate sensor response
sm.test_expect_read(HEAD)
DATA_RSP = b'\x02\x01\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd' # simulate that the sensor response comes from the sensor with ID ABCD
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_mode(0)
##################
# VERIFICATION
##################
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_mode_sensornotapplied():
"""
Test set data reporting mode
but the mode in the sensor reply is not what was requested
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
# this is to simulate sensor response
sm.test_expect_read(HEAD)
# driver sets 0 but the sensor replies 1 (3rd byte)
DATA_RSP = b'\x02\x01\x01\x00'
SENSOR_ID_RSP = b'\xab\xcd' # simulate that the sensor response comes from the sensor with ID ABCD
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_mode(0) is False
##################
# VERIFICATION
##################
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_mode_docexample():
"""
Test set data reporting mode
example from the datasheet
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xa1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
# this is to simulate sensor response
sm.test_expect_read(HEAD)
# driver sets 1 and the sensor confirms 1 (3rd byte)
DATA_RSP = b'\x02\x01\x01\x00'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_mode(1, SENSOR_ID)
##################
# VERIFICATION
##################
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_mode_active():
"""
Test get data reporting mode: 'active mode'
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x02\x00\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd' # simulate that the sensor response comes from the sensor with ID ABCD
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert 0 == d.cmd_get_mode()
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_mode_specific_id():
"""
Test get data reporting mode: 'active mode'
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xab\xcd'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x02\x00\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd' # simulate that the sensor response comes from the sensor with ID ABCD
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert 0 == d.cmd_get_mode(id=SENSOR_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_mode_query():
"""
Test get data reporting mode: 'query mode'
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x02\x00\x01\x00'
SENSOR_ID_RSP = b'\xab\xcd' # simulate that the sensor response comes from the sensor with ID ABCD
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert 1 == d.cmd_get_mode()
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_sleep():
"""
Test correctly processed set sleep command:
Send command, set all connected sensors to sleep
Sensor with ID ABCD confirms
"""
##################
# EXPECTATION
##################
sm = SerialMock()
log = logging.getLogger("SDS011")
DATA = b'\x06\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x01\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_sleep()
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_sleep_docexample1():
"""
Test correctly processed set sleep command
Send command, set the sensor with ID A160 to sleep
AA B4 06 01 00 00 00 00 00 00 00 00 00 00 00 A1 60 08 AB
Sensor with ID A160 response:
AA C5 06 01 00 00 A1 60 08 AB
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xa1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x01\x00\x00'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_sleep(id=SENSOR_ID_RSP)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_sleep_docexample2():
"""
Test correctly processed set sleep command
Send command, set the sensor with ID A160 to sleep
AA B4 06 01 01 00 00 00 00 00 00 00 00 00 00 A1 60 09 AB
Sensor with ID A160 response:
AA C5 06 01 01 00 A1 60 09 AB
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xa1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x01\x01\x00'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_sleep(sleep=0, id=SENSOR_ID_RSP)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_sleep_wakeup():
"""
Test correctly processed set sleep command
in wakeup mode
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x01\x01\x00'
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_sleep(0)
##################
# VERIFICATION
##################
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_sleep_no_replay():
"""
Test the situation where the sensor does not reply to a sleep request
"""
log = logging.getLogger("SDS011")
sm = SerialMock()
d = SDS011(sm, log)
# calls the sleep driver without programming any reply from the serial mock
assert d.cmd_set_sleep() is False
def test_cmd_set_sleep_read_delayed():
"""
Check the driver mechanism that looks for the initial sensor response
"""
log = logging.getLogger("SDS011")
sm = SerialMock()
sm.test_expect_read(b'\xff')
sm.test_expect_read(b'\xff')
sm.test_expect_read(b'\xff')
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x01\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
d = SDS011(sm, log)
assert d.cmd_set_sleep()
def test_cmd_set_sleep_malformed():
"""
Check driver behavior if no valid data comes
from the sensor for a long time (more than the maximum possible read size)
"""
log = logging.getLogger("SDS011")
sm = SerialMock()
for _ in range(30):
sm.test_expect_read(b'\xff')
d = SDS011(sm, log)
assert d.cmd_set_sleep() is False
# also check that the driver stops before reading 30 bytes (it should stop at 20 bytes)
remaining_not_requested_byte = sm.read(1)
assert remaining_not_requested_byte is not None
def test_cmd_set_sleep_get_only_head():
"""
Test driver behavior if sensor only sends HEAD
and nothing more
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
d = SDS011(sm, log)
assert d.cmd_set_sleep() is False
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_sleep_wrong_checksum():
"""
Test set sleep command when the response checksum is wrong
"""
log = logging.getLogger("SDS011")
sm = SerialMock()
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x01\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd'
CHECKSUM_RSP = bytes([sum(DATA_RSP) % 256 + 1])
sm.test_expect_read(RSP_ID+DATA_RSP+SENSOR_ID_RSP+CHECKSUM_RSP+TAIL)
d = SDS011(sm, log)
assert d.cmd_set_sleep() is False
def test_cmd_get_sleep_sleepingsensor():
"""
Test correctly processed get sleep command
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x00\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
d = SDS011(sm, log)
assert d.cmd_get_sleep()
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_sleep_specific_id():
"""
Test correctly processed get sleep command
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xab\xcd'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x00\x00\x00'
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
d = SDS011(sm, log)
assert d.cmd_get_sleep(id=SENSOR_ID)
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_sleep_awakesensor():
"""
Test correctly processed get sleep command, sensor is not sleeping
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x00\x01\x00'
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
d = SDS011(sm, log)
assert d.cmd_get_sleep() is False
# check expectations about what the driver should send to the sensor
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_sleep_noresponse():
"""
Test get sleep command when the sensor does not respond
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
d = SDS011(sm, log)
assert d.cmd_get_sleep() is None
def test_cmd_get_sleep_invalid():
"""
Test get sleep command when the sensor reply carries an invalid sleep status
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
sm.test_expect_read(HEAD)
DATA_RSP = b'\x06\x00\x02\x00' # 2 is not a valid status
SENSOR_ID_RSP = b'\xab\xcd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
d = SDS011(sm, log)
assert d.cmd_get_sleep() is None
def test_cmd_query_data():
"""
Test query data
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\xd4\x04\x3a\x0a'
SENSOR_ID_RSP = b'\xab\xcd' # simulate that the sensor response comes from the sensor with ID ABCD
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP, rsp=b'\xc0'))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
resp = d.cmd_query_data()
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
assert resp is not None
assert 123.6 == resp['pm25']
assert 261.8 == resp['pm10']
assert 'pretty' in resp.keys()
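# Decoding note (added annotation): the query reply carries PM2.5 and PM10 as
# little-endian 16-bit values in tenths of ug/m3, so DATA_RSP = b'\xd4\x04\x3a\x0a'
# decodes to pm25 = (0x04 * 256 + 0xd4) / 10 = 123.6 and
# pm10 = (0x0a * 256 + 0x3a) / 10 = 261.8, matching the assertions above.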
def test_cmd_query_data_fromaspecificsensor():
"""
Test query data using a specific sensor ID
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xAB\xCD'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\xd4\x04\x3a\x0a'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP, rsp=b'\xc0'))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
resp = d.cmd_query_data(id=SENSOR_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
assert resp is not None
assert 123.6 == resp['pm25']
assert 261.8 == resp['pm10']
assert 'pretty' in resp.keys()
def test_cmd_set_device_id():
"""
Test set device ID API
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
# New device ID [EF FE]
NEW_ID = b'\xef\xfe'
DATA = b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + NEW_ID
SENSOR_ID = b'\xab\xcd'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x05\x00\x00\x00'
SENSOR_ID_RSP = NEW_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_id(id=SENSOR_ID, new_id=NEW_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_device_id_wrongidinreplay():
"""
Test set device ID API: the ID in the reply
is not the same as new_id
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
# New device ID [EF FE]
NEW_ID = b'\xef\xfe'
DATA = b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + NEW_ID
SENSOR_ID = b'\xab\xcd'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x05\x00\x00\x00'
SENSOR_ID_RSP = b'\xdd\xdd'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_id(id=SENSOR_ID, new_id=NEW_ID) is False
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_device_id_wrongchecksum():
"""
Test set device ID API: the response checksum is wrong
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
# New device ID [EF FE]
NEW_ID = b'\xef\xfe'
DATA = b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + NEW_ID
SENSOR_ID = b'\xab\xcd'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x05\x00\x00\x00' + NEW_ID
CHECKSUM_RSP = bytes([sum(DATA_RSP) % 256 + 1])
sm.test_expect_read(RSP_ID+DATA_RSP+CHECKSUM_RSP+TAIL)
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_id(id=SENSOR_ID, new_id=NEW_ID) is False
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_device_id_docexample():
"""
Test set device ID API: example from datasheet
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
# New device ID [A0 01]
NEW_ID = b'\xa0\x01'
DATA = b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + NEW_ID
SENSOR_ID = b'\xa1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x05\x00\x00\x00'
SENSOR_ID_RSP = NEW_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_id(id=SENSOR_ID, new_id=NEW_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_working_period_continuous():
"""
Test set working period API
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x08\x01\x00\x00'
SENSOR_ID_RSP = b'\xAB\xCD'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_working_period(0)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_working_period_maxnallowed():
"""
Test set working period: set to 30min
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
# 0x1E : 30
DATA = b'\x08\x01\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x08\x01\x1e\x00'
SENSOR_ID_RSP = b'\xAB\xCD'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_working_period(30)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_set_working_period_morethanallowed():
"""
Test set working period: set to 31min
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_working_period(31) is False
##################
# VERIFICATION
##################
writes = sm.test_get_write()
assert 0 == len(writes)
def test_cmd_set_working_period_docexample():
"""
Test set working period API: example from datasheet
Send command to set the working period of sensor with ID A160 to 1 minute
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x08\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xa1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x08\x01\x01\x00'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert d.cmd_set_working_period(1, id=SENSOR_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_working_period():
"""
Test get working period API
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x08\x00\x00\x00'
SENSOR_ID_RSP = b'\xAB\xCD'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert 0 == d.cmd_get_working_period()
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_working_period_docexample():
"""
Test get working period API example from datasheet
Send command to query the working period of the sensor with ID A160
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xa1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x08\x00\x02\x00'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
assert 2 == d.cmd_get_working_period(id=SENSOR_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
def test_cmd_get_firmware_version():
"""
Test get firmware version API
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xff\xff'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x07\x01\x02\x03'
SENSOR_ID_RSP = b'\xAB\xCD'
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
res = d.cmd_firmware_ver()
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
assert 'year' in res.keys()
assert 1 == res['year']
assert 'month' in res.keys()
assert 2 == res['month']
assert 'day' in res.keys()
assert 3 == res['day']
assert SENSOR_ID_RSP == res['id']
assert 'pretty' in res.keys()
def test_cmd_get_firmware_version_docexample():
"""
Test get firmware version API
"""
##################
# EXPECTATION
##################
log = logging.getLogger("SDS011")
sm = SerialMock()
DATA = b'\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SENSOR_ID = b'\xA1\x60'
EXPECTED_DRIVER_WRITE = compose_write(DATA, SENSOR_ID)
sm.test_expect_read(HEAD)
DATA_RSP = b'\x07\x0f\x07\x0a'
SENSOR_ID_RSP = SENSOR_ID
sm.test_expect_read(compose_response(DATA_RSP + SENSOR_ID_RSP))
##################
# TEST EXEC
##################
d = SDS011(sm, log)
res = d.cmd_firmware_ver(id=SENSOR_ID)
##################
# VERIFICATION
##################
production_code_write_to_sensor = sm.test_get_write()
assert 1 == len(production_code_write_to_sensor)
assert EXPECTED_DRIVER_WRITE == production_code_write_to_sensor[0]
assert 'year' in res.keys()
assert 15 == res['year']
assert 'month' in res.keys()
assert 7 == res['month']
assert 'day' in res.keys()
assert 10 == res['day']
assert 'pretty' in res.keys()
| 28.412869
| 94
| 0.610115
| 4,281
| 31,794
| 4.247372
| 0.056295
| 0.098004
| 0.126217
| 0.144531
| 0.888467
| 0.867679
| 0.847055
| 0.8393
| 0.830886
| 0.821482
| 0
| 0.055761
| 0.208624
| 31,794
| 1,118
| 95
| 28.438283
| 0.666905
| 0.152922
| 0
| 0.761538
| 0
| 0.051923
| 0.110385
| 0.058565
| 0
| 0
| 0
| 0
| 0.213462
| 1
| 0.080769
| false
| 0
| 0.003846
| 0.001923
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
081bffefe9c4d982a8e31bc5ea7e5154078d9350
| 463
|
py
|
Python
|
test_script.py
|
aronchick/pipenv-multiple-env-in-a-directory-example
|
d089196779d7e3851beeb3a07ce702264ab175bf
|
[
"MIT"
] | null | null | null |
test_script.py
|
aronchick/pipenv-multiple-env-in-a-directory-example
|
d089196779d7e3851beeb3a07ce702264ab175bf
|
[
"MIT"
] | null | null | null |
test_script.py
|
aronchick/pipenv-multiple-env-in-a-directory-example
|
d089196779d7e3851beeb3a07ce702264ab175bf
|
[
"MIT"
] | null | null | null |
import sklearn
print(f"SKLearn version: {sklearn.__version__} \n")
if sklearn.__version__ == "0.21.3":
print(f"We're executing this code because sklearn version is < 0.22.")
else:
print(f"We're NOT executing this code because sklearn version is < 0.22.")
if sklearn.__version__ == "0.23.1":
print(f"We're executing this code because sklearn version is >= 0.22.")
else:
print(f"We're NOT executing this code because sklearn version is >= 0.22.")
| 35.615385
| 79
| 0.704104
| 77
| 463
| 4.077922
| 0.285714
| 0.356688
| 0.101911
| 0.127389
| 0.719745
| 0.719745
| 0.719745
| 0.719745
| 0.719745
| 0.719745
| 0
| 0.05168
| 0.164147
| 463
| 13
| 79
| 35.615385
| 0.75969
| 0
| 0
| 0.2
| 0
| 0
| 0.653017
| 0.045259
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.1
| 0
| 0.1
| 0.5
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
f24fe976513959fc5d4b7dedea8517e9f15ac753
| 25,313
|
py
|
Python
|
models/classifiers.py
|
caotians1/OD-test-master
|
e272421294a3614bdcdb3a4e4b530f613dad1a1c
|
[
"MIT"
] | 3
|
2020-10-07T18:35:50.000Z
|
2021-02-23T06:36:21.000Z
|
models/classifiers.py
|
caotians1/OD-test-master
|
e272421294a3614bdcdb3a4e4b530f613dad1a1c
|
[
"MIT"
] | null | null | null |
models/classifiers.py
|
caotians1/OD-test-master
|
e272421294a3614bdcdb3a4e4b530f613dad1a1c
|
[
"MIT"
] | 3
|
2020-10-08T14:38:15.000Z
|
2021-11-08T11:51:48.000Z
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.models.vgg as VGG
import torchvision.models.resnet as Resnet
import torchvision.models.densenet as Densenet
class PartialForwardable(object):
def partial_forward(self, x):
if hasattr(self, 'densenet121'):
features = self.densenet121.features(x)
out = F.relu(features, inplace=True)
out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
return out
elif hasattr(self, 'model'):
return self.model.features(x).view(x.size(0), 1)
class MNIST_VGG(nn.Module):
"""
VGG-style MNIST.
"""
def make_layers(self, cfg, batch_norm=False):
layers = []
in_channels = 1
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
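# Added annotation: in the cfg list an integer is the output-channel count of a
# 3x3 convolution (followed by BatchNorm and/or ReLU) and 'M' inserts a
# 2x2/stride-2 max-pool, so [64, 'M', 128, ...] expands to
# Conv(1->64) -> MaxPool -> Conv(64->128) -> ... as built by make_layers above.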
def __init__(self):
super(MNIST_VGG, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# Reduced VGG16.
self.cfg = [64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M']
self.model = VGG.VGG(self.make_layers(self.cfg, batch_norm=True), num_classes=10)
# MNIST would have a different sized feature map.
self.model.classifier = nn.Sequential(
nn.Linear(512 * 1 * 1, 256), nn.ReLU(True), nn.Dropout(),
nn.Linear(256, 256), nn.ReLU(True), nn.Dropout(),
nn.Linear(256, 10),
)
self.model._initialize_weights()
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 10])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 60
return config
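# Added annotation (assumption about the surrounding training harness, which is not
# shown here): train_config() returns an 'optim' Adam optimizer, a 'scheduler'
# ReduceLROnPlateau keyed on a validation metric, and a 'max_epoch' budget; every
# classifier below repeats the same contract.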
class MNIST_Resnet(nn.Module):
"""
MNIST_Resnet is based on Resnet50
We replace the average pooling block to accommodate
the requirements of MNIST.
"""
def __init__(self):
super(MNIST_Resnet, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# Resnet50.
self.model = Resnet.ResNet(Resnet.Bottleneck, [2, 3, 5, 2], num_classes=10)
# MNIST would have a different sized feature map.
self.model.avgpool = nn.AdaptiveAvgPool2d((1,1))
# The first part also needs to be fixed.
self.model.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False) # Replace the harsh convolution.
del self.model.maxpool
self.model.maxpool = lambda x: x # Remove the early maxpool.
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 10])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 60
return config
class CIFAR10_VGG(nn.Module):
"""
CIFAR_VGG is based on VGG16+BatchNorm
We replace the classifier block to accommodate
the requirements of CIFAR.
"""
def __init__(self):
super(CIFAR10_VGG, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# VGG16 minus last maxpool.
self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512]
self.model = VGG.VGG(VGG.make_layers(self.cfg, batch_norm=True), num_classes=10)
# Cifar 10 would have a different sized feature map.
self.model.classifier = nn.Sequential(
nn.Linear(512 * 2 * 2, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 10),
)
self.model._initialize_weights()
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 10])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 60
return config
class CIFAR10_Resnet(nn.Module):
"""
CIFAR_Resnet is based on Resnet50
We replace the average pooling block to accommodate
the requirements of CIFAR.
"""
def __init__(self):
super(CIFAR10_Resnet, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# Resnet50.
self.model = Resnet.ResNet(Resnet.Bottleneck, [3, 4, 6, 3], num_classes=10)
# Cifar 10 would have a different sized feature map.
self.model.avgpool = nn.AdaptiveAvgPool2d((1,1))
# The first part also needs to be fixed.
self.model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # Replace the harsh convolution.
del self.model.maxpool
self.model.maxpool = lambda x: x # Remove the early maxpool.
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 10])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 60
return config
class CIFAR100_VGG(nn.Module):
"""
CIFAR_VGG is based on VGG16+BatchNorm
We replace the classifier block to accommodate
the requirements of CIFAR.
"""
def __init__(self):
super(CIFAR100_VGG, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# VGG16 minus last maxpool.
self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512]
self.model = VGG.VGG(VGG.make_layers(self.cfg, batch_norm=True), num_classes=100)
# Cifar 100 would have a different sized feature map.
self.model.classifier = nn.Sequential(
nn.Linear(512 * 2 * 2, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 100),
)
self.model._initialize_weights()
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 100])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class CIFAR100_Resnet(nn.Module):
"""
CIFAR_Resnet is based on Resnet50
We replace the average pooling block to accommodate
the requirements of CIFAR.
"""
def __init__(self):
super(CIFAR100_Resnet, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# Resnet50.
self.model = Resnet.ResNet(Resnet.Bottleneck, [3, 4, 6, 3], num_classes=100)
# Cifar 100 would have a different sized feature map.
self.model.avgpool = nn.AdaptiveAvgPool2d((1,1))
# The first part also needs to be fixed.
self.model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # Replace the harsh convolution.
del self.model.maxpool
self.model.maxpool = lambda x: x # Remove the early maxpool.
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 100])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class STL10_VGG(nn.Module):
"""
STL10_VGG is based on VGG16+BatchNorm
    We replace the classifier block to accommodate
the requirements of STL10.
"""
def __init__(self):
super(STL10_VGG, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# VGG16.
self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
self.model = VGG.VGG(VGG.make_layers(self.cfg, batch_norm=True), num_classes=10)
        # STL10 inputs produce a differently sized feature map.
self.model.classifier = nn.Sequential(
nn.Linear(512 * 3 * 3, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 10),
)
self.model._initialize_weights()
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 10])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class STL10_Resnet(nn.Module):
"""
STL10_Resnet is based on Resnet50
    We replace the average pooling block to accommodate
the requirements of STL10.
"""
def __init__(self):
super(STL10_Resnet, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# Resnet50.
self.model = Resnet.ResNet(Resnet.Bottleneck, [3, 4, 6, 3], num_classes=10)
# STL10 would have a different sized feature map.
self.model.avgpool = nn.AdaptiveAvgPool2d((1,1))
# The first part also needs to be fixed.
self.model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) # Replace the harsh convolution.
del self.model.maxpool
self.model.maxpool = lambda x: x # Remove the early maxpool.
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 10])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class TinyImagenet_VGG(nn.Module):
"""
TinyImagenet_VGG is based on VGG16+BatchNorm
    We replace the classifier block to accommodate
the requirements of TinyImagenet.
"""
def __init__(self):
super(TinyImagenet_VGG, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
self.model = VGG.VGG(VGG.make_layers(self.cfg, batch_norm=True), num_classes=200)
# TinyImagenet would have a different sized feature map.
self.model.classifier = nn.Sequential(
nn.Linear(512 * 2 * 2, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
nn.Linear(4096, 200),
)
self.model._initialize_weights()
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 200])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class TinyImagenet_Resnet(nn.Module):
"""
TinyImagenet_Resnet is based on Resnet50
    We replace the average pooling block to accommodate
the requirements of TinyImagenet.
"""
def __init__(self):
super(TinyImagenet_Resnet, self).__init__()
# Based on the imagenet normalization params.
self.offset = 0.44900
self.multiplier = 4.42477
# Resnet50.
self.model = Resnet.ResNet(Resnet.Bottleneck, [3, 4, 6, 3], num_classes=200)
# TinyImagenet would have a different sized feature map.
self.model.avgpool = nn.AdaptiveAvgPool2d((1,1))
# The first part also needs to be fixed.
self.model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # Replace the harsh convolution.
# del self.model.maxpool
# self.model.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x, softmax=True):
# Perform late normalization.
x = (x-self.offset)*self.multiplier
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 200])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class NIHDense(nn.Module, PartialForwardable):
def __init__(self):
super(NIHDense, self).__init__()
self.densenet121 = Densenet.densenet121(pretrained=True)
#TODO: ChestXNet specific implementation params (substitute for kLog)
def forward(self, x, softmax=True):
output = self.densenet121(x)
if softmax:
return F.log_softmax(output, dim=1)
else:
return output
def output_size(self):
return torch.LongTensor([1, 14])
def train_config(self):
config = {}
# TODO: chestXnet suitable arguments
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2,
min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class NIHDenseBinary(nn.Module, PartialForwardable):
def __init__(self, pretrained_weights_path=None, train_features=False):
super(NIHDenseBinary, self).__init__()
self.train_features = train_features
self.densenet121 = Densenet.densenet121(pretrained=False)
self.densenet121.features[0] = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
if pretrained_weights_path is not None:
print("NIHDenseBinary loading weights from ", pretrained_weights_path)
state_dict = torch.load(pretrained_weights_path)
keys = state_dict['state_dict'].copy().keys()
for key in keys:
if "norm.1" in key:
state_dict['state_dict'][key[7:].replace("norm.1", "norm1")] = state_dict['state_dict'].pop(key)
elif "norm.2" in key:
state_dict['state_dict'][key[7:].replace("norm.2", "norm2")] = state_dict['state_dict'].pop(key)
elif "conv.1" in key:
state_dict['state_dict'][key[7:].replace("conv.1", "conv1")] = state_dict['state_dict'].pop(key)
elif "conv.2" in key:
state_dict['state_dict'][key[7:].replace("conv.2", "conv2")] = state_dict['state_dict'].pop(key)
else:
state_dict['state_dict'][key[7:]] = state_dict['state_dict'].pop(key)
self.load_state_dict(state_dict['state_dict'], strict=False)
feature_dim = self.densenet121.classifier.in_features
        self.densenet121.classifier = nn.Linear(feature_dim, 2)
def forward(self, x, softmax=True):
output = self.densenet121(x)
if softmax:
return F.log_softmax(output, dim=1)
else:
return output
def output_size(self):
return torch.LongTensor([1,2])
def train_config(self):
config = {}
if self.train_features:
config['optim'] = optim.Adam(
[{'params':self.densenet121.classifier.parameters(), 'lr':1e-1}, {'params':self.densenet121.features.parameters()}],
lr=1e-1)
else:
            config['optim'] = optim.Adam(self.densenet121.classifier.parameters(), lr=1e-1)
config['scheduler'] = optim.lr_scheduler.StepLR(config['optim'], 1, gamma=0.1)
config['max_epoch'] = 20
return config
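# A minimal, self-contained sketch (not part of the original file) of the checkpoint key
# remapping performed in NIHDenseBinary.__init__ above: checkpoints saved under
# nn.DataParallel carry a "module." prefix, and older DenseNet layer names use dotted
# forms ("norm.1", "conv.2") that newer implementations spell without the dot.
# The example key in the comment below is hypothetical and only illustrates the mapping.
def _remap_densenet_keys(state_dict):
    """Return a copy of state_dict with DataParallel prefixes and dotted names normalized."""
    remapped = {}
    for key, value in state_dict.items():
        key = key[len("module."):] if key.startswith("module.") else key
        for old, new in (("norm.1", "norm1"), ("norm.2", "norm2"),
                         ("conv.1", "conv1"), ("conv.2", "conv2")):
            key = key.replace(old, new)
        remapped[key] = value
    return remapped
# Example (hypothetical key):
#   _remap_densenet_keys({"module.densenet121.features.denseblock1.denselayer1.norm.1.weight": w})
#   -> {"densenet121.features.denseblock1.denselayer1.norm1.weight": w}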
class NIHChestVGG(nn.Module, PartialForwardable):
def __init__(self):
super(NIHChestVGG, self).__init__()
# Based on the imagenet normalization params.
#self.offset = 0.44900
#self.multiplier = 4.42477
self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
self.model = VGG.VGG(VGG.make_layers(self.cfg, batch_norm=True), num_classes=2)
# TinyImagenet would have a different sized feature map.
#self.model.classifier = nn.Sequential(
# nn.Linear(512 * 2 * 2, 4096), nn.ReLU(True), nn.Dropout(),
# nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),
# nn.Linear(4096, 2),
#)
self.model._initialize_weights()
def forward(self, x, softmax=True):
        # Late normalization is not applied here; the offset/multiplier are commented out above.
output = self.model(x)
if softmax:
output = F.log_softmax(output, dim=1)
return output
def output_size(self):
return torch.LongTensor([1, 2])
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 120
return config
class PADDense(nn.Module, PartialForwardable):
def __init__(self, pretrained_weights_path=None, train_features=True):
super(PADDense, self).__init__()
self.train_features = train_features
self.densenet121 = Densenet.densenet121(pretrained=False)
if pretrained_weights_path is not None:
self.load_state_dict(torch.load(pretrained_weights_path), strict=False)
self.densenet121.features[0] = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
feature_dim = self.densenet121.classifier.in_features
        self.densenet121.classifier = nn.Linear(feature_dim, 2)
def forward(self, x, softmax=True):
output = self.densenet121(x)
if softmax:
return F.log_softmax(output, dim=1)
else:
return output
def output_size(self):
return torch.LongTensor([1,2])
def train_config(self):
config = {}
if self.train_features:
config['optim'] = optim.Adam(
[{'params':self.densenet121.classifier.parameters(), 'lr':1e-3}, {'params':self.densenet121.features.parameters()}],
lr=1e-3)
else:
            config['optim'] = optim.Adam(self.densenet121.classifier.parameters(), lr=1e-3)
#config['scheduler'] = optim.lr_scheduler.StepLR(config['optim'], 30, gamma=0.1)
config['max_epoch'] = 100
return config
class DRDDense(nn.Module, PartialForwardable):
def __init__(self, pretrained_weights_path=None, train_features=False):
super(DRDDense, self).__init__()
self.train_features = train_features
self.densenet121 = Densenet.densenet121(pretrained=False)
if pretrained_weights_path is not None:
self.load_state_dict(torch.load(pretrained_weights_path), strict=False)
feature_dim = self.densenet121.classifier.in_features
        self.densenet121.classifier = nn.Linear(feature_dim, 2)
def forward(self, x, softmax=True):
output = self.densenet121(x)
if softmax:
return F.log_softmax(output, dim=1)
else:
return output
def output_size(self):
return torch.LongTensor([1,2])
def train_config(self):
config = {}
if self.train_features:
config['optim'] = optim.Adam(
[{'params':self.densenet121.classifier.parameters(), 'lr':1e-3}, {'params':self.densenet121.features.parameters()}],
lr=1e-3)
else:
            config['optim'] = optim.Adam(self.densenet121.classifier.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.StepLR(config['optim'], 30, gamma=0.5)
config['max_epoch'] = 100
return config
class PCAMDense(nn.Module, PartialForwardable):
def __init__(self, pretrained_weights_path=None, train_features=False):
super(PCAMDense, self).__init__()
self.train_features = train_features
self.densenet121 = Densenet.densenet121(pretrained=False)
if pretrained_weights_path is not None:
self.load_state_dict(torch.load(pretrained_weights_path), strict=False)
feature_dim = self.densenet121.classifier.in_features
        self.densenet121.classifier = nn.Linear(feature_dim, 2)
def forward(self, x, softmax=True):
output = self.densenet121(x)
if softmax:
return F.log_softmax(output, dim=1)
else:
return output
def output_size(self):
return torch.LongTensor([1,2])
def train_config(self):
config = {}
if self.train_features:
config['optim'] = optim.Adam(
[{'params':self.densenet121.classifier.parameters(), 'lr':1e-1}, {'params':self.densenet121.features.parameters()}],
lr=1e-1)
else:
            config['optim'] = optim.Adam(self.densenet121.classifier.parameters(), lr=1e-1)
config['scheduler'] = optim.lr_scheduler.StepLR(config['optim'], 10, gamma=0.5)
config['max_epoch'] = 100
return config
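# Usage sketch (not part of the original file): how the train_config() dictionaries
# returned by the models above are typically consumed. The train_loader/val_loader
# arguments and the use of F.nll_loss (which pairs with the log_softmax outputs) are
# illustrative assumptions, not something the original code prescribes.
def _example_training_loop(model, train_loader, val_loader, device='cuda'):
    """Illustrative sketch only: drive the optimizer and scheduler from train_config()."""
    cfg = model.train_config()
    for _epoch in range(cfg['max_epoch']):
        model.train()
        for x, y in train_loader:
            cfg['optim'].zero_grad()
            loss = F.nll_loss(model(x.to(device)), y.to(device))
            loss.backward()
            cfg['optim'].step()
        # Mean validation loss drives ReduceLROnPlateau; other schedulers take no metric.
        model.eval()
        with torch.no_grad():
            val_loss = sum(F.nll_loss(model(x.to(device)), y.to(device)).item()
                           for x, y in val_loader) / max(len(val_loader), 1)
        sched = cfg.get('scheduler')
        if isinstance(sched, optim.lr_scheduler.ReduceLROnPlateau):
            sched.step(val_loss)
        elif sched is not None:
            sched.step()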
| 38.121988
| 151
| 0.611306
| 3,198
| 25,313
| 4.716698
| 0.062539
| 0.033413
| 0.022275
| 0.026518
| 0.911628
| 0.904269
| 0.89638
| 0.879475
| 0.873111
| 0.86436
| 0
| 0.059307
| 0.264607
| 25,313
| 664
| 152
| 38.121988
| 0.751007
| 0.137637
| 0
| 0.767857
| 0
| 0
| 0.036308
| 0
| 0
| 0
| 0
| 0.001506
| 0
| 1
| 0.147321
| false
| 0
| 0.015625
| 0.035714
| 0.325893
| 0.002232
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f2708640ab24df89f56c4a5a3c28d19ec3d902bc
| 64
|
py
|
Python
|
app/utilities/constants.py
|
futuresimple/triggear
|
d6b8511ba8550225e7c34bd52232327b2b89d972
|
[
"MIT"
] | 14
|
2017-08-17T16:48:26.000Z
|
2019-07-10T12:11:49.000Z
|
app/utilities/constants.py
|
futuresimple/triggear
|
d6b8511ba8550225e7c34bd52232327b2b89d972
|
[
"MIT"
] | null | null | null |
app/utilities/constants.py
|
futuresimple/triggear
|
d6b8511ba8550225e7c34bd52232327b2b89d972
|
[
"MIT"
] | null | null | null |
BRANCH_DELETED_SHA = '0000000000000000000000000000000000000000'
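# Usage sketch (assumption, not from the original file): GitHub push webhooks report an
# all-zero SHA as the "after" commit when a branch is deleted, so handlers can compare
# against this constant to short-circuit. 'payload' below is a hypothetical webhook body.
#
# if payload.get('after') == BRANCH_DELETED_SHA:
#     return  # branch was deleted; nothing to build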
| 32
| 63
| 0.90625
| 4
| 64
| 14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.655738
| 0.046875
| 64
| 1
| 64
| 64
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 0.625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f28a491c8db7fc7ac790a79605aab7a3a9551f2e
| 241
|
py
|
Python
|
src/process/models/base/common/__init__.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 14
|
2020-12-19T15:06:13.000Z
|
2022-01-12T19:52:17.000Z
|
src/process/models/base/common/__init__.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 43
|
2021-01-06T22:05:22.000Z
|
2022-03-10T10:30:30.000Z
|
src/process/models/base/common/__init__.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 4
|
2020-12-18T23:10:09.000Z
|
2021-04-02T13:03:12.000Z
|
from models.base.common.OperationEventBase import OperationEventBase
from models.base.common.LogBase import LogBase
from models.base.common.StatusBase import StatusBase
from models.base.common.ConfigParameterBase import ConfigParameterBase
| 40.166667
| 70
| 0.879668
| 28
| 241
| 7.571429
| 0.321429
| 0.188679
| 0.264151
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070539
| 241
| 5
| 71
| 48.2
| 0.946429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f2996a8fd9443f858cc28ba1cdfea6954ed2fabf
| 8,683
|
py
|
Python
|
tests/kong/test_consumers.py
|
tammoippen/kongcli
|
b25686d77378de987ab5c4df41ae9b038d7c4953
|
[
"MIT"
] | 3
|
2019-10-14T18:38:31.000Z
|
2020-08-13T11:53:43.000Z
|
tests/kong/test_consumers.py
|
tammoippen/kongcli
|
b25686d77378de987ab5c4df41ae9b038d7c4953
|
[
"MIT"
] | 1
|
2019-10-23T08:04:13.000Z
|
2019-10-23T08:04:13.000Z
|
tests/kong/test_consumers.py
|
tammoippen/kongcli
|
b25686d77378de987ab5c4df41ae9b038d7c4953
|
[
"MIT"
] | null | null | null |
from uuid import uuid4
import pytest
from kongcli.kong import consumers
from kongcli.kong.general import add
def test_no_acl_for_new_consumer(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
assert [] == consumers.groups(session, consumer["id"])
def test_add_acl_to_consumer(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
consumers.add_group(session, consumer["id"], "some-nice-group")
assert ["some-nice-group"] == consumers.groups(session, consumer["id"])
def test_add_acl_twice_to_consumer(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
consumers.add_group(session, consumer["id"], "some-nice-group")
with pytest.raises(Exception) as e:
consumers.add_group(session, consumer["id"], "some-nice-group")
assert (
str(e.value).strip()
== '400 Bad Request: {"group":"ACL group already exist for this consumer"}'
) or (
str(e.value).strip()
== f'409 Conflict: {{"message":"UNIQUE violation detected on \'{{consumer={{id=\\"{consumer["id"]}\\"}},group=\\"some-nice-group\\"}}\'","name":"unique constraint violation","fields":{{"consumer":{{"id":"{consumer["id"]}"}},"group":"some-nice-group"}},"code":5}}'
)
def test_add_multiple_acl_to_consumer(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
consumers.add_group(session, consumer["id"], "some-nice-group1")
consumers.add_group(session, consumer["id"], "some-nice-group2")
consumers.add_group(session, consumer["id"], "some-nice-group3")
assert ["some-nice-group1", "some-nice-group2", "some-nice-group3"] == sorted(
consumers.groups(session, consumer["id"])
)
def test_delete_non_existing_acl(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
with pytest.raises(Exception) as e:
consumers.delete_group(session, consumer["id"], "some-group")
assert str(e.value).startswith("404 Not Found")
assert '"message":"Not found"' in str(e.value)
# also with other group we get an error
consumers.add_group(session, consumer["id"], "some-nice-group1")
with pytest.raises(Exception) as e:
consumers.delete_group(session, consumer["id"], "some-group")
assert str(e.value).startswith("404 Not Found")
assert '"message":"Not found"' in str(e.value)
def test_no_basic_auths(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
assert [] == consumers.basic_auths(session, consumer["id"])
def test_add_basic_auth(session, clean_kong, kong_version):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_basic_auth(session, consumer["id"], "some.username", "password")
assert [ba] == consumers.basic_auths(session, consumer["id"])
if kong_version >= 0.15:
assert ba["consumer"]["id"] == consumer["id"]
else:
assert ba["consumer_id"] == consumer["id"]
assert ba["username"] == "some.username"
assert ba["password"] != "password" # some hash
def test_delete_non_existing_basic_auth(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
with pytest.raises(Exception) as e:
consumers.delete_basic_auth(session, consumer["id"], str(uuid4()))
assert str(e.value).strip() == '404 Not Found: {"message":"Not found"}'
def test_delete_basic_auth(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_basic_auth(session, consumer["id"], "some.username", "password")
consumers.delete_basic_auth(session, consumer["id"], ba["id"])
assert [] == consumers.basic_auths(session, consumer["id"])
def test_update_basic_auth_no_params(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_basic_auth(session, consumer["id"], "some.username", "password")
with pytest.raises(AssertionError):
consumers.update_basic_auth(session, consumer["id"], ba["id"])
def test_update_basic_auth_username(session, clean_kong, kong_version):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_basic_auth(session, consumer["id"], "some.username", "password")
consumers.update_basic_auth(
session, consumer["id"], ba["id"], username="username.some"
)
bas = consumers.basic_auths(session, consumer["id"])
assert len(bas) == 1
assert "username.some" == bas[0].pop("username")
ba.pop("username")
if kong_version >= 0.15:
# apparently, if no password field is given in 1.0, the empty password is set
ba.pop("password")
bas[0].pop("password")
assert ba == bas[0]
def test_update_basic_auth_password(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_basic_auth(session, consumer["id"], "some.username", "password")
consumers.update_basic_auth(session, consumer["id"], ba["id"], password="4321")
bas = consumers.basic_auths(session, consumer["id"])
assert len(bas) == 1
assert ba.pop("password") != bas[0].pop("password")
assert ba == bas[0]
def test_update_basic_auth_username_password(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_basic_auth(session, consumer["id"], "some.username", "password")
consumers.update_basic_auth(
session, consumer["id"], ba["id"], username="username.some", password="4321"
)
bas = consumers.basic_auths(session, consumer["id"])
assert len(bas) == 1
assert ba.pop("password") != bas[0].pop("password")
assert "username.some" == bas[0].pop("username")
ba.pop("username")
assert ba == bas[0]
def test_no_key_auths(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
assert [] == consumers.key_auths(session, consumer["id"])
def test_add_key_auth(session, clean_kong, kong_version):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ka = consumers.add_key_auth(session, consumer["id"])
assert [ka] == consumers.key_auths(session, consumer["id"])
if kong_version >= 0.15:
assert ka["consumer"]["id"] == consumer["id"]
else:
assert ka["consumer_id"] == consumer["id"]
assert ka["key"]
def test_add_key_auth_with_key(session, clean_kong, kong_version):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ka = consumers.add_key_auth(session, consumer["id"], key="1234567890")
assert [ka] == consumers.key_auths(session, consumer["id"])
if kong_version >= 0.15:
assert ka["consumer"]["id"] == consumer["id"]
else:
assert ka["consumer_id"] == consumer["id"]
assert "1234567890" == ka["key"]
def test_lots_of_key_auths(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
for _i in range(1000):
consumers.add_key_auth(session, consumer["id"])
assert 1000 == len(consumers.key_auths(session, consumer["id"]))
def test_delete_non_existing_key_auth(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
with pytest.raises(Exception) as e:
consumers.delete_key_auth(session, consumer["id"], str(uuid4()))
assert str(e.value).strip() == '404 Not Found: {"message":"Not found"}'
def test_delete_key_auth(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ba = consumers.add_key_auth(session, consumer["id"])
consumers.delete_key_auth(session, consumer["id"], ba["id"])
assert [] == consumers.key_auths(session, consumer["id"])
def test_update_key_auth(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
ka = consumers.add_key_auth(session, consumer["id"])
consumers.update_key_auth(session, consumer["id"], ka["id"], key="4321")
kas = consumers.key_auths(session, consumer["id"])
assert len(kas) == 1
assert "4321" == kas[0].pop("key")
ka.pop("key")
assert ka == kas[0]
def test_no_plugins(session, clean_kong):
consumer = add("consumers", session, username="test-user", custom_id="1234")
assert [] == consumers.plugins(session, consumer["id"])
| 43.415
| 271
| 0.681907
| 1,157
| 8,683
| 4.946413
| 0.099395
| 0.106587
| 0.133671
| 0.099074
| 0.867203
| 0.846759
| 0.815831
| 0.792941
| 0.734405
| 0.67517
| 0
| 0.02502
| 0.148451
| 8,683
| 199
| 272
| 43.633166
| 0.748986
| 0.014166
| 0
| 0.546053
| 0
| 0.006579
| 0.187588
| 0.011571
| 0
| 0
| 0
| 0
| 0.282895
| 1
| 0.138158
| false
| 0.098684
| 0.026316
| 0
| 0.164474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
f2a76e09b5f0c60433adbe0107d015d3bfe608f5
| 162
|
py
|
Python
|
azmessaging/readers/__init__.py
|
ali-zahedi/az-messaging
|
ecc626e6be3f58a9ec166923623c144c86d2734e
|
[
"MIT"
] | null | null | null |
azmessaging/readers/__init__.py
|
ali-zahedi/az-messaging
|
ecc626e6be3f58a9ec166923623c144c86d2734e
|
[
"MIT"
] | null | null | null |
azmessaging/readers/__init__.py
|
ali-zahedi/az-messaging
|
ecc626e6be3f58a9ec166923623c144c86d2734e
|
[
"MIT"
] | null | null | null |
from .bases import Reader
from .defaults import DefaultReader
from .sms import * # noqa
from .telegram import * # noqa
from .pushnotifications import * # noqa
| 27
| 40
| 0.753086
| 20
| 162
| 6.1
| 0.5
| 0.245902
| 0.229508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179012
| 162
| 5
| 41
| 32.4
| 0.917293
| 0.08642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4b8139f884ffbbdb779a632bc5bbe3cdca31b992
| 8,731
|
py
|
Python
|
dgm4nlp/tf/logit.py
|
uva-slpl/dgm4nlp
|
9c5b3a4bc3f5e9b4f971d5b9bbad70e19bb12f8c
|
[
"MIT"
] | null | null | null |
dgm4nlp/tf/logit.py
|
uva-slpl/dgm4nlp
|
9c5b3a4bc3f5e9b4f971d5b9bbad70e19bb12f8c
|
[
"MIT"
] | null | null | null |
dgm4nlp/tf/logit.py
|
uva-slpl/dgm4nlp
|
9c5b3a4bc3f5e9b4f971d5b9bbad70e19bb12f8c
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import logging
from dgm4nlp.tf.ssoftmax import botev_sampled_softmax_layer
from dgm4nlp.tf.ssoftmax import jean_sampled_softmax_layer
from dgm4nlp.tf.ssoftmax import botev_batch_sampled_softmax_layer
def logit_layer_for_text(
nb_classes, # V
inputs, # [B, T, dim]
labels, # [B, T]
dim,
nb_softmax_samples, # S
is_training,
approximation='botev-batch',
support=None, # [S]
importance=None, # [S]
name='logit',
):
"""
Logit strategies for monolingual sequences.
:param nb_classes: number of classes over which we define a softmax
:param inputs: forward activations [B, T, dim]
:param labels: target labels [B, T]
:param dim: number of activations dim
:param nb_softmax_samples: use between 0 and nb_classes to get an approximation
:param is_training: for sampled approximations this switches between truncated/complete supports at training/prediction
:param approximation: which approximation to use
- 'botev': CSS with a shared support for all elements in a sequence
- 'jean': a form of IS with shared negative support
- 'botev-batch': CSS with a shared support for all sequences in batch
:param support: a batch-wise shared support of probable and negative classes
- necessary for botev-batch, ignored by others
:param importance: importance of elements in support
- necessary for botev-batch, ignored by others
:return: logits [B * T, V|S] and targets [B * T]
"""
batch_size = tf.shape(inputs)[0]
longest = tf.shape(inputs)[1]
if 0 < nb_softmax_samples < nb_classes: # Here we employ a sampled softmax architecture
logging.info('%s sampled-softmax=%s', name, approximation)
if approximation == 'botev': # Here we use CSS (Botev et al, 2017)
with tf.variable_scope('botev'):
# logits: [B, T, Vx|S]
# targets: [B, T]
logits, targets = botev_sampled_softmax_layer(
nb_classes=nb_classes,
nb_samples=nb_softmax_samples,
dim=dim,
labels=labels, # [B, T]
inputs=inputs, # [B, T, dim]
is_training=is_training
)
# For compatibility with the rest of the code
# [B * T, V|S]
logits = tf.reshape(logits, [batch_size * longest, -1])
# [B * T]
targets = tf.reshape(targets, [-1])
elif approximation == 'botev-batch':
if support is None or importance is None:
raise ValueError('Softmax approximation "botev-batch" requires "support" and "importance"')
with tf.variable_scope('botev-batch'):
# logits: [B, T, V|S]
# targets: [B, T]
logits, targets = botev_batch_sampled_softmax_layer(
nb_classes=nb_classes, # V
dim=dim,
labels=labels, # [B, T]
support=support, # [S]
importance=importance, # [S]
inputs=inputs, # [B, T, dim]
is_training=is_training
)
# For compatibility with the rest of the code
                # [B * T, V|S]
logits = tf.reshape(logits, [batch_size * longest, -1])
# [B * T]
targets = tf.reshape(targets, [-1])
elif approximation == 'jean': # Here we use the method of Jean et al (2015) with uniform sampling
with tf.variable_scope('jean'):
# logits: [B * T, V|S]
# targets: [B * T]
logits, targets = jean_sampled_softmax_layer(
nb_classes=nb_classes,
nb_samples=nb_softmax_samples,
dim=dim,
labels=tf.reshape(labels, [batch_size * longest, 1]), # [B * T, 1]
                inputs=tf.reshape(inputs, [batch_size * longest, -1]),  # [B * T, dim]
is_training=is_training
)
else:
raise ValueError('Unknown softmax approximation for text: %s' % approximation)
else: # Here we employ an exact softmax architecture
# Here we compute logits
# [B * T, V]
logits = tf.contrib.layers.fully_connected(
tf.reshape(inputs, [batch_size * longest, dim]), # [B * T, dim]
num_outputs=nb_classes,
activation_fn=None
)
# Define targets
# [B * T]
targets = tf.reshape(labels, [-1])
return logits, targets
def logit_layer_for_bitext(
nb_classes, # V
inputs, # [B, M, dim]
outputs, # [B, N]
dim,
nb_softmax_samples, # S
is_training,
approximation='botev-batch',
support=None, # [S]
importance=None, # [S]
name='logit'
):
"""
Logit strategies for sequences where the inputs and the outputs are defined over parallel sequences.
:param nb_classes: number of classes over which we define a softmax
:param inputs: forward activations [B, M, dim]
:param outputs: output labels [B, N]
:param dim: number of activations dim
:param nb_softmax_samples: use between 0 and nb_classes to get an approximation
:param is_training: for sampled approximations this switches between truncated/complete supports at training/prediction
:param approximation: which approximation to use
- 'botev': CSS with a shared support for all elements in a sequence
- 'botev-batch': CSS with a shared support for all sequences in batch
:param support: a batch-wise shared support of probable and negative classes
- necessary for botev-batch, ignored by others
:param importance: importance of elements in support
- necessary for botev-batch, ignored by others
:return: logits [B * T, V|S] and targets [B * T]
"""
batch_size = tf.shape(inputs)[0] # B
longest_input = tf.shape(inputs)[1] # M
longest_output = tf.shape(outputs)[1] # N
if 0 < nb_softmax_samples < nb_classes: # Here we employ a sampled softmax architecture
logging.info('%s sampled-softmax=%s', name, approximation)
if approximation == 'botev':
with tf.variable_scope('botev'):
# logits: [B, M, V|S]
# targets: [B, N]
logits, targets = botev_sampled_softmax_layer(
nb_classes=nb_classes,
nb_samples=nb_softmax_samples,
dim=dim,
labels=outputs, # [B, N]
inputs=inputs, # [B, M, dim]
is_training=is_training
)
# For compatibility with the rest of the code
# [B * M, V|S]
logits = tf.reshape(logits, [batch_size * longest_input, -1])
# [B * N]
targets = tf.reshape(targets, [batch_size * longest_output])
elif approximation == 'botev-batch':
if support is None or importance is None:
raise ValueError('Softmax approximation "botev-batch" requires "support" and "importance"')
with tf.variable_scope('botev-batch'):
# logits: [B, M, V|S]
# targets: [B, N]
logits, targets = botev_batch_sampled_softmax_layer(
nb_classes=nb_classes, # V
dim=dim,
labels=outputs, # [B, N]
support=support, # [S]
importance=importance, # [S]
inputs=inputs, # [B, M, dim]
is_training=is_training
)
# For compatibility with the rest of the code
# [B * M, V|S]
logits = tf.reshape(logits, [batch_size * longest_input, -1])
# [B * N]
targets = tf.reshape(targets, [batch_size * longest_output])
else:
raise ValueError('Unknown softmax approximation for bitext: %s' % approximation)
else: # Here we employ an exact softmax architecture
# [B * M, V]
logits = tf.contrib.layers.fully_connected(
tf.reshape(inputs, [batch_size * longest_input, dim]), # [B * M, dim]
num_outputs=nb_classes,
activation_fn=None # for logits
)
# Define targets
# [B * N]
targets = tf.reshape(outputs, [-1])
# [B * M, V|S], [B * N]
return logits, targets
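# Usage sketch (not part of the original module; assumes TensorFlow 1.x graph mode, as
# this code does). The placeholder shapes follow the docstring of logit_layer_for_text:
# [B, T, dim] activations and [B, T] label ids. nb_classes/dim values are arbitrary
# illustrative assumptions.
def _example_text_logits(nb_classes=30000, dim=256):
    """Illustrative sketch only: build exact-softmax logits and a cross-entropy loss."""
    inputs = tf.placeholder(tf.float32, [None, None, dim])   # [B, T, dim]
    labels = tf.placeholder(tf.int64, [None, None])          # [B, T]
    # nb_softmax_samples=0 selects the exact softmax branch; the sampled 'botev-batch'
    # variant would additionally require `support` and `importance`.
    logits, targets = logit_layer_for_text(
        nb_classes=nb_classes,
        inputs=inputs,
        labels=labels,
        dim=dim,
        nb_softmax_samples=0,
        is_training=tf.constant(True),
    )
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits))
    return logits, targets, loss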
| 41.77512
| 123
| 0.560188
| 1,022
| 8,731
| 4.670254
| 0.130137
| 0.010476
| 0.033522
| 0.009428
| 0.869055
| 0.849151
| 0.823801
| 0.784412
| 0.749214
| 0.73392
| 0
| 0.00544
| 0.347268
| 8,731
| 208
| 124
| 41.975962
| 0.832076
| 0.343603
| 0
| 0.737705
| 0
| 0
| 0.067889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0
| 0.106557
| 0
| 0.139344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4bb41302a42110ecdd92b78613bae6cba85b58dc
| 7,356
|
py
|
Python
|
sample/tohbase.py
|
DingPengfei/sync-ecg
|
7617b130b97936f4bd059f55b902fef631c41f4e
|
[
"BSD-2-Clause"
] | null | null | null |
sample/tohbase.py
|
DingPengfei/sync-ecg
|
7617b130b97936f4bd059f55b902fef631c41f4e
|
[
"BSD-2-Clause"
] | null | null | null |
sample/tohbase.py
|
DingPengfei/sync-ecg
|
7617b130b97936f4bd059f55b902fef631c41f4e
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf8 -*-
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
from hbase.ttypes import Mutation
from configparser import ConfigParser
import hashlib
class HbaseOperation:
def __init__(self):
cfg = ConfigParser()
cfg.read('..\\config.ini')
ip = cfg.get('hbase', 'ip')
port = cfg.get('hbase', 'port')
socket = TSocket.TSocket(ip, port)
socket.setTimeout(2000)
self.table = 'test'
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
self.client = Hbase.Client(protocol)
socket.open()
# print(self.client.getTableNames())
def put_hlw(self, header):
mutations = []
company_name = Mutation(column=b'cf:company_name', value=header['company_name'])
version = Mutation(column=b'cf:version', value=header['version'])
ecg_wave = Mutation(column=b'cf:ecg_wave', value=header['ecg_wave'])
ecg_feq = Mutation(column=b'cf:ecg_feq', value=header['ecg_feq'])
file_length = Mutation(column=b'cf:file_length', value=header['file_length'])
data_length = Mutation(column=b'cf:data_length', value=header['data_length'])
        begin_time = Mutation(column=b'cf:begin_time', value=header['begin_time'])
end_time = Mutation(column=b'cf:end_time', value=header['end_time'])
crc_head = Mutation(column=b'cf:crc_head', value=header['crc_head'])
crc_data = Mutation(column=b'cf:crc_data', value=header['crc_data'])
id = Mutation(column=b'cf:id', value=header['id'])
        name = Mutation(column=b'cf:name', value=header['name'])
birthday = Mutation(column=b'cf:birthday', value=header['birthday'])
sex = Mutation(column=b'cf:sex', value=header['sex'])
age = Mutation(column=b'cf:age', value=header['age'])
height = Mutation(column=b'cf:height', value=header['height'])
weight = Mutation(column=b'cf:weight', value=header['weight'])
phone = Mutation(column=b'cf:phone', value=header['phone'])
unit = Mutation(column=b'cf:unit', value=header['unit'])
address = Mutation(column=b'cf:address', value=header['address'])
e_name = Mutation(column=b'cf:e_name', value=header['e_name'])
e_phone = Mutation(column=b'cf:e_phone', value=header['e_phone'])
bed = Mutation(column=b'cf:bed', value=header['bed'])
doctor = Mutation(column=b'cf:doctor', value=header['doctor'])
remark = Mutation(column=b'cf:remark', value=header['remark'])
reserved = Mutation(column=b'cf:reserved', value=header['reserved'])
content = Mutation(column=b'cf:content', value=header['content'])
mutations.append(company_name)
mutations.append(version)
mutations.append(ecg_wave)
mutations.append(ecg_feq)
mutations.append(file_length)
mutations.append(data_length)
mutations.append(begin_time)
mutations.append(end_time)
mutations.append(crc_head)
mutations.append(crc_data)
mutations.append(id)
mutations.append(name)
mutations.append(birthday)
mutations.append(sex)
mutations.append(age)
mutations.append(height)
mutations.append(weight)
mutations.append(phone)
mutations.append(unit)
mutations.append(address)
mutations.append(e_phone)
mutations.append(e_name)
mutations.append(bed)
mutations.append(doctor)
mutations.append(remark)
mutations.append(reserved)
mutations.append(content)
head = header['id'].decode('utf-8')
row_id = hashlib.md5(header['id']).hexdigest() + head[::-1]
self.client.mutateRow(b'test', row_id.encode(), mutations, {})
def put_hly(self, header):
mutations = []
company_name = Mutation(column=b'cf:company_name', value=header['company_name'])
version = Mutation(column=b'cf:version', value=header['version'])
ecg_wave = Mutation(column=b'cf:ecg_wave', value=header['ecg_wave'])
ecg_feq = Mutation(column=b'cf:ecg_feq', value=header['ecg_feq'])
other_wave = Mutation(column=b'cf:other_wave', value=header['other_wave'])
other_feq = Mutation(column=b'cf:other_feq', value=header['other_feq'])
file_length = Mutation(column=b'cf:file_length', value=header['file_length'])
data_length = Mutation(column=b'cf:data_length', value=header['data_length'])
        begin_time = Mutation(column=b'cf:begin_time', value=header['begin_time'])
end_time = Mutation(column=b'cf:end_time', value=header['end_time'])
crc_head = Mutation(column=b'cf:crc_head', value=header['crc_head'])
crc_data = Mutation(column=b'cf:crc_data', value=header['crc_data'])
id = Mutation(column=b'cf:id', value=header['id'])
        name = Mutation(column=b'cf:name', value=header['name'])
birthday = Mutation(column=b'cf:birthday', value=header['birthday'])
sex = Mutation(column=b'cf:sex', value=header['sex'])
age = Mutation(column=b'cf:age', value=header['age'])
height = Mutation(column=b'cf:height', value=header['height'])
weight = Mutation(column=b'cf:weight', value=header['weight'])
phone = Mutation(column=b'cf:phone', value=header['phone'])
unit = Mutation(column=b'cf:unit', value=header['unit'])
address = Mutation(column=b'cf:address', value=header['address'])
e_name = Mutation(column=b'cf:e_name', value=header['e_name'])
e_phone = Mutation(column=b'cf:e_phone', value=header['e_phone'])
bed = Mutation(column=b'cf:bed', value=header['bed'])
doctor = Mutation(column=b'cf:doctor', value=header['doctor'])
remark = Mutation(column=b'cf:remark', value=header['remark'])
field = Mutation(column=b'cf:field', value=header['field'])
reserved = Mutation(column=b'cf:reserved', value=header['reserved'])
content = Mutation(column=b'cf:content', value=header['content'])
mutations.append(company_name)
mutations.append(version)
mutations.append(ecg_wave)
mutations.append(ecg_feq)
mutations.append(other_wave)
mutations.append(other_feq)
mutations.append(file_length)
mutations.append(data_length)
mutations.append(begin_time)
mutations.append(end_time)
mutations.append(crc_head)
mutations.append(crc_data)
mutations.append(id)
mutations.append(name)
mutations.append(birthday)
mutations.append(sex)
mutations.append(age)
mutations.append(height)
mutations.append(weight)
mutations.append(phone)
mutations.append(unit)
mutations.append(address)
mutations.append(e_phone)
mutations.append(e_name)
mutations.append(bed)
mutations.append(doctor)
mutations.append(remark)
mutations.append(field)
mutations.append(reserved)
mutations.append(content)
head = header['id'].decode('utf-8')
row_id = hashlib.md5(header['id']).hexdigest() + head[::-1]
self.client.mutateRow(b'test', row_id.encode(), mutations, {})
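# A possible refactor sketch (not part of the original file): put_hlw/put_hly above build
# one Mutation per header field by hand; the same mapping can be expressed data-driven.
# The field list in the commented example mirrors the put_hlw columns, and the header
# dict is assumed to hold bytes values exactly as in the methods above.
def build_mutations(header, fields):
    """Return one Mutation per field, using 'cf:<field>' as the column name."""
    return [Mutation(column=b'cf:' + field.encode(), value=header[field])
            for field in fields]
# Example:
#   HLW_FIELDS = ['company_name', 'version', 'ecg_wave', 'ecg_feq', 'file_length',
#                 'data_length', 'begin_time', 'end_time', 'crc_head', 'crc_data',
#                 'id', 'name', 'birthday', 'sex', 'age', 'height', 'weight', 'phone',
#                 'unit', 'address', 'e_name', 'e_phone', 'bed', 'doctor', 'remark',
#                 'reserved', 'content']
#   mutations = build_mutations(header, HLW_FIELDS)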
| 46.556962
| 88
| 0.650218
| 921
| 7,356
| 5.076004
| 0.094463
| 0.170695
| 0.182888
| 0.207273
| 0.861818
| 0.850909
| 0.850909
| 0.850909
| 0.850909
| 0.850909
| 0
| 0.001872
| 0.201332
| 7,356
| 157
| 89
| 46.853503
| 0.793872
| 0.007477
| 0
| 0.811189
| 0
| 0
| 0.139901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020979
| false
| 0
| 0.041958
| 0
| 0.06993
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
29e3cc5ca820e5d7f67b700623e579b096e5e831
| 277
|
py
|
Python
|
analyzer/expression/prefix_unary_expression_syntax.py
|
vbondarevsky/ones_analyzer
|
ab8bff875192db238ed17c20d61c9fa5b55c3fa8
|
[
"MIT"
] | 12
|
2017-11-23T07:04:13.000Z
|
2022-03-01T21:06:56.000Z
|
analyzer/expression/prefix_unary_expression_syntax.py
|
vbondarevsky/analyzer_test
|
ab8bff875192db238ed17c20d61c9fa5b55c3fa8
|
[
"MIT"
] | 2
|
2017-06-25T21:32:32.000Z
|
2017-11-19T19:05:40.000Z
|
analyzer/expression/prefix_unary_expression_syntax.py
|
vbondarevsky/analyzer_test
|
ab8bff875192db238ed17c20d61c9fa5b55c3fa8
|
[
"MIT"
] | 5
|
2017-11-21T08:24:56.000Z
|
2021-08-17T23:21:18.000Z
|
class PrefixUnaryExpressionSyntax(object):
def __init__(self, kind, operator_token, operand):
self.kind = kind
self.operator_token = operator_token
self.operand = operand
def __str__(self):
return f"{self.operator_token}{self.operand}"
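# Usage sketch (the kind/token values below are hypothetical; the class simply stores and
# formats whatever kind, operator token, and operand it is given):
#
#   node = PrefixUnaryExpressionSyntax(kind="UnaryMinus", operator_token="-", operand="x")
#   print(node)   # -> "-x"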
| 30.777778
| 54
| 0.685921
| 31
| 277
| 5.741935
| 0.419355
| 0.292135
| 0.191011
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220217
| 277
| 8
| 55
| 34.625
| 0.824074
| 0
| 0
| 0
| 0
| 0
| 0.126354
| 0.126354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d9e53f5c3f339a11d4e93393cdac88a146407e61
| 39,760
|
py
|
Python
|
unittest/test_sinh.py
|
m1griffin/arrayfunc
|
df57097699c25d3e949e1ade307ed61eaa5728c2
|
[
"Apache-2.0"
] | 2
|
2017-08-28T08:41:16.000Z
|
2018-05-29T03:49:36.000Z
|
unittest/test_sinh.py
|
m1griffin/arrayfunc
|
df57097699c25d3e949e1ade307ed61eaa5728c2
|
[
"Apache-2.0"
] | null | null | null |
unittest/test_sinh.py
|
m1griffin/arrayfunc
|
df57097699c25d3e949e1ade307ed61eaa5728c2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Module: test_sinh.py
# Purpose: arrayfunc unit test.
# Language: Python 3.4
# Date: 09-Dec-2017.
# Ver: 06-Mar-2020.
#
###############################################################################
#
# Copyright 2014 - 2020 Michael Griffin <m12.griffin@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
"""This conducts unit tests for sinh.
"""
##############################################################################
import sys
import array
import itertools
import math
import operator
import platform
import copy
import unittest
import arrayfunc
##############################################################################
##############################################################################
# The following code is all auto-generated.
##############################################################################
class sinh_general_even_arraysize_without_simd_f(unittest.TestCase):
"""Test for basic general function operation.
test_template_noparams
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
if 'even' == 'even':
testdatasize = 160
if 'even' == 'odd':
testdatasize = 159
paramitersize = 5
xdata = [x for x,y in zip(itertools.cycle([0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0]), range(testdatasize))]
self.data = array.array('f', xdata)
self.dataout = array.array('f', [0]*len(self.data))
self.limited = len(self.data) // 2
# The expected results.
self.expected = [math.sinh(x) for x in self.data]
# The expected results when the maxlen parameter is used.
self.expectedlimiteddata = self.expected[0:self.limited] + list(self.data)[self.limited:]
# The same, but where dataout is used as one of the sources.
self.expectedlimiteddataout = self.expected[0:self.limited] + list(self.dataout)[self.limited:]
########################################################
def test_sinh_basic_array_none_a1(self):
"""Test sinh as *array-none* for basic function - Array code f.
"""
arrayfunc.sinh(self.data )
for dataoutitem, expecteditem in zip(list(self.data), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_none_a2(self):
"""Test sinh as *array-none* for basic function with matherrors=True - Array code f.
"""
arrayfunc.sinh(self.data, matherrors=True )
for dataoutitem, expecteditem in zip(list(self.data), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_none_a3(self):
"""Test sinh as *array-none* for basic function with maxlen - Array code f.
"""
arrayfunc.sinh(self.data, maxlen=self.limited )
for dataoutitem, expecteditem in zip(list(self.data), self.expectedlimiteddata):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_none_a4(self):
"""Test sinh as *array-none* for basic function with maxlen and matherrors=True - Array code f.
"""
arrayfunc.sinh(self.data, maxlen=self.limited, matherrors=True )
for dataoutitem, expecteditem in zip(list(self.data), self.expectedlimiteddata):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b1(self):
"""Test sinh as *array-array* for basic function - Array code f.
"""
arrayfunc.sinh(self.data, self.dataout )
for dataoutitem, expecteditem in zip(list(self.dataout), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b2(self):
"""Test sinh as *array-array* for basic function with matherrors=True - Array code f.
"""
arrayfunc.sinh(self.data, self.dataout, matherrors=True )
for dataoutitem, expecteditem in zip(list(self.dataout), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b3(self):
"""Test sinh as *array-array* for basic function with maxlen - Array code f.
"""
arrayfunc.sinh(self.data, self.dataout, maxlen=self.limited )
for dataoutitem, expecteditem in zip(list(self.dataout), self.expectedlimiteddataout):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b4(self):
"""Test sinh as *array-array* for basic function with maxlen and matherrors=True - Array code f.
"""
arrayfunc.sinh(self.data, self.dataout, maxlen=self.limited, matherrors=True )
for dataoutitem, expecteditem in zip(list(self.dataout), self.expectedlimiteddataout):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_param_errors_f(unittest.TestCase):
"""Test for invalid parameters.
param_invalid_template
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.floatarray = array.array('f', [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0])
arraysize = len(self.floatarray)
self.dataout = array.array('f', itertools.repeat(0.0, arraysize))
# Create some integer array equivalents.
self.intarray = array.array('i', [int(x) for x in self.floatarray])
self.intdataout = array.array('i', [int(x) for x in self.dataout])
########################################################
def test_sinh_array_none_a1(self):
"""Test sinh as *array-none* for integer array - Array code f.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.intarray)
########################################################
def test_sinh_array_none_b1(self):
"""Test sinh as *array-none* for matherrors='a' - Array code f.
"""
# Copy the array so we don't change the original data.
floatarray = copy.copy(self.floatarray)
# This version is expected to pass.
arrayfunc.sinh(floatarray, matherrors=True)
floatarray = copy.copy(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(floatarray, matherrors='a')
########################################################
def test_sinh_array_none_b2(self):
"""Test sinh as *array-none* for maxlen='a' - Array code f.
"""
# Copy the array so we don't change the original data.
floatarray = copy.copy(self.floatarray)
testmaxlen = len(floatarray) // 2
# This version is expected to pass.
arrayfunc.sinh(floatarray, maxlen=testmaxlen)
floatarray = copy.copy(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(floatarray, maxlen='a')
########################################################
def test_sinh_array_array_c1(self):
"""Test sinh as *array-array* for integer array - Array code f.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.intarray, self.dataout)
########################################################
def test_sinh_array_array_c2(self):
"""Test sinh as *array-array* for integer output array - Array code f.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.floatarray, self.intdataout)
########################################################
def test_sinh_array_array_c3(self):
"""Test sinh as *array-array* for integer input and output array - Array code f.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.intarray, self.intdataout)
########################################################
def test_sinh_array_num_array_d1(self):
"""Test sinh as *array-num-array* for matherrors='a' - Array code f.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout, matherrors=True)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.floatarray, self.dataout, matherrors='a')
########################################################
def test_sinh_array_array_d2(self):
"""Test sinh as *array-array* for maxlen='a' - Array code f.
"""
testmaxlen = len(self.floatarray) // 2
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout, maxlen=testmaxlen)
floatarray = copy.copy(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.floatarray, self.dataout, maxlen='a')
########################################################
def test_sinh_no_params_e1(self):
"""Test sinh with no parameters - Array code f.
"""
with self.assertRaises(TypeError):
arrayfunc.sinh()
##############################################################################
##############################################################################
class sinh_general_even_arraysize_without_simd_d(unittest.TestCase):
"""Test for basic general function operation.
test_template_noparams
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
if 'even' == 'even':
testdatasize = 160
if 'even' == 'odd':
testdatasize = 159
paramitersize = 5
xdata = [x for x,y in zip(itertools.cycle([0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0]), range(testdatasize))]
self.data = array.array('d', xdata)
self.dataout = array.array('d', [0]*len(self.data))
self.limited = len(self.data) // 2
# The expected results.
self.expected = [math.sinh(x) for x in self.data]
# The expected results when the maxlen parameter is used.
self.expectedlimiteddata = self.expected[0:self.limited] + list(self.data)[self.limited:]
# The same, but where dataout is used as one of the sources.
self.expectedlimiteddataout = self.expected[0:self.limited] + list(self.dataout)[self.limited:]
########################################################
def test_sinh_basic_array_none_a1(self):
"""Test sinh as *array-none* for basic function - Array code d.
"""
arrayfunc.sinh(self.data )
for dataoutitem, expecteditem in zip(list(self.data), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_none_a2(self):
"""Test sinh as *array-none* for basic function with matherrors=True - Array code d.
"""
arrayfunc.sinh(self.data, matherrors=True )
for dataoutitem, expecteditem in zip(list(self.data), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_none_a3(self):
"""Test sinh as *array-none* for basic function with maxlen - Array code d.
"""
arrayfunc.sinh(self.data, maxlen=self.limited )
for dataoutitem, expecteditem in zip(list(self.data), self.expectedlimiteddata):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_none_a4(self):
"""Test sinh as *array-none* for basic function with maxlen and matherrors=True - Array code d.
"""
arrayfunc.sinh(self.data, maxlen=self.limited, matherrors=True )
for dataoutitem, expecteditem in zip(list(self.data), self.expectedlimiteddata):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b1(self):
"""Test sinh as *array-array* for basic function - Array code d.
"""
arrayfunc.sinh(self.data, self.dataout )
for dataoutitem, expecteditem in zip(list(self.dataout), self.expected):
            # The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b2(self):
"""Test sinh as *array-array* for basic function with matherrors=True - Array code d.
"""
arrayfunc.sinh(self.data, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), self.expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b3(self):
"""Test sinh as *array-array* for basic function with maxlen - Array code d.
"""
arrayfunc.sinh(self.data, self.dataout, maxlen=self.limited)
for dataoutitem, expecteditem in zip(list(self.dataout), self.expectedlimiteddataout):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_basic_array_array_b4(self):
"""Test sinh as *array-array* for basic function with maxlen and matherrors=True - Array code d.
"""
arrayfunc.sinh(self.data, self.dataout, maxlen=self.limited, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), self.expectedlimiteddataout):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_param_errors_d(unittest.TestCase):
"""Test for invalid parameters.
param_invalid_template
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.floatarray = array.array('d', [0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0])
arraysize = len(self.floatarray)
self.dataout = array.array('d', itertools.repeat(0.0, arraysize))
# Create some integer array equivalents.
self.intarray = array.array('i', [int(x) for x in self.floatarray])
self.intdataout = array.array('i', [int(x) for x in self.dataout])
########################################################
def test_sinh_array_none_a1(self):
"""Test sinh as *array-none* for integer array - Array code d.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.intarray)
########################################################
def test_sinh_array_none_b1(self):
"""Test sinh as *array-none* for matherrors='a' - Array code d.
"""
# Copy the array so we don't change the original data.
floatarray = copy.copy(self.floatarray)
# This version is expected to pass.
arrayfunc.sinh(floatarray, matherrors=True)
floatarray = copy.copy(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(floatarray, matherrors='a')
########################################################
def test_sinh_array_none_b2(self):
"""Test sinh as *array-none* for maxlen='a' - Array code d.
"""
# Copy the array so we don't change the original data.
floatarray = copy.copy(self.floatarray)
testmaxlen = len(floatarray) // 2
# This version is expected to pass.
arrayfunc.sinh(floatarray, maxlen=testmaxlen)
floatarray = copy.copy(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(floatarray, maxlen='a')
########################################################
def test_sinh_array_array_c1(self):
"""Test sinh as *array-array* for integer array - Array code d.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.intarray, self.dataout)
########################################################
def test_sinh_array_array_c2(self):
"""Test sinh as *array-array* for integer output array - Array code d.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.floatarray, self.intdataout)
########################################################
def test_sinh_array_array_c3(self):
"""Test sinh as *array-array* for integer input and output array - Array code d.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.intarray, self.intdataout)
########################################################
def test_sinh_array_array_d1(self):
"""Test sinh as *array-array* for matherrors='a' - Array code d.
"""
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout, matherrors=True)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.floatarray, self.dataout, matherrors='a')
########################################################
def test_sinh_array_array_d2(self):
"""Test sinh as *array-array* for maxlen='a' - Array code d.
"""
testmaxlen = len(self.floatarray) // 2
# This version is expected to pass.
arrayfunc.sinh(self.floatarray, self.dataout, maxlen=testmaxlen)
floatarray = copy.copy(self.floatarray)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.sinh(self.floatarray, self.dataout, maxlen='a')
########################################################
def test_sinh_no_params_e1(self):
"""Test sinh with no parameters - Array code d.
"""
with self.assertRaises(TypeError):
arrayfunc.sinh()
##############################################################################
##############################################################################
class sinh_nandata_exceptions_nan_f(unittest.TestCase):
"""Test for basic general function operation.
nan_data_errorchecked_noparam_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataout = array.array('f', itertools.repeat(0.0, 10))
self.datainf = array.array('f', [math.inf] * 10)
self.datanan = array.array('f', [math.nan] * 10)
self.dataninf = array.array('f', [-math.inf] * 10)
########################################################
def test_sinh_outputarray(self):
"""Test sinh for data of nan with matherrors checking on and single parameter functions - Array code f.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datanan, self.dataout)
########################################################
def test_sinh_inplace(self):
"""Test sinh in place for data of nan with matherrors checking on and single parameter functions - Array code f.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datanan)
########################################################
def test_sinh_ov_outputarray(self):
"""Test sinh for data of nan with matherrors checking off and single parameter functions - Array code f.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datanan]
# This is the actual test.
arrayfunc.sinh(self.datanan, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_ov_inplace(self):
"""Test sinh in place for data of nan with matherrors checking off and single parameter functions - Array code f.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datanan]
# This is the actual test.
arrayfunc.sinh(self.datanan, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.datanan), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_nandata_exceptions_nan_d(unittest.TestCase):
"""Test for basic general function operation.
nan_data_errorchecked_noparam_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataout = array.array('d', itertools.repeat(0.0, 10))
self.datainf = array.array('d', [math.inf] * 10)
self.datanan = array.array('d', [math.nan] * 10)
self.dataninf = array.array('d', [-math.inf] * 10)
########################################################
def test_sinh_outputarray(self):
"""Test sinh for data of nan with matherrors checking on and single parameter functions - Array code d.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datanan, self.dataout)
########################################################
def test_sinh_inplace(self):
"""Test sinh in place for data of nan with matherrors checking on and single parameter functions - Array code d.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datanan)
########################################################
def test_sinh_ov_outputarray(self):
"""Test sinh for data of nan with matherrors checking off and single parameter functions - Array code d.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datanan]
# This is the actual test.
arrayfunc.sinh(self.datanan, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_ov_inplace(self):
"""Test sinh in place for data of nan with matherrors checking off and single parameter functions - Array code d.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datanan]
# This is the actual test.
arrayfunc.sinh(self.datanan, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.datanan), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_nandata_exceptions_inf_f(unittest.TestCase):
"""Test for basic general function operation.
nan_data_errorchecked_noparam_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataout = array.array('f', itertools.repeat(0.0, 10))
self.datainf = array.array('f', [math.inf] * 10)
self.datanan = array.array('f', [math.nan] * 10)
self.dataninf = array.array('f', [-math.inf] * 10)
########################################################
def test_sinh_outputarray(self):
"""Test sinh for data of inf with matherrors checking on and single parameter functions - Array code f.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datainf, self.dataout)
########################################################
def test_sinh_inplace(self):
"""Test sinh in place for data of inf with matherrors checking on and single parameter functions - Array code f.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datainf)
########################################################
def test_sinh_ov_outputarray(self):
"""Test sinh for data of inf with matherrors checking off and single parameter functions - Array code f.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datainf]
# This is the actual test.
arrayfunc.sinh(self.datainf, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_ov_inplace(self):
"""Test sinh in place for data of inf with matherrors checking off and single parameter functions - Array code f.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datainf]
# This is the actual test.
arrayfunc.sinh(self.datainf, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.datainf), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_nandata_exceptions_inf_d(unittest.TestCase):
"""Test for basic general function operation.
nan_data_errorchecked_noparam_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataout = array.array('d', itertools.repeat(0.0, 10))
self.datainf = array.array('d', [math.inf] * 10)
self.datanan = array.array('d', [math.nan] * 10)
self.dataninf = array.array('d', [-math.inf] * 10)
########################################################
def test_sinh_outputarray(self):
"""Test sinh for data of inf with matherrors checking on and single parameter functions - Array code d.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datainf, self.dataout)
########################################################
def test_sinh_inplace(self):
"""Test sinh in place for data of inf with matherrors checking on and single parameter functions - Array code d.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.datainf)
########################################################
def test_sinh_ov_outputarray(self):
"""Test sinh for data of inf with matherrors checking off and single parameter functions - Array code d.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datainf]
# This is the actual test.
arrayfunc.sinh(self.datainf, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_ov_inplace(self):
"""Test sinh in place for data of inf with matherrors checking off and single parameter functions - Array code d.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.datainf]
# This is the actual test.
arrayfunc.sinh(self.datainf, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.datainf), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_nandata_exceptions_ninf_f(unittest.TestCase):
"""Test for basic general function operation.
nan_data_errorchecked_noparam_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataout = array.array('f', itertools.repeat(0.0, 10))
self.datainf = array.array('f', [math.inf] * 10)
self.datanan = array.array('f', [math.nan] * 10)
self.dataninf = array.array('f', [-math.inf] * 10)
########################################################
def test_sinh_outputarray(self):
"""Test sinh for data of -inf with matherrors checking on and single parameter functions - Array code f.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.dataninf, self.dataout)
########################################################
def test_sinh_inplace(self):
"""Test sinh in place for data of -inf with matherrors checking on and single parameter functions - Array code f.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.dataninf)
########################################################
def test_sinh_ov_outputarray(self):
"""Test sinh for data of -inf with matherrors checking off and single parameter functions - Array code f.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.dataninf]
# This is the actual test.
arrayfunc.sinh(self.dataninf, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_ov_inplace(self):
"""Test sinh in place for data of -inf with matherrors checking off and single parameter functions - Array code f.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.dataninf]
# This is the actual test.
arrayfunc.sinh(self.dataninf, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataninf), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
class sinh_nandata_exceptions_ninf_d(unittest.TestCase):
"""Test for basic general function operation.
nan_data_errorchecked_noparam_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%0.3f != %0.3f' % (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataout = array.array('d', itertools.repeat(0.0, 10))
self.datainf = array.array('d', [math.inf] * 10)
self.datanan = array.array('d', [math.nan] * 10)
self.dataninf = array.array('d', [-math.inf] * 10)
########################################################
def test_sinh_outputarray(self):
"""Test sinh for data of -inf with matherrors checking on and single parameter functions - Array code d.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.dataninf, self.dataout)
########################################################
def test_sinh_inplace(self):
"""Test sinh in place for data of -inf with matherrors checking on and single parameter functions - Array code d.
"""
with self.assertRaises(ArithmeticError):
arrayfunc.sinh(self.dataninf)
########################################################
def test_sinh_ov_outputarray(self):
"""Test sinh for data of -inf with matherrors checking off and single parameter functions - Array code d.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.dataninf]
# This is the actual test.
arrayfunc.sinh(self.dataninf, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_sinh_ov_inplace(self):
"""Test sinh in place for data of -inf with matherrors checking off and single parameter functions - Array code d.
"""
# Calculate the expected result.
expected = [math.sinh(x) for x in self.dataninf]
# This is the actual test.
arrayfunc.sinh(self.dataninf, matherrors=True)
for dataoutitem, expecteditem in zip(list(self.dataninf), expected):
# The behaviour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
##############################################################################
if __name__ == '__main__':
# Check whether the log file option has been selected. This option
# determines where the test results are written.
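# Example (module filename assumed): "python test_sinh.py -l" appends this
# module's results to 'af_unittest.txt' instead of printing them to the console.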
if '-l' in sys.argv:
# Remove the option from the argument list so that "unittest" does
# not complain about unknown options.
sys.argv.remove('-l')
with open('af_unittest.txt', 'a') as f:
f.write('\n\n')
f.write('sinh\n\n')
trun = unittest.TextTestRunner(f)
unittest.main(testRunner=trun)
else:
unittest.main()
##############################################################################
| avg_line_length: 34.483955 | max_line_length: 117 | alphanum_fraction: 0.606464 | (per-file qsc_* quality-signal columns)
| hexsha: 8a186eae1d53e7961c6c0754194611f908b16f26 | size: 12,007 | ext: py | lang: Python
| max_stars_repo_path: tests/test_ion_balance.py | max_stars_repo_name: chummels/trident_test | max_stars_repo_head_hexsha: 259339b1d46f96565d862ad8f11d6d7970a2f73d | max_stars_repo_licenses: ["BSD-3-Clause-Clear"] | max_stars_count: 14 | max_stars_repo_stars_event_min_datetime: 2017-09-14T20:29:38.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-12T11:26:48.000Z
| max_issues_repo_path: tests/test_ion_balance.py | max_issues_repo_name: chummels/trident_test | max_issues_repo_head_hexsha: 259339b1d46f96565d862ad8f11d6d7970a2f73d | max_issues_repo_licenses: ["BSD-3-Clause-Clear"] | max_issues_count: 129 | max_issues_repo_issues_event_min_datetime: 2017-09-20T22:06:36.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-23T20:21:32.000Z
| max_forks_repo_path: tests/test_ion_balance.py | max_forks_repo_name: chummels/trident_test | max_forks_repo_head_hexsha: 259339b1d46f96565d862ad8f11d6d7970a2f73d | max_forks_repo_licenses: ["BSD-3-Clause-Clear"] | max_forks_count: 21 | max_forks_repo_forks_event_min_datetime: 2017-09-14T22:22:35.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-12T11:26:57.000Z
|
"""
Tests for ion balance code
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2016, Trident Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
from trident.ion_balance import \
add_ion_fraction_field, \
add_ion_number_density_field, \
add_ion_density_field, \
add_ion_mass_field, \
add_ion_fields
from yt import \
load, \
SlicePlot
from yt.testing import \
fake_random_ds, \
fake_amr_ds
import tempfile
import shutil
from trident.testing import \
answer_test_data_dir, \
assert_array_rel_equal
import os
import numpy as np
ISO_GALAXY = os.path.join(answer_test_data_dir,
'IsolatedGalaxy/galaxy0030/galaxy0030')
FIRE_SIM = os.path.join(answer_test_data_dir,
'FIRE_M12i_ref11/snapshot_600.hdf5')
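# Note: the Enzo and Gizmo tests below load these datasets from
# answer_test_data_dir, which is assumed to already contain the downloaded
# IsolatedGalaxy and FIRE_M12i_ref11 answer-test data.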
def test_add_ion_fraction_field_to_grid_ds():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_fraction_field('O', 6, ds)
field = ('gas', 'O_p5_ion_fraction')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_number_density_field_to_grid_ds():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_number_density_field('O', 6, ds)
field = ('gas', 'O_p5_number_density')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_density_field_to_grid_ds():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_density_field('O', 6, ds)
field = ('gas', 'O_p5_density')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_mass_field_to_grid_ds():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_mass_field('O', 6, ds, ftype='gas')
field = ('gas', 'O_p5_mass')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_fraction_fields_to_amr_ds():
"""
Test to add various ion fields
"""
ds = fake_amr_ds(fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units=('g/cm**3', 'cm/s', 'cm/s', 'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_fraction_field('O', 6, ds)
field = ('gas', 'O_p5_ion_fraction')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_number_density_fields_to_amr_ds():
"""
Test to add various ion fields
"""
ds = fake_amr_ds(fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units=('g/cm**3', 'cm/s', 'cm/s', 'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_number_density_field('O', 6, ds)
field = ('gas', 'O_p5_number_density')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_density_fields_to_amr_ds():
"""
Test to add various ion fields
"""
ds = fake_amr_ds(fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units=('g/cm**3', 'cm/s', 'cm/s', 'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_density_field('O', 6, ds)
field = ('gas', 'O_p5_density')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_mass_fields_to_amr_ds():
"""
Test to add various ion fields
"""
ds = fake_amr_ds(fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units=('g/cm**3', 'cm/s', 'cm/s', 'cm/s', 'K', ''))
ad = ds.all_data()
add_ion_mass_field('O', 6, ds)
field = ('gas', 'O_p5_mass')
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
dirpath = tempfile.mkdtemp()
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_fields_to_grid_ds():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ftype = 'gas'
ad = ds.all_data()
ions = ['H', 'O', 'N V']
add_ion_fields(ds, ions)
fields = ['H_p0_ion_fraction', 'H_p0_number_density', 'O_p5_mass', 'N_p4_density']
# Ensure that a sampling of the fields is added and can be sliced
dirpath = tempfile.mkdtemp()
for field in fields:
field = (ftype, field)
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_all_ion_fields_to_grid_ds():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ftype = 'gas'
ad = ds.all_data()
add_ion_fields(ds, 'all')
fields = ['H_p0_ion_fraction', 'H_p0_number_density', 'O_p5_mass', 'N_p4_density']
# Ensure that a sampling of the fields is added and can be sliced
dirpath = tempfile.mkdtemp()
for field in fields:
field = (ftype, field)
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_all_ion_fields_to_grid_ds_from_file():
"""
Test to add various ion fields
"""
ds = fake_random_ds(8, fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units= ('g/cm**3', 'cm/s', 'cm/s',
'cm/s', 'K', ''))
ftype = 'gas'
ad = ds.all_data()
add_ion_fields(ds, 'all', ftype=ftype, line_database='lines.txt')
fields = ['H_p0_ion_fraction', 'H_p0_number_density', 'O_p5_mass', 'N_p4_density']
# Ensure that a sampling of the fields is added and can be sliced
dirpath = tempfile.mkdtemp()
for field in fields:
field = (ftype, field)
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_all_ion_fields_to_amr_ds():
"""
Test to add various ion fields
"""
ds = fake_amr_ds(fields=("density", "velocity_x", "velocity_y",
"velocity_z", "temperature", "metallicity"),
units=('g/cm**3', 'cm/s', 'cm/s', 'cm/s', 'K', ''))
ftype = 'gas'
ad = ds.all_data()
ions = ['H', 'O', 'N V']
add_ion_fields(ds, ions, ftype=ftype)
fields = ['H_p0_ion_fraction', 'H_p0_number_density', 'O_p5_mass', 'N_p4_density']
# Ensure that a sampling of the fields is added and can be sliced
dirpath = tempfile.mkdtemp()
for field in fields:
field = (ftype, field)
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_fields_to_enzo():
"""
Test to add various ion fields to Enzo dataset and slice on them
"""
ds = load(ISO_GALAXY)
add_ion_fields(ds, ['H', 'O VI'], ftype='gas')
ad = ds.all_data()
fields = ['H_p0_number_density', 'O_p5_density']
# Ensure that a sampling of the fields is added and can be sliced
dirpath = tempfile.mkdtemp()
for field in fields:
field = ('gas', field)
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_add_ion_fields_to_gizmo():
"""
Test to add various ion fields to gizmo dataset and slice on them
"""
ds = load(FIRE_SIM)
add_ion_fields(ds, ['H', 'O VI'], ftype='PartType0')
ad = ds.all_data()
fields = ['H_p0_ion_fraction', 'O_p5_mass']
# Ensure that a sampling of the fields is added and can be sliced
dirpath = tempfile.mkdtemp()
for field in fields:
field = ('gas', field)
assert field in ds.derived_field_list
assert isinstance(ad[field], np.ndarray)
SlicePlot(ds, 'x', field).save(dirpath)
shutil.rmtree(dirpath)
def test_ion_fraction_field_is_from_on_disk_fields():
"""
Test that the derived H ion fraction is computed from the on-disk fields
"""
ds = load(ISO_GALAXY)
add_ion_fields(ds, ['H'], ftype='gas')
ad = ds.all_data()
# The derived ion fraction should equal the H I number density divided by the total H nuclei density
arr1 = ad['H_p0_ion_fraction']
arr2 = ad['H_p0_number_density'] / ad['H_nuclei_density']
assert_array_rel_equal(arr1, arr2, decimals=15)
def test_to_not_overwrite_fields_for_grid():
"""
Test to not overwrite an existing ion field
"""
ds = load(ISO_GALAXY)
val_before = ds.r['H_p0_number_density'][0]
add_ion_fields(ds, ['H'], ftype='gas')
val_after = ds.r['H_p0_number_density'][0]
assert val_before == val_after
def test_to_not_overwrite_fields_for_particle():
"""
Test to not overwrite an existing ion field
"""
ds = load(FIRE_SIM)
val_sph_before = ds.r[('PartType0', 'H_p0_number_density')][0]
val_gas_before = ds.r[('gas', 'H_p0_number_density')][0]
add_ion_fields(ds, ['H'], ftype='PartType0')
val_sph_after = ds.r[('PartType0', 'H_p0_number_density')][0]
val_gas_after = ds.r[('gas', 'H_p0_number_density')][0]
assert val_sph_before == val_sph_after
assert val_gas_before == val_gas_after
| avg_line_length: 35.949102 | max_line_length: 86 | alphanum_fraction: 0.593237 | (per-file qsc_* quality-signal columns)
| hexsha: 8a54f9beebcceeec131c8c899850a5c39b0eb9ef | size: 42,261 | ext: py | lang: Python
| max_stars_repo_path: compiler/dfg.py | max_stars_repo_name: omareddash/dnnweaverclone | max_stars_repo_head_hexsha: a803ffb1f52e23595cedb8a6dc095c881d2c62ff | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 7 | max_stars_repo_stars_event_min_datetime: 2019-04-06T06:33:11.000Z | max_stars_repo_stars_event_max_datetime: 2021-10-02T08:17:35.000Z
| max_issues_repo_path: compiler/dfg.py | max_issues_repo_name: omareddash/dnnweaverclone | max_issues_repo_head_hexsha: a803ffb1f52e23595cedb8a6dc095c881d2c62ff | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-01-07T17:09:16.000Z | max_issues_repo_issues_event_max_datetime: 2020-01-07T17:09:16.000Z
| max_forks_repo_path: compiler/dfg.py | max_forks_repo_name: omareddash/dnnweaverclone | max_forks_repo_head_hexsha: a803ffb1f52e23595cedb8a6dc095c881d2c62ff | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2019-04-06T06:33:12.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-05T19:17:27.000Z
|
# from network import LayerNode, Convolution
import sys
from math import ceil, log, floor
from collections import deque
INPUT_SHARING = False
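# Overview (inferred from the methods below): DFG chooses per-layer data
# partitions (input/output blocks) that fit within the FPGA's on-chip BRAM
# budget and estimates the resulting DRAM-access penalty for each layer.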
class DFG:
def __init__(self, conv, hardware):
self.hardware = hardware
total_bram_capacity = self.hardware["resources"]["num_bram"] * self.hardware["resources"]["memory_per_bram"]
self.memory = total_bram_capacity
self.compute_config = hardware["config"]
def schedule(self, layer):
# print
from network import Convolution, Normalization
from network import Pooling
if isinstance(layer, Convolution):
tmp = self.conv_schedule(layer)
# print tmp
return tmp
if isinstance(layer, Normalization):
print "Scheduling Normalization"
# exit(-1)
tmp = self.norm_schedule(layer)
# print tmp
return tmp
# elif isinstance(layer, Pooling):
# return [[1, 1, 1], 0]
else:
print "Unknown Layer"
return [[1, 1, 1, 1, 1, 1], 0]
# sys.exit(-1)
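# conv_schedule and norm_schedule both return [input_block, output_block, penalty],
# where each block is [width, height, depth, num_batches] and penalty is the
# estimated number of DRAM accesses (see get_penalty_print below).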
def conv_schedule(self, conv):
from network import Convolution
assert isinstance(conv, Convolution)
prev_layer_params = conv.prev_layer.params
self.input_width = prev_layer_params["size_x"]
self.input_height = prev_layer_params["size_y"]
self.input_channels = prev_layer_params["output_channels"]
self.output_channels = conv.params["output_channels"]
self.kernel_width = conv.params["kernel_size"]
self.kernel_height = conv.params["kernel_size"]
od = conv.get_output_dimensions()
print "OUTPUT DIMENSIONS ARE ------------ {0}".format(od)
self.output_width = od[0]
self.output_height = od[1]
self.pad_x = conv.params["pad_x"]
self.pad_y = conv.params["pad_y"]
self.stride_x = conv.params["stride_x"]
self.stride_y = conv.params["stride_y"]
[input_block, output_block] = self.smart_force(conv)
print "Obtained the following config - Input : {0}, output - {1}".format(input_block, output_block)
# print "Input partition = {0}".format(input_partition)
# print "Output partition = {0}".format(output_partition)
conv.set_data_partition(input_block, output_block)
# config = self.brute_force()
# penalty = self.get_penalty_print(config)
penalty = self.get_penalty_print(input_block, output_block, conv)
# penalty = 0
# print "Scheduling CONV"
# print "Min Penalty = {0:,}".format(penalty)
# print [config, penalty]
conv.set_memory_accesses(penalty)
# exit(-1)
return [input_block, output_block, penalty]
def norm_schedule(self, norm):
from network import Normalization
assert isinstance(norm, Normalization)
prev_layer_params = norm.prev_layer.params
self.input_width = prev_layer_params["size_x"]
self.input_height = prev_layer_params["size_y"]
self.input_channels = prev_layer_params["output_channels"]
self.output_channels = norm.params["output_channels"]
self.norm_type = norm.norm_type
self.kernel_width = norm.params["kernel_size"]
if (self.norm_type == "within_channel"):
self.kernel_height = norm.params["kernel_size"]
else:
self.kernel_height = 1
od = norm.get_output_dimensions()
print "OUTPUT DIMENSIONS ARE ------------ {0}".format(od)
self.output_width = od[0]
self.output_height = od[1]
self.pad_x = norm.params["pad_x"]
self.pad_y = norm.params["pad_y"]
self.stride_x = norm.params["stride_x"]
self.stride_y = norm.params["stride_y"]
[input_block, output_block] = self.smart_force_norm(norm)
# exit(-1)
print "Obtained the following config - Input : {0}, output - {1}".format(input_block, output_block)
# print "Input partition = {0}".format(input_partition)
# print "Output partition = {0}".format(output_partition)
# norm.set_data_partition(input_block, output_block)
# config = self.brute_force()
# penalty = self.get_penalty_print(config)
penalty = self.get_penalty_print_norm(input_block, output_block, norm)
# penalty = 0
# print "Scheduling norm"
# print "Min Penalty = {0:,}".format(penalty)
# print [config, penalty]
norm.set_memory_accesses(penalty)
# exit(-1)
return [input_block, output_block, penalty]
def pool_schedule(self, pool):
from network import Pooling
assert isinstance(pool, Pooling)
prev_layer_params = pool.prev_layer.params
# self.input_width = prev_layer_params["size_x"]
# self.input_height = prev_layer_params["size_y"]
# self.input_channels = prev_layer_params["output_channels"]
# self.output_channels = pool.params["output_channels"]
# self.kernel_width = pool.params["kernel_size"]
# self.kernel_height = pool.params["kernel_size"]
# od = pool.get_output_dimensions()
# self.output_width = od[0]
# self.output_height = od[1]
#
# config = self.smart_force()
# config = self.brute_force()
# penalty = self.get_penalty_print(config)
print "Min Penalty = {0:,}".format(0)
def get_max_width(self, id, od, oh_min):
on_chip_memory = self.memory / 8
print "Total on-chip memory = {0}".format(on_chip_memory)
print "Compute Config = {0}".format(self.compute_config)
# memory_per_pu = int(floor(float(on_chip_memory) / self.compute_config[2]))
memory_per_pu = self.compute_config[0] * self.hardware["resources"]["memory_per_bram"] / 8
print "Memory per BRAM = {0:,}".format(self.hardware["resources"]["memory_per_bram"])
print "Memory per PU = {0:,} Bytes".format(memory_per_pu)
kw = self.kernel_width
kh = self.kernel_height
# ih = kh
ih = 1
for ow in range(self.compute_config[0], self.output_width + 1, self.compute_config[0]):
memory_for_output = int(ceil(float(ow) / self.compute_config[0]) * self.compute_config[0]) * oh_min * od * \
self.hardware["data"]["bytes_per_element"]
# iw = (ow - 1) * self.stride_x + kw
# iw = 2 * self.compute_config[0]
iw = 0
memory_for_input = int(ceil(float(iw) / self.compute_config[0]) * self.compute_config[0]) * kh * id
# print "Memory for input = {0}".format(memory_for_input)
# memory_for_input = 0
if memory_for_input + memory_for_output > memory_per_pu:
return ow - self.compute_config[0]
return self.output_width
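# get_max_height: given the tile width chosen above, find the tallest output
# tile that still fits in the per-PU memory (input memory is ignored here,
# matching the memory_for_input = 0 simplification below).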
def get_max_height(self, id, od, ow_max):
on_chip_memory = self.memory / 8
print "Total on-chip memory = {0}".format(on_chip_memory)
print "Compute Config = {0}".format(self.compute_config)
# memory_per_pu = int(floor(float(on_chip_memory) / self.compute_config[2]))
memory_per_pu = self.compute_config[0] * self.hardware["resources"]["memory_per_bram"] / 8
print "Memory per PU = {0:,} Bytes".format(memory_per_pu)
kw = self.kernel_width
kh = self.kernel_height
iw = (ow_max - 1) * self.stride_x + kw
for oh in range(1, self.output_height + 1):
memory_for_output = int(ceil(float(ow_max) / self.compute_config[0]) * self.compute_config[0]) * oh * od * \
self.hardware["data"]["bytes_per_element"]
# ih = (oh - 1) * self.stride_y + kh
ih = 1
# memory_for_input = iw * ih * id
memory_for_input = 0
if memory_for_input + memory_for_output > memory_per_pu:
return oh - 1
return self.output_height
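# get_max_output_channels: how many output channels fit on-chip once the full
# input feature map has been accounted for; returns 1 if even the input
# feature map alone does not fit.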
def get_max_output_channels(self, id, ow_max, oh_max):
on_chip_memory = self.memory / 8
print "Total on-chip memory = {0}".format(on_chip_memory)
print "Compute Config = {0}".format(self.compute_config)
# memory_per_pu = int(floor(float(on_chip_memory) / self.compute_config[2]))
memory_per_pu = self.compute_config[0] * self.hardware["resources"]["memory_per_bram"] / 8
print "Memory per PU = {0:,} Bytes".format(memory_per_pu)
memory_per_output = int(ceil(float(ow_max)/self.compute_config[0])*self.compute_config[0]) * oh_max
memory_per_input = int(ceil(float(self.input_width)/self.compute_config[0])*self.compute_config[0]) * self.input_height * id
if memory_per_input > memory_per_pu:
print "Can't fit input feature map"
return 1
od = int(floor(float(memory_per_pu - memory_per_input) / memory_per_output))
print "Output Channels = {0}".format(od)
return od
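# smart_force: heuristic partitioner for convolution layers. It first tries to
# fit the whole layer on-chip; otherwise it tiles the output by height, adding
# kernel_h_next_layer rows of overlap so the next layer's halo is available,
# and processes the input one channel at a time (id = 1).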
def smart_force(self, conv):
# TODO : No sharing of inputs
penalty = None
best_ow = None
best_oh = None
best_od = None
best_iw = None
best_ih = None
best_id = None
id = self.input_channels
od = self.output_channels
# oh_max = self.get_max_height()
ow_max = self.get_max_width(id, od, 1)
print "Max width that can fit in PU = {0}".format(ow_max)
oh_max = self.get_max_height(id, od, ow_max)
print "Max height that can fit in PU = {0}".format(oh_max)
if ow_max == self.output_width and oh_max == self.output_height:
print "Can fit entire CONV into FPGA"
best_iw = self.input_width
best_ih = self.input_height
best_id = self.input_channels
best_ow = self.output_width
best_oh = self.output_height
best_od = self.output_channels
kernel_h_next_layer = 0
oh = oh_max
else:
print "Can't fit entire CONV into FPGA"
print "Dividing CONV into partitions"
print "Testing with small partition"
# Find Kernel Height for next layer
curr = conv.next_layer
from network import Convolution, InnerProduct, Pooling, Normalization
while not (isinstance(curr, Convolution) or
isinstance(curr, InnerProduct) or
isinstance(curr, Pooling) or
isinstance(curr, Normalization) or
curr is None):
curr = curr.next_layer
# curr.print_layer(self.hardware)
if curr is None or isinstance(curr, InnerProduct):
kernel_h_next_layer = 0
else:
kernel_h_next_layer = curr.params["kernel_size"] - 1
ow_max = self.get_max_width(1, 1, kernel_h_next_layer+1)
print "Max width that can fit in PU = {0}".format(ow_max)
oh_max = self.get_max_height(1, 1, ow_max)
print "Max height that can fit in PU = {0}".format(oh_max)
if ow_max < self.output_width:
print "ERROR Cant fit entire Width of output"
# exit(-1)
else:
ow = ow_max
oh = min(int(ceil(self.output_height / ceil(
float(self.output_height) / (oh_max - kernel_h_next_layer)))) + kernel_h_next_layer, oh_max)
print "Using max height of {0}".format(oh)
print "Need to do redundant computations : {0}".format(kernel_h_next_layer * ow_max)
# Introduce batches
num_batches = 1
# oh += kernel_h_next_layer
ow = ow_max
id = 1
# od = self.get_max_output_channels(id, ow, oh)
# if (od < 1):
# # print "Less than one OD"
# # exit(-1)
# od = 1
# od = 1
iw = (ow - 1) * self.stride_x + self.kernel_width
ih = (oh - 1) * self.stride_y + self.kernel_height
input_block = [iw, ih, id, num_batches]
output_block = [ow, oh, od, num_batches]
tmp = self.get_penalty_print(input_block, output_block, conv)
best_ow = ow
best_oh = oh
best_od = od
best_iw = iw
best_ih = ih
best_id = 1
input_block = [best_iw, best_ih, best_id, num_batches]
output_block = [best_ow, best_oh, best_od, num_batches]
# exit(-1)
# print "log ({0}) = {1}".format(oh_max, int(ceil(log(oh_max, 2))))
# # sys.exit()
# for oh_step in xrange(int(ceil(log(oh_max, 2)))):
# print oh_step, int(ceil(log(oh_max, 2)))
# oh = oh_max / (2 ** oh_step)
# print oh
# # for oh in xrange (math)
# ih = (oh - 1) * self.stride_y + self.kernel_height
# parallelism = 0
# # print "Output height = {0}".format(oh)
# for od in xrange(1, 1 + self.output_channels):
# for id in xrange(1, 1 + self.input_channels):
# # ow = self.get_max_width(id, oh, od)
# ow = oh
# # if oh < ow:
# # [oh, ow] = [ow, oh]
# # ih = (oh - 1) * self.stride_y + self.kernel_height
# iw = (ow - 1) * self.stride_x + self.kernel_width
# tmp = self.get_penalty([ow, oh, od, iw, ih, id])
# curr_parallelism = ow * od * oh
# if (tmp is not None and penalty >= tmp and (parallelism < curr_parallelism or (
# parallelism == curr_parallelism and id > best_id))) or penalty is None:
# parallelism = curr_parallelism
# best_ow = ow
# best_oh = oh
# best_od = od
# best_iw = iw
# best_ih = ih
# best_id = id
# penalty = tmp
#
# exit(-1)
return [input_block, output_block]
# return [1, 1, 1, 1, 1, 1]
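# smart_force_norm: same tiling strategy as smart_force, specialised for
# normalization layers (the input halo is kernel_size - 1 rather than the
# stride-based halo used for convolution).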
def smart_force_norm(self, norm):
# TODO : No sharing of inputs
penalty = None
best_ow = None
best_oh = None
best_od = None
best_iw = None
best_ih = None
best_id = None
id = self.input_channels
od = self.output_channels
# oh_max = self.get_max_height()
ow_max = self.get_max_width(id, od, 1)
print "Max width that can fit in PU = {0}".format(ow_max)
oh_max = self.get_max_height(id, od, ow_max)
print "Max height that can fit in PU = {0}".format(oh_max)
if ow_max == self.output_width and oh_max == self.output_height:
print "Can fit entire Norm into FPGA"
best_iw = self.input_width
best_ih = self.input_height
best_id = self.input_channels
best_ow = self.output_width
best_oh = self.output_height
best_od = self.output_channels
kernel_h_next_layer = 0
oh = oh_max
else:
print "Can't fit entire Norm into FPGA"
print "Dividing Norm into partitions"
print "Testing with small partition"
# Find Kernel Height for next layer
curr = norm.next_layer
from network import Convolution, InnerProduct, Pooling, Normalization
while not (isinstance(curr, Convolution) or
isinstance(curr, InnerProduct) or
isinstance(curr, Pooling) or
isinstance(curr, Normalization) or
curr is None):
curr = curr.next_layer
# curr.print_layer(self.hardware)
if curr is None or isinstance(curr, InnerProduct):
kernel_h_next_layer = 0
else:
kernel_h_next_layer = curr.params["kernel_size"] - 1
ow_max = self.get_max_width(1, 1, kernel_h_next_layer+1)
print "Max width that can fit in PU = {0}".format(ow_max)
print "Kernel Next = {0}".format(kernel_h_next_layer)
oh_max = self.get_max_height(1, 1, ow_max)
print "Max height that can fit in PU = {0}".format(oh_max)
if ow_max < self.output_width:
print "ERROR Cant fit entire Width of output"
# exit(-1)
else:
ow = ow_max
# oh = min(int(ceil(self.output_height / ceil(
# float(self.output_height) / (oh_max - kernel_h_next_layer)))) + kernel_h_next_layer, oh_max)
oh = oh_max
print "Using max height of {0}".format(oh)
# print "Need to do redundant computations : {0}".format(kernel_h_next_layer * ow_max)
# Introduce batches
num_batches = 1
# oh += kernel_h_next_layer
ow = ow_max
id = 1
# od = self.get_max_output_channels(id, ow, oh)
# if (od < 1):
# # print "Less than one OD"
# # exit(-1)
# od = 1
# od = 1
iw = min(ow + self.kernel_width - 1, self.input_width)
ih = min(oh + self.kernel_height - 1, self.input_height)
input_block = [iw, ih, id, num_batches]
output_block = [ow, oh, od, num_batches]
print "Input block = {0}\n Output block = {1}".format(input_block, output_block)
# exit(-1)
tmp = self.get_penalty_print_norm(input_block, output_block, norm)
# exit(-1)
best_ow = ow
best_oh = oh
best_od = od
best_iw = iw
best_ih = ih
best_id = 1
input_block = [best_iw, best_ih, best_id, num_batches]
output_block = [best_ow, best_oh, best_od, num_batches]
# exit(-1)
# print "log ({0}) = {1}".format(oh_max, int(ceil(log(oh_max, 2))))
# # sys.exit()
# for oh_step in xrange(int(ceil(log(oh_max, 2)))):
# print oh_step, int(ceil(log(oh_max, 2)))
# oh = oh_max / (2 ** oh_step)
# print oh
# # for oh in xrange (math)
# ih = (oh - 1) * self.stride_y + self.kernel_height
# parallelism = 0
# # print "Output height = {0}".format(oh)
# for od in xrange(1, 1 + self.output_channels):
# for id in xrange(1, 1 + self.input_channels):
# # ow = self.get_max_width(id, oh, od)
# ow = oh
# # if oh < ow:
# # [oh, ow] = [ow, oh]
# # ih = (oh - 1) * self.stride_y + self.kernel_height
# iw = (ow - 1) * self.stride_x + self.kernel_width
# tmp = self.get_penalty([ow, oh, od, iw, ih, id])
# curr_parallelism = ow * od * oh
# if (tmp is not None and penalty >= tmp and (parallelism < curr_parallelism or (
# parallelism == curr_parallelism and id > best_id))) or penalty is None:
# parallelism = curr_parallelism
# best_ow = ow
# best_oh = oh
# best_od = od
# best_iw = iw
# best_ih = ih
# best_id = id
# penalty = tmp
#
# exit(-1)
return [input_block, output_block]
# return [1, 1, 1, 1, 1, 1]
# def brute_force(self):
# penalty = None
# best_ow = None
# best_oh = None
# best_od = None
# best_id = None
# for ow in xrange(1, 1 + self.output_width):
# print ow
# for oh in xrange(1, 1 + self.output_height):
# for od in xrange(1, 1 + self.output_channels):
# for id in xrange(1, 1 + self.input_channels):
# tmp = self.get_penalty([ow, oh, od, id])
# if ((tmp is not None and penalty > tmp) or penalty is None):
# best_ow = ow
# best_oh = oh
# best_od = od
# best_id = id
# penalty = tmp
# return [best_ow, best_oh, best_od, best_id]
# def get_penalty(self, config):
# # [bo_w, bo_h, bo_d, bi_d] = config
# [bo_w, bo_h, bo_d, x, y, bi_d] = config
# iw = self.input_width
# ih = self.input_height
# ni = self.input_channels
# no = self.output_channels
# kw = self.kernel_width
# kh = self.kernel_height
# ow = self.output_width
# oh = self.output_height
# iw += 2 * self.pad_x
#
# stride_h = self.stride_y
# stride_w = self.stride_x
#
# compute_config = self.compute_config
# # print "Compute config = {0}".format(self.compute_config)
#
# # print "Input FM width = {0}".format(iw)
# # print "Input FM height = {0}".format(ih)
# # print "Input FM = {0}".format(ni)
# # print "Kernel Width = {0}".format(kw)
# # print "Kernel height = {0}".format(kh)
# # print "Output height = {0}".format(ow)
# # print "Output width = {0}".format(oh)
# # print "Output FM = {0}".format(no)
#
# # print
# # print "Partitioning Input data into sub-sets"
#
# on_chip_memory = self.memory
# # print "On-chip Memory = {0}".format(on_chip_memory)
#
# output_ribbon = [bo_w, bo_h, bo_d]
# # input_ribbon = [output_ribbon[0] + kw - 1, output_ribbon[1] + kh - 1, bi_d]
# input_ribbon = [(output_ribbon[0] - 1) * stride_w + kw, (output_ribbon[1] - 1) * stride_h + kh, bi_d]
# w_steps = int(ceil(float(ow) / output_ribbon[0]))
# h_steps = int(ceil(float(oh) / output_ribbon[1]))
# od_steps = int(ceil(float(no) / output_ribbon[2]))
# id_steps = int(ceil(float(ni) / input_ribbon[2]))
#
# # print "Input Block size = {0} x {1} x {2}".format(input_ribbon[0], input_ribbon[1], input_ribbon[2])
# # print "Input Num Blocks = {0} x {1} x {2}".format(w_steps, h_steps, id_steps)
# # print "Output Block size = {0} x {1} x {2}".format(output_ribbon[0], output_ribbon[1], output_ribbon[2])
# # print "Output Num Blocks = {0} x {1} x {2}".format(w_steps, h_steps, od_steps)
#
# # print "Weight blocks = {0} x {1} x {2} x {3}".format(kw, kh, input_ribbon[2], output_ribbon[2])
#
# memory_input = input_ribbon[0] * input_ribbon[1] * input_ribbon[2]
# memory_weight = kw * kh * input_ribbon[2] * output_ribbon[2]
# memory_output = on_chip_memory - memory_input - memory_weight
#
# if (memory_output < output_ribbon[0] * output_ribbon[1] * output_ribbon[2]):
# # print "Error:Memory size < output"
# return None
#
# # print
# # print "Parallelism = {0:,}".format(output_ribbon[0] * output_ribbon[1] * output_ribbon[2])
# # print "Memory for input = {0:,}".format(memory_input)
# # print "Memory for weights = {0:,}".format(memory_weight)
# # print "Memory for outputs = {0:,}".format(memory_output)
#
# penalty = 0
# total_weight_accesses = 0
# total_partial_output_accesses = 0
# total_input_accesses = 0
#
# bi_w = input_ribbon[0]
# bi_h = input_ribbon[1]
# bi_d = input_ribbon[2]
#
# bo_w = output_ribbon[0]
# bo_h = output_ribbon[1]
# bo_d = output_ribbon[2]
#
# ribbon_penalty = iw * bi_h * bi_d
# ribbon_overlap = max(bi_w * (kh - 1 - stride_h) * bi_d, 0)
#
# # print "Ribbon Data = {0}\nRibbon Overlap = {1}".format((h_steps - 1) * ribbon_penalty, (h_steps - 1) * ribbon_overlap)
#
# bottom_ribbon_penalty = ((oh - 1) * stride_h - bo_h * (h_steps - 1) + kh) * iw * bi_d
#
# # print "Bottom ribbon dimensions : {0} x {1} x {2}".format((oh-1)*stride_h - bo_h * (h_steps - 1) + kh, iw, bi_d)
#
# # print "Bottom ribbon penalty = {0}".format(bottom_ribbon_penalty)
#
# partition_input_accesses = max((h_steps - 1) * ribbon_penalty, 0) + bottom_ribbon_penalty \
# - max((h_steps - 1) * ribbon_overlap, 0)
# # print "Partition Input accesses = {0}".format(partition_input_accesses)
#
# partition_weight_accesses = kw * kh * ni * no
#
# partition_output_access = max(oh * ow * no - memory_output, 0)
# formula_output_penalty = (id_steps - 1) * partition_output_access
# # print "Formula Output penalty = {0}".format(formula_output_penalty)
# penalty += formula_output_penalty
# total_weight_accesses += partition_weight_accesses
# total_partial_output_accesses += formula_output_penalty
# penalty += partition_weight_accesses
#
# partition_input_overlap = bi_w * bi_h * bi_d
# total_input_accesses += (od_steps * partition_input_accesses - (
# od_steps - 1) * partition_input_overlap) * id_steps
# penalty += (od_steps * partition_input_accesses - (od_steps - 1) * partition_input_overlap) * id_steps
#
# # print
# # print "Inputs accessed = {0:,}".format(total_input_accesses)
# # print "Weights accessed = {0:,}".format(total_weight_accesses)
# # print "Outputs accessed = {0:,}".format(total_partial_output_accesses)
# # print "Total DRAM Accesses = {0:,}".format(penalty)
#
# # print
# # print "Total Penalty = {0:,}".format(penalty)
# # actual_compute_cycles = ow * oh * no * kw * kh * ni
# # print "Compute Cycles = {0:,}".format(actual_compute_cycles)
# return penalty
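# get_penalty_print: estimates the total DRAM accesses for a convolution layer
# as input accesses + weight accesses + partial-output accesses for the chosen
# blocking, prints the breakdown, and records the data partition on the layer.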
def get_penalty_print(self, input_block, output_block, conv):
[bi_w, bi_h, bi_d, num_batch] = input_block
[bo_w, bo_h, bo_d, num_batch] = output_block
iw = self.input_width
ih = self.input_height
ni = self.input_channels
no = self.output_channels
kw = self.kernel_width
kh = self.kernel_height
ow = self.output_width
oh = self.output_height
iw += 2 * self.pad_x
print "*" * 50
print "Getting DRAM accesses"
print "*" * 50
stride_h = self.stride_y
stride_w = self.stride_x
print "Input FM width = {0}".format(iw)
print "Input FM height = {0}".format(ih)
print "Input FM = {0}".format(ni)
print "Kernel Width = {0}".format(kw)
print "Kernel height = {0}".format(kh)
print "Output width = {0}".format(oh)
print "Output height = {0}".format(ow)
print "Output FM = {0}".format(no)
on_chip_memory = self.memory
print "On-chip Memory = {0}".format(on_chip_memory)
output_ribbon = [bo_w, bo_h, bo_d]
print "output size being processed = {0} x {1} x {2}".format(output_ribbon[0], output_ribbon[1], num_batch)
input_ribbon = [(output_ribbon[0] - 1) * stride_w + kw, (output_ribbon[1] - 1) * stride_h + kh, bi_d]
w_steps = int(ceil(float(ow) / output_ribbon[0]))
h_steps = int(ceil(float(oh) / output_ribbon[1]))
id_steps = int(ceil(float(ni) / input_ribbon[2]))
od_per_batch = int(floor(float(self.compute_config[2]) / num_batch))
#TODO:Verify
od_steps = int(ceil(ceil(float(no) / output_ribbon[2]) / float(od_per_batch)))
print
print "Partitioning Input data into sub-sets"
print "Input Block size = {0} x {1} x {2} x {3}".format(input_ribbon[0], input_ribbon[1], input_ribbon[2],
num_batch)
print "Input Num Blocks = {0} x {1} x {2} x 1".format(w_steps, h_steps, id_steps, num_batch)
print "Output Block size = {0} x {1} x {2} x {3}".format(output_ribbon[0], output_ribbon[1], od_per_batch,
num_batch)
print "Output Num Blocks = {0} x {1} x {2} x 1".format(w_steps, h_steps, od_steps, num_batch)
print "Weight blocks = {0} x {1} x {2} x {3}".format(kw, kh, input_ribbon[2], output_ribbon[2])
# memory_input = input_ribbon[0] * input_ribbon[1] * input_ribbon[2]
memory_input = 0
memory_weight = kw * kh * input_ribbon[2] * output_ribbon[2]
memory_output = on_chip_memory - memory_input - memory_weight
# if (memory_output < output_ribbon[0] * output_ribbon[1] * output_ribbon[2]):
# print "Error:Memory size < output"
# return None
print
print "Parallelism = {0:,}".format(output_ribbon[0] * output_ribbon[1] * output_ribbon[2])
print "Memory for input = {0:,}".format(memory_input)
print "Memory for weights = {0:,}".format(memory_weight)
print "Memory for outputs = {0:,}".format(memory_output)
bi_w = input_ribbon[0]
bi_h = input_ribbon[1]
bi_d = input_ribbon[2]
bo_w = output_ribbon[0]
bo_h = output_ribbon[1]
bo_d = output_ribbon[2]
print "Compute Config = {0}".format(self.compute_config)
# INPUT ACCESSES
if bi_w <= self.compute_config[0]:
partial_input_accesses = int(ceil(float(bi_w) / self.compute_config[0]) * self.compute_config[0]) * \
bi_h * \
bi_d * \
od_steps * \
num_batch
else:
partial_input_accesses = bi_w *\
bi_h * \
bi_d * \
od_steps * \
num_batch
print "{0}, {1}, {2}, {3}, {4}".format(bi_w, bi_h, bi_d, od_steps, num_batch)
print "Partial Input Accesses = {0}".format(partial_input_accesses)
total_input_accesses = partial_input_accesses * w_steps * h_steps * id_steps
# WEIGHT ACCESSES
# A:
total_weight_accesses = kw * kh * ni * no * w_steps * h_steps
# OUTPUT ACCESSES
# A:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
partial_output_accesses = 0
total_output_accesses = (ni - 1) * partial_output_accesses
penalty = total_input_accesses + total_weight_accesses + total_output_accesses
# penalty = total_input_accesses# + total_weight_accesses + total_output_accesses
print
print "Inputs accessed = {0:,}".format(total_input_accesses)
print "Weights accessed = {0:,}".format(total_weight_accesses)
print "Outputs accessed = {0:,}".format(total_output_accesses)
print "Total DRAM Accesses = {0:,}".format(penalty)
print
print "Total Penalty = {0:,}".format(penalty)
actual_compute_cycles = ow * oh * no * kw * kh * ni
print "Compute Cycles = {0:,}".format(actual_compute_cycles)
# exit(-1)
conv.set_data_partition([bi_w, bi_h, bi_d], [bo_w, bo_h, bo_d])
bw = min(self.hardware["resources"]["bandwidth"], self.compute_config[0])
memory_access_cycles = int(ceil(float(penalty) / bw))
print "Memory Access Cycles = {0}".format(memory_access_cycles)
total_cycles = conv.get_cycles(self.hardware)
return penalty
# if strategy == "A":
# print "Strategy A"
# # INPUT ACCESSES
# # A:
# total_input_accesses = partial_input_accesses * id_steps * w_steps * h_steps
# # WEIGHT ACCESSES
# # A:
# total_weight_accesses = kw * kh * ni * no * w_steps * h_steps
# # OUTPUT ACCESSES
# # A:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
# total_output_accesses = (ni - 1) * partial_output_accesses
#
# elif strategy == "B":
# print "Strategy B"
# # INPUT ACCESSES
# # B:
# total_input_accesses = partial_input_accesses * id_steps * od_steps * w_steps * h_steps
# # WEIGHT ACCESSES
# # B:
# total_weight_accesses = kw * kh * ni * no * od_steps * w_steps * h_steps
# # OUTPUT ACCESSES
# # B:
# partial_output_accesses = 0
# total_output_accesses = (ni - 1) * partial_output_accesses
#
# else:
# print "Strategy C"
# # INPUT ACCESSES
# # C:
# total_input_accesses = partial_input_accesses * id_steps * od_steps * w_steps * h_steps
# # WEIGHT ACCESSES
# # C:
# total_weight_accesses = kw * kh * ni * no
# # OUTPUT ACCESSES
# # C:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
# total_output_accesses = (ni - 1) * partial_output_accesses
# OUTPUT ACCESSES
# A:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
# B:
# partial_output_accesses = 0
def get_penalty_print_norm(self, input_block, output_block, norm):
[bi_w, bi_h, bi_d, num_batch] = input_block
[bo_w, bo_h, bo_d, num_batch] = output_block
iw = self.input_width
ih = self.input_height
ni = self.input_channels
no = self.output_channels
kw = self.kernel_width
kh = self.kernel_height
ow = self.output_width
oh = self.output_height
iw += 2 * self.pad_x
print "*" * 50
print "Getting DRAM accesses"
print "*" * 50
stride_h = self.stride_y
stride_w = self.stride_x
print "Input FM width = {0}".format(iw)
print "Input FM height = {0}".format(ih)
print "Input FM = {0}".format(ni)
print "Kernel Width = {0}".format(kw)
print "Kernel height = {0}".format(kh)
print "Output width = {0}".format(oh)
print "Output height = {0}".format(ow)
print "Output FM = {0}".format(no)
on_chip_memory = self.memory
print "On-chip Memory = {0}".format(on_chip_memory)
output_ribbon = [bo_w, bo_h, bo_d]
print "output size being processed = {0} x {1} x {2}".format(output_ribbon[0], output_ribbon[1], num_batch)
input_ribbon = [bi_w, bi_h, bi_d]
w_steps = int(ceil(float(ow) / output_ribbon[0]))
h_steps = int(ceil(float(oh) / output_ribbon[1]))
id_steps = int(ceil(float(ni) / input_ribbon[2]))
od_per_batch = int(floor(float(self.compute_config[2]) / num_batch))
od_steps = int(ceil(float(no) / od_per_batch))
print "Num output FMs = {0}".format(no)
print "Num PU = {0}".format(output_ribbon[2])
print
print "Partitioning Input data into sub-sets"
print "Input Block size = {0} x {1} x {2} x {3}".format(input_ribbon[0], input_ribbon[1], input_ribbon[2],
num_batch)
print "Input Num Blocks = {0} x {1} x {2} x 1".format(w_steps, h_steps, id_steps, num_batch)
print "Output Block size = {0} x {1} x {2} x {3}".format(output_ribbon[0], output_ribbon[1], od_per_batch,
num_batch)
print "Output Num Blocks = {0} x {1} x {2} x 1".format(w_steps, h_steps, od_steps, num_batch)
print "Weight blocks = {0} x {1} x {2} x {3}".format(kw, kh, input_ribbon[2], output_ribbon[2])
print "Compute config = {0}".format(self.compute_config)
# memory_input = input_ribbon[0] * input_ribbon[1] * input_ribbon[2]
memory_input = 0
memory_weight = 0
memory_output = on_chip_memory - memory_input - memory_weight
# print "Total Memory = {0}".format(self.hardware["resources"]["memory_per_bram"] * self.compute_config[0] * self.compute_config[2])
# if (memory_output < output_ribbon[0] * output_ribbon[1] * output_ribbon[2]):
# print "Error:Memory size < output"
# return None
# print
# print "Parallelism = {0:,}".format(output_ribbon[0] * output_ribbon[1] * output_ribbon[2])
# print "Memory for input = {0:,}".format(memory_input)
# print "Memory for weights = {0:,}".format(memory_weight)
# print "Memory for outputs = {0:,}".format(memory_output)
bi_w = input_ribbon[0]
bi_h = input_ribbon[1]
bi_d = input_ribbon[2]
bo_w = output_ribbon[0]
bo_h = output_ribbon[1]
bo_d = output_ribbon[2]
print "Compute Config = {0}".format(self.compute_config)
# INPUT ACCESSES
if bi_w <= self.compute_config[0]:
partial_input_accesses = int(ceil(float(bi_w) / self.compute_config[0]) * self.compute_config[0]) * \
bi_h * \
bi_d * \
no
else:
partial_input_accesses = bi_w *\
bi_h * \
bi_d * \
no
print "{0}, {1}, {2}, {3}, {4}".format(bi_w, bi_h, bi_d, od_steps, num_batch)
print "Partial Input Accesses = {0}".format(partial_input_accesses)
total_input_accesses = partial_input_accesses * w_steps * h_steps
print "Total Input Accesses = {0}".format(total_input_accesses)
# WEIGHT ACCESSES
# A:
total_weight_accesses = 0
# OUTPUT ACCESSES
# A:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
partial_output_accesses = 0
total_output_accesses = (ni - 1) * partial_output_accesses
penalty = total_input_accesses + total_weight_accesses + total_output_accesses
# penalty = total_input_accesses# + total_weight_accesses + total_output_accesses
print
print "Inputs accessed = {0:,}".format(total_input_accesses)
print "Weights accessed = {0:,}".format(total_weight_accesses)
print "Outputs accessed = {0:,}".format(total_output_accesses)
print "Total DRAM Accesses = {0:,}".format(penalty)
actual_compute_cycles = norm.get_cycles(self.hardware)
print "*" * 50
print
print "Total Penalty = {0:,}".format(penalty)
print "Compute Cycles = {0:,}".format(actual_compute_cycles)
bw = min(self.hardware["resources"]["bandwidth"], self.compute_config[0])
memory_access_cycles = int(ceil(float(penalty) / bw))
print "Memory Access Cycles = {0:,}".format(memory_access_cycles)
# exit(-1)
# norm.set_data_partition([bi_w, bi_h, bi_d], [bo_w, bo_h, bo_d])
# total_cycles = norm.get_cycles(self.hardware)
return penalty
# if strategy == "A":
# print "Strategy A"
# # INPUT ACCESSES
# # A:
# total_input_accesses = partial_input_accesses * id_steps * w_steps * h_steps
# # WEIGHT ACCESSES
# # A:
# total_weight_accesses = kw * kh * ni * no * w_steps * h_steps
# # OUTPUT ACCESSES
# # A:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
# total_output_accesses = (ni - 1) * partial_output_accesses
#
# elif strategy == "B":
# print "Strategy B"
# # INPUT ACCESSES
# # B:
# total_input_accesses = partial_input_accesses * id_steps * od_steps * w_steps * h_steps
# # WEIGHT ACCESSES
# # B:
# total_weight_accesses = kw * kh * ni * no * od_steps * w_steps * h_steps
# # OUTPUT ACCESSES
# # B:
# partial_output_accesses = 0
# total_output_accesses = (ni - 1) * partial_output_accesses
#
# else:
# print "Strategy C"
# # INPUT ACCESSES
# # C:
# total_input_accesses = partial_input_accesses * id_steps * od_steps * w_steps * h_steps
# # WEIGHT ACCESSES
# # C:
# total_weight_accesses = kw * kh * ni * no
# # OUTPUT ACCESSES
# # C:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
# total_output_accesses = (ni - 1) * partial_output_accesses
# OUTPUT ACCESSES
# A:
# partial_output_accesses = int(ceil(float(bo_w) / self.compute_config[0]) * self.compute_config[0]) * \
# bo_h * no * w_steps * h_steps
# B:
# partial_output_accesses = 0
| 40.990301 | 140 | 0.551288 | 5,301 | 42,261 | 4.136955 | 0.035842 | 0.033516 | 0.044186 | 0.034473 | 0.866667 | 0.846831 | 0.816188 | 0.799134 | 0.782034 | 0.763976 | 0 | 0.01892 | 0.337143 | 42,261 | 1,030 | 141 | 41.030097 | 0.763931 | 0.369277 | 0 | 0.714286 | 0 | 0 | 0.132071 | 0 | 0 | 0 | 0 | 0.000971 | 0.006696 | 0 | null | null | 0 | 0.022321 | null | null | 0.272321 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 8 | 8a57906c8d390e3f1b73d26436d91781fc59e7f2 | 5,112 | py | Python | Lib/test/test_hexoct.py | deadsnakes/python2.3 | 0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849 | ["PSF-2.0"] | 1 | 2020-11-26T18:53:46.000Z | 2020-11-26T18:53:46.000Z | Lib/test/test_hexoct.py | deadsnakes/python2.3 | 0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849 | ["PSF-2.0"] | null | null | null | Lib/test/test_hexoct.py | deadsnakes/python2.3 | 0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849 | ["PSF-2.0"] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z |
"""Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
Some of these tests will have to change in Python 2.4!
"""
import sys
platform_long_is_32_bits = sys.maxint == 2147483647
import unittest
from test import test_support
import warnings
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
"<string>")
class TextHexOct(unittest.TestCase):
def test_hex_baseline(self):
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
if platform_long_is_32_bits:
self.assertEqual(0x7fffffff, 2147483647)
else:
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
if platform_long_is_32_bits:
self.assertEqual(-(0x7fffffff), -2147483647)
else:
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
if platform_long_is_32_bits:
self.assertEqual(-0x7fffffff, -2147483647)
else:
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# This test is in a <string> so we can ignore the warnings
exec """if 1:
if platform_long_is_32_bits:
# Positive-looking constants with negative values
self.assertEqual(0x80000000, -2147483648L)
self.assertEqual(0xffffffff, -1)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), 2147483648L)
self.assertEqual(-(0xffffffff), 1)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, 2147483648L)
self.assertEqual(-0xffffffff, 1)
else:
# Positive-looking constants with negative values
self.assertEqual(0x8000000000000000, -9223372036854775808L)
self.assertEqual(0xffffffffffffffff, -1)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), 9223372036854775808L)
self.assertEqual(-(0xffffffffffffffff), 1)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, 9223372036854775808L)
self.assertEqual(-0xffffffffffffffff, 1)
\n"""
def test_oct_baseline(self):
# Baseline tests
self.assertEqual(00, 0)
self.assertEqual(020, 16)
if platform_long_is_32_bits:
self.assertEqual(017777777777, 2147483647)
else:
self.assertEqual(0777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(00), 0)
self.assertEqual(-(020), -16)
if platform_long_is_32_bits:
self.assertEqual(-(017777777777), -2147483647)
else:
self.assertEqual(-(0777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-00, 0)
self.assertEqual(-020, -16)
if platform_long_is_32_bits:
self.assertEqual(-017777777777, -2147483647)
else:
self.assertEqual(-0777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# This test is in a <string> so we can ignore the warnings
exec """if 1:
if platform_long_is_32_bits:
# Positive-looking constants with negative values
self.assertEqual(020000000000, -2147483648L)
self.assertEqual(037777777777, -1)
# Ditto with a minus sign and parentheses
self.assertEqual(-(020000000000), 2147483648L)
self.assertEqual(-(037777777777), 1)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-020000000000, 2147483648L)
self.assertEqual(-037777777777, 1)
else:
# Positive-looking constants with negative values
self.assertEqual(01000000000000000000000, -9223372036854775808L)
self.assertEqual(01777777777777777777777, -1)
# Ditto with a minus sign and parentheses
self.assertEqual(-(01000000000000000000000), 9223372036854775808L)
self.assertEqual(-(01777777777777777777777), 1)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-01000000000000000000000, 9223372036854775808L)
self.assertEqual(-01777777777777777777777, 1)
\n"""
def test_main():
test_support.run_unittest(TextHexOct)
if __name__ == "__main__":
test_main()
| 40.896 | 78 | 0.644953 | 547 | 5,112 | 5.923218 | 0.179159 | 0.222222 | 0.037037 | 0.055556 | 0.877778 | 0.871605 | 0.856173 | 0.856173 | 0.779012 | 0.779012 | 0 | 0.237877 | 0.273865 | 5,112 | 124 | 79 | 41.225806 | 0.634968 | 0.060446 | 0 | 0.36 | 0 | 0 | 0.54822 | 0.220065 | 0 | 0 | 0.0589 | 0 | 0.48 | 0 | null | null | 0 | 0.04 | null | null | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 10 | 8a9668afbbc83a72e07931da09f46f1f87ac7058 | 35,282 | py | Python | src/repli1d/development.py | amir-zeraati/repli1D | 2795b4cc997614f2724b682469ccc1406dec9fac | ["MIT"] | null | null | null | src/repli1d/development.py | amir-zeraati/repli1D | 2795b4cc997614f2724b682469ccc1406dec9fac | ["MIT"] | null | null | null | src/repli1d/development.py | amir-zeraati/repli1D | 2795b4cc997614f2724b682469ccc1406dec9fac | ["MIT"] | null | null | null |
import argparse
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.utils import shuffle
from sklearn.metrics import make_scorer
from repli1d.models import mlp
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--preprocessing', type=str, default='log')
parser.add_argument('--max_epoch', type=int, default=150)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--cell_line', type=str, default='K562')
parser.add_argument('--listfile', nargs='+', type=str,
default='data/K562_2000_merged_histones_init.csv.gz')
parser.add_argument('--marks', nargs='+', type=str,
default=['H2A.Z', 'H3K27ac', 'H3K79me2', 'H3K27me3',
'H3K9ac', 'H3K4me2', 'H3K4me3', 'H3K9me3',
'H3K4me1', 'H3K36me3', 'H4K20me1'])
parser.add_argument('--output', type=str, default=['initiation'])
parser.add_argument('--output_dir', type=str,
default='development/')
parser.add_argument('--image_format', type=str, default='png')
args = parser.parse_args()
df = pd.read_csv('{}'.format(args.listfile), compression='gzip')
masks = pd.read_csv('data/hg19_2000_no_N_inside.csv')
print('Number of NANs is {}'.format(masks['signal'].sum()))
df.loc[~masks['signal'].astype(bool)] = np.nan
df = df.dropna()
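# Note (added annotation): bins whose 'signal' flag in
# data/hg19_2000_no_N_inside.csv is 0 are set to NaN above and then dropped;
# every preprocessing branch below holds out chromosome 'chr1' as the test set
# and trains on all remaining chromosomes.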
print(df)
if args.preprocessing == 'log to log RF Gridsearch':
for i in args.marks + args.output:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
regr = RandomForestRegressor(n_jobs=1, random_state=0)
params = {
'max_depth': [2, 3, 5, 10, 20],
'min_samples_leaf': [5, 10, 20, 50, 100, 200],
'n_estimators': [10, 25, 30, 50, 100, 200]
}
mse = make_scorer(mean_squared_error, greater_is_better=False)
grid_search = GridSearchCV(estimator=regr,
param_grid=params,
cv=4,
n_jobs=80, verbose=1,
scoring=mse)
grid_search.fit(X_train, y_train.ravel())
print(grid_search.best_score_)
# print(mean_squared_error(regr.predict(X_train), y_train))
print(grid_search.best_estimator_)
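# Note (added annotation): because the scorer wraps mean_squared_error with
# greater_is_better=False, grid_search.best_score_ printed above is the
# negated MSE of the best parameter setting (values closer to 0 are better).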
if args.preprocessing == 'log to raw RF Gridsearch':
for i in args.marks:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
regr = RandomForestRegressor(n_jobs=1, random_state=0)
params = {
'max_depth': [2, 3, 5, 10, 20],
'min_samples_leaf': [5, 10, 20, 50, 100, 200],
'n_estimators': [10, 25, 30, 50, 100, 200]
}
mse = make_scorer(mean_squared_error, greater_is_better=False)
grid_search = GridSearchCV(estimator=regr,
param_grid=params,
cv=4,
n_jobs=80, verbose=1,
scoring=mse)
grid_search.fit(X_train, y_train.ravel())
print(grid_search.best_score_)
# print(mean_squared_error(regr.predict(X_train), y_train))
print(grid_search.best_estimator_)
if args.preprocessing == 'raw to log RF Gridsearch':
for i in args.output:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
regr = RandomForestRegressor(n_jobs=1, random_state=0)
params = {
'max_depth': [2, 3, 5, 10, 20],
'min_samples_leaf': [5, 10, 20, 50, 100, 200],
'n_estimators': [10, 25, 30, 50, 100, 200]
}
mse = make_scorer(mean_squared_error, greater_is_better=False)
grid_search = GridSearchCV(estimator=regr,
param_grid=params,
cv=4,
n_jobs=80, verbose=1,
scoring=mse)
grid_search.fit(X_train, y_train.ravel())
print(grid_search.best_score_)
# print(mean_squared_error(regr.predict(X_train), y_train))
print(grid_search.best_estimator_)
if args.preprocessing == 'raw to raw RF Gridsearch':
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
regr = RandomForestRegressor(n_jobs=1, random_state=0)
params = {
'max_depth': [2, 3, 5, 10, 20],
'min_samples_leaf': [5, 10, 20, 50, 100, 200],
'n_estimators': [10, 25, 30, 50, 100, 200]
}
mse = make_scorer(mean_squared_error, greater_is_better=False)
grid_search = GridSearchCV(estimator=regr,
param_grid=params,
cv=4,
n_jobs=80, verbose=1,
scoring=mse)
grid_search.fit(X_train, y_train.ravel())
print(grid_search.best_score_)
# print(mean_squared_error(regr.predict(X_train), y_train))
print(grid_search.best_estimator_)
if args.preprocessing == 'log to log RF':
for i in args.marks + args.output:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
regr = RandomForestRegressor(max_depth=20, min_samples_leaf=20,
n_estimators=500, n_jobs=20,
random_state=0)
regr.fit(X_train, y_train.ravel())
predicted_test = regr.predict(X_test)
predicted = regr.predict(X_train)
pd.DataFrame(predicted, columns=['predictions']).to_csv(
'{}{}_predicted_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(predicted_test, columns=['predictions']).to_csv(
'{}{}_predicted_test.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_train, columns=['observed_values']).to_csv(
'{}{}_observed_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_test, columns=['observed_values']).to_csv(
'{}{}_observed_test.csv'.format(args.output_dir,
args.cell_line))
print(mean_squared_error(10**predicted, 10**y_train))
print(mean_squared_error(10**predicted_test, 10**y_test))
print(mean_squared_error(predicted, y_train))
print(mean_squared_error(predicted_test, y_test))
print(regr.feature_importances_)
p1 = max(max(predicted), max(y_train))
p2 = min(min(predicted), min(y_train))
plt.plot([p1, p2], [p1, p2], '-', color='orange')
plt.scatter(y_train.ravel(), predicted, s=0.1, alpha=0.05)
plt.title(
'Log of predicted values with respect to the log of observed values')
plt.ylabel('Predicted values')
plt.xlabel('Observed values')
plt.axis('square')
plt.savefig('{}distribution_performance.{}'.format(args.output_dir,
args.image_format),
dpi=300, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12, 10))
plt.plot([p1, p2], [p1, p2], 'w-')
plt.hist2d(y_train.ravel(), predicted,
bins=[np.histogram_bin_edges(y_train, bins='auto'),
np.histogram_bin_edges(predicted,
bins='auto')],
cmap=plt.cm.nipy_spectral)
plt.colorbar()
plt.xlabel('Observed values')
plt.ylabel('Predicted values')
plt.title('Log of predicted values with respect to the log of ' +
'observed values for {}'.format(args.cell_line))
plt.savefig('{}{}.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
plt.plot(y_train[0:50], '-o')
plt.plot(predicted[0:50], '-o')
plt.legend(['Real', 'Predicted'], loc='upper right')
plt.title('Comparison of observed values and predicted values by RF')
plt.savefig('{}{}comaprison_r_p.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
if args.preprocessing == 'log to raw RF':
for i in args.marks:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train, random_state=42)
regr = RandomForestRegressor(max_depth=20, min_samples_leaf=20,
n_estimators=500, n_jobs=20, random_state=0)
regr.fit(X_train, y_train.ravel())
predicted_test = regr.predict(X_test)
predicted = regr.predict(X_train)
pd.DataFrame(predicted, columns=['predictions']).to_csv(
'{}{}_predicted_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(predicted_test, columns=['predictions']).to_csv(
'{}{}_predicted_test.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_train, columns=['observed_values']).to_csv(
'{}{}_observed_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_test, columns=['observed_values']).to_csv(
'{}{}_observed_test.csv'.format(args.output_dir,
args.cell_line))
print(mean_squared_error(predicted, y_train))
print(mean_squared_error(predicted_test, y_test))
print(regr.feature_importances_)
p1 = max(max(predicted), max(y_train))
p2 = min(min(predicted), min(y_train))
plt.plot([p1, p2], [p1, p2], '-', color='orange')
plt.scatter(y_train.ravel(), predicted, s=0.1, alpha=0.05)
plt.title('Predicted values with respect to the observed values')
plt.ylabel('Predicted values')
plt.xlabel('Observed values')
plt.axis('square')
plt.savefig('{}distribution_performance.{}'.format(args.output_dir, args.image_format),
dpi=300, bbox_inches='tight')
plt.close()
plt.figure(figsize=(10, 10))
plt.plot([p1, p2], [p1, p2], 'w-')
plt.hist2d(y_train.ravel(), predicted,
bins=[np.histogram_bin_edges(y_train, bins='auto'),
np.histogram_bin_edges(predicted,
bins='auto')],
cmap=plt.cm.nipy_spectral)
plt.colorbar()
plt.xlabel('Observed values')
plt.ylabel('Predicted values')
plt.title('Predicted values with respect to the observed values for {}'.format(
args.cell_line))
plt.savefig('{}{}.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
plt.plot(y_train[0:50], '-o')
plt.plot(predicted[0:50], '-o')
plt.legend(['Observed', 'Predicted'], loc='upper right')
plt.title('Comparison of observed values and predicted values by RF')
plt.savefig('{}{}comaprison_r_p.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
if args.preprocessing == 'raw to log RF':
for i in args.output:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
regr = RandomForestRegressor(max_depth=20, min_samples_leaf=20,
n_estimators=500, n_jobs=20, random_state=0)
regr.fit(X_train, y_train.ravel())
predicted_test = regr.predict(X_test)
predicted = regr.predict(X_train)
pd.DataFrame(predicted, columns=['predictions']).to_csv(
'{}{}_predicted_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(predicted_test, columns=['predictions']).to_csv(
'{}{}_predicted_test.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_train, columns=['observed_values']).to_csv(
'{}{}_observed_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_test, columns=['observed_values']).to_csv(
'{}{}_observed_test.csv'.format(args.output_dir,
args.cell_line))
print(mean_squared_error(10**predicted, 10**y_train))
print(mean_squared_error(10**regr.predict(X_test), 10**y_test))
print(mean_squared_error(predicted, y_train))
print(mean_squared_error(predicted_test, y_test))
print(regr.feature_importances_)
p1 = max(max(predicted), max(y_train))
p2 = min(min(predicted), min(y_train))
plt.plot([p1, p2], [p1, p2], '-', color='orange')
plt.scatter(y_train.ravel(), predicted, s=0.1, alpha=0.05)
plt.title(
'Log of predicted values with respect to the log of observed values')
plt.ylabel('Predicted values')
plt.xlabel('Observed values')
plt.axis('square')
plt.savefig('{}distribution_performance.{}'.format(args.output_dir,
args.image_format),
dpi=300, bbox_inches='tight')
plt.close()
plt.figure(figsize=(10, 10))
plt.plot([p1, p2], [p1, p2], 'w-')
plt.hist2d(y_train.ravel(), predicted,
bins=[np.histogram_bin_edges(y_train, bins='auto'),
np.histogram_bin_edges(predicted,
bins='auto')],
cmap=plt.cm.nipy_spectral)
plt.colorbar()
plt.xlabel('Observed values')
plt.ylabel('Predicted values')
plt.title('Predicted values with respect to the log of ' +
'observed values for {}'.format(args.cell_line))
plt.savefig('{}{}.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
plt.plot(y_train[0:50], '-o')
plt.plot(predicted[0:50], '-o')
plt.legend(['Observed', 'Predicted'], loc='upper right')
plt.title('Comparison of observed values and predicted values by RF')
plt.savefig('{}{}comaprison_r_p.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
if args.preprocessing == 'raw to raw RF':
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train, random_state=42)
regr = RandomForestRegressor(max_depth=20, min_samples_leaf=20,
n_estimators=200, n_jobs=-1, random_state=0)
regr.fit(X_train, y_train.ravel())
predicted_test = regr.predict(X_test)
predicted = regr.predict(X_train)
pd.DataFrame(predicted, columns=['predictions']).to_csv(
'{}{}_predicted_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(predicted_test, columns=['predictions']).to_csv(
'{}{}_predicted_test.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_train, columns=['observed_values']).to_csv(
'{}{}_observed_train.csv'.format(args.output_dir,
args.cell_line))
pd.DataFrame(y_test, columns=['observed_values']).to_csv(
'{}{}_observed_test.csv'.format(args.output_dir,
args.cell_line))
print(mean_squared_error(predicted, y_train))
print(mean_squared_error(predicted_test, y_test))
print(regr.feature_importances_)
p1 = max(max(predicted), max(y_train))
p2 = min(min(predicted), min(y_train))
plt.plot([p1, p2], [p1, p2], '-', color='orange')
plt.scatter(y_train.ravel(), predicted, s=0.1, alpha=0.05)
plt.title('Predicted values with respect to the observed values')
plt.ylabel('Predicted values')
plt.xlabel('Observed values')
plt.axis('square')
plt.savefig('{}distribution_performance.{}'.format(args.output_dir,
args.image_format),
dpi=300, bbox_inches='tight')
plt.close()
# plt.figure(figsize=(10, 10))
plt.plot([p1, p2], [p1, p2], 'w-')
plt.hist2d(np.log10(y_train.ravel()+1), np.log10(predicted + 1),
bins=[100, 100],
cmap=plt.cm.nipy_spectral, norm=matplotlib.colors.LogNorm(
vmin=None, vmax=None, clip=False))
# plt.yscale('log')
# plt.ylim([0, 4])
# plt.xlim([0, 4])
# plt.xscale('log')
plt.colorbar()
plt.xlabel('Log(observed values+1)')
plt.ylabel('Log(predicted values+1)')
plt.title('Predicted values with respect to the ' +
'observed values for {}'.format(args.cell_line))
plt.savefig('{}{}.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.close()
plt.plot(y_train[0:50], '-o')
plt.plot(predicted[0:50], '-o')
plt.legend(['Observed', 'Predicted'], loc='upper right')
plt.title('Comparison of observed values and predicted values by RF')
plt.savefig('{}{}comaprison_r_p.{}'.format(args.output_dir,
args.cell_line,
args.image_format),
dpi=300, bbox_inches='tight', transparent=False)
plt.show()
if args.preprocessing == 'log to log FCNN':
for i in args.marks + args.output:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
# X_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.marks].to_numpy()
# y_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.output].to_numpy()
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
X_train = tf.convert_to_tensor(X_train, np.float32)
y_train = tf.convert_to_tensor(y_train, np.float32)
X_test = tf.convert_to_tensor(X_test, np.float32)
y_test = tf.convert_to_tensor(y_test, np.float32)
# X_val = df.loc[df['chrom'] == 'chr2', args.marks].to_numpy()
# y_val = df.loc[df['chrom'] == 'chr2', args.output].to_numpy()
model = mlp(X_train, y_train)
tf.keras.utils.plot_model(model,
to_file='{}{}FCNN_architecture.png'.format(
args.output_dir,
args.preprocessing),
show_shapes=True)
checkpoint_filepath = r'{}{}FCNN_K562_marks.mdl_wts.hdf5'.format(
args.output_dir, args.preprocessing)
mcp_save = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_best_only=True,
monitor='val_loss', mode='min')
model.compile(loss='mse', optimizer='adam',
metrics=['mse', 'mae',
tf.keras.metrics.RootMeanSquaredError()])
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
history = model.fit(X_train, y_train, epochs=2000,
verbose=1, validation_split=0.07,
callbacks=[callback, mcp_save],
batch_size=128) # validation_data=(X_val, y_val),
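# Note (added annotation): the EarlyStopping callback above watches the
# training loss ('loss') with patience 3, while the ModelCheckpoint keeps the
# weights of the epoch with the lowest validation loss ('val_loss').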
plt.plot(history.history['loss'], c='red')
plt.plot(history.history['val_loss'], c='blue')
plt.scatter(np.argmin(history.history['val_loss']),
np.min(history.history['val_loss']), facecolors='none',
edgecolors='chocolate', s=50)
plt.title('Fully Connected Neural Network Loss')
plt.ylabel('Loss (Mean Squared Error)')
plt.xlabel('Epoch')
plt.legend(['training', 'validation'], loc='upper right')
plt.savefig('{}FCNN_Loss.png'.format(args.output_dir),
dpi=300, bbox_inches='tight')
hist = pd.DataFrame(history.history)
with open('{}{}history.csv'.format(args.output_dir,
args.preprocessing), mode='w') as f:
hist.to_csv(f)
if args.preprocessing == 'log to raw FCNN':
for i in args.marks:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
# X_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.marks].to_numpy()
# y_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.output].to_numpy()
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
X_train, y_train = shuffle(X_train, y_train)
X_train = tf.convert_to_tensor(X_train, np.float32)
y_train = tf.convert_to_tensor(y_train, np.float32)
X_test = tf.convert_to_tensor(X_test, np.float32)
y_test = tf.convert_to_tensor(y_test, np.float32)
# X_val = df.loc[df['chrom'] == 'chr2', args.marks].to_numpy()
# y_val = df.loc[df['chrom'] == 'chr2', args.output].to_numpy()
model = mlp(X_train, y_train)
tf.keras.utils.plot_model(model,
to_file='{}{}FCNN_architecture.png'.format(
args.output_dir,
args.preprocessing),
show_shapes=True)
checkpoint_filepath = r'{}{}FCNN_K562_marks.mdl_wts.hdf5'.format(
args.output_dir, args.preprocessing)
mcp_save = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_best_only=True,
monitor='val_loss', mode='min')
model.compile(loss='mse', optimizer='adam',
metrics=['mse', 'mae',
tf.keras.metrics.RootMeanSquaredError()])
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
history = model.fit(X_train, y_train, epochs=2000,
verbose=1, validation_split=0.07,
callbacks=[callback, mcp_save],
batch_size=128) # validation_data=(X_val, y_val),
plt.plot(history.history['loss'], c='red')
plt.plot(history.history['val_loss'], c='blue')
plt.scatter(np.argmin(history.history['val_loss']),
np.min(history.history['val_loss']), facecolors='none',
edgecolors='chocolate', s=50)
plt.title('Fully Connected Neural Network Loss')
plt.ylabel('Loss (Mean Squared Error)')
plt.xlabel('Epoch')
plt.legend(['training', 'validation'], loc='upper right')
plt.savefig('{}FCNN_Loss.png'.format(args.output_dir),
dpi=300, bbox_inches='tight')
hist = pd.DataFrame(history.history)
with open('{}{}history.csv'.format(args.output_dir,
args.preprocessing), mode='w') as f:
hist.to_csv(f)
predicted = model.predict(X_train)
print(mean_squared_error(predicted, y_train))
print(mean_squared_error(model.predict(X_test), y_test))
if args.preprocessing == 'log to log multi-GPU FCNN':
for i in args.marks + args.output:
df[i] = df[i] + np.min(df[i][(df[i] != 0)])
df[i] = np.log10(df[i])
# X_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.marks].to_numpy()
# y_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.output].to_numpy()
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
# X_val = df.loc[df['chrom'] == 'chr2', args.marks].to_numpy()
# y_val = df.loc[df['chrom'] == 'chr2', args.output].to_numpy()
strategy = tf.distribute.MirroredStrategy()
print("Number of devices: {}".format(strategy.num_replicas_in_sync))
X_train, y_train = shuffle(X_train, y_train)
num_val_samples = 10**5
X_val = X_train[-num_val_samples:]
y_val = y_train[-num_val_samples:]
X_train = X_train[:-num_val_samples]
y_train = y_train[:-num_val_samples]
batch_size = 128
train_dataset = tf.data.Dataset.from_tensor_slices(
(X_train, y_train)).batch(batch_size)
val_dataset = tf.data.Dataset.from_tensor_slices(
(X_val, y_val)).batch(batch_size)
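# Note (added annotation): under tf.distribute.MirroredStrategy the batches of
# size 128 built here act as global batches; each replica processes roughly
# batch_size / strategy.num_replicas_in_sync examples per step.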
# Open a strategy scope.
with strategy.scope():
# Everything that creates variables should be under the strategy scope.
# In general this is only model construction & `compile()`.
model = mlp(X_train, y_train)
tf.keras.utils.plot_model(model,
to_file='{}{}FCNN_architecture.png'.format(
args.output_dir, args.preprocessing),
show_shapes=True)
checkpoint_filepath = r'{}{}FCNN_K562_marks.mdl_wts.hdf5'.format(
args.output_dir, args.preprocessing)
mcp_save = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_best_only=True,
monitor='val_loss', mode='min')
model.compile(loss='mse', optimizer='adam',
metrics=['mse', 'mae',
tf.keras.metrics.RootMeanSquaredError()])
callback = tf.keras.callbacks.EarlyStopping(monitor='loss',
patience=3)
history = model.fit(train_dataset, epochs=2000,
verbose=1, validation_data=val_dataset,
callbacks=[callback, mcp_save])
# validation_data=(X_val, y_val),
plt.plot(history.history['loss'], c='red')
plt.plot(history.history['val_loss'], c='blue')
plt.scatter(np.argmin(history.history['val_loss']),
np.min(history.history['val_loss']), facecolors='none',
edgecolors='chocolate', s=50)
plt.title('Fully Connected Neural Network Loss')
plt.ylabel('Loss (Mean Squared Error)')
plt.xlabel('Epoch')
plt.legend(['training', 'validation'], loc='upper right')
plt.savefig('{}FCNN_Loss.png'.format(args.output_dir),
dpi=300, bbox_inches='tight')
hist = pd.DataFrame(history.history)
with open('{}{}history.csv'.format(args.output_dir,
args.preprocessing), mode='w') as f:
hist.to_csv(f)
if args.preprocessing == 'min_max normalization':
# X_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.marks].to_numpy()
# y_train = df.loc[(df['chrom'] != 'chr1') & (df['chrom'] != 'chr2'),
# args.output].to_numpy()
X_train = df.loc[df['chrom'] != 'chr1', args.marks].to_numpy()
print(X_train.shape)
y_train = df.loc[df['chrom'] != 'chr1', args.output].to_numpy()
print(y_train.shape)
X_test = df.loc[df['chrom'] == 'chr1', args.marks].to_numpy()
y_test = df.loc[df['chrom'] == 'chr1', args.output].to_numpy()
# X_val = df.loc[df['chrom'] == 'chr2', args.marks].to_numpy()
# y_val = df.loc[df['chrom'] == 'chr2', args.output].to_numpy()
model = mlp(X_train, y_train)
tf.keras.utils.plot_model(model,
to_file='{}{}FCNN_architecture.png'.format(
args.output_dir,
args.preprocessing),
show_shapes=True)
checkpoint_filepath = r'{}{}FCNN_K562_marks.mdl_wts.hdf5'.format(
args.output_dir, args.preprocessing)
mcp_save = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_best_only=True,
monitor='val_loss', mode='min')
model.compile(loss='mse', optimizer='adam',
metrics=['mse', 'mae',
tf.keras.metrics.RootMeanSquaredError()])
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
X_train, y_train = shuffle(X_train, y_train)
history = model.fit(X_train, y_train, epochs=2000,
verbose=1, validation_split=0.07,
callbacks=[callback, mcp_save],
batch_size=128) # validation_data=(X_val, y_val),
plt.plot(history.history['loss'], c='red')
plt.plot(history.history['val_loss'], c='blue')
plt.scatter(np.argmin(history.history['val_loss']),
np.min(history.history['val_loss']), facecolors='none',
edgecolors='chocolate', s=50)
plt.title('Fully Connected Neural Network Loss')
plt.ylabel('Loss (Mean Squared Error)')
plt.xlabel('Epoch')
plt.legend(['training', 'validation'], loc='upper right')
plt.savefig('{}FCNN_Loss.png'.format(args.output_dir),
dpi=300, bbox_inches='tight')
hist = pd.DataFrame(history.history)
with open('{}{}history.csv'.format(args.output_dir,
args.preprocessing), mode='w') as f:
hist.to_csv(f)
| 52.424963 | 95 | 0.540304 | 4,246 | 35,282 | 4.290391 | 0.073245 | 0.037218 | 0.024592 | 0.042158 | 0.904759 | 0.901027 | 0.892079 | 0.888072 | 0.882253 | 0.878849 | 0 | 0.026178 | 0.314636 | 35,282 | 672 | 96 | 52.502976 | 0.727183 | 0.055807 | 0 | 0.850891 | 0 | 0 | 0.127975 | 0.026088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025932 | 0 | 0.025932 | 0.0859 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 7 | 8a9c3de7a05f847818653ccc05d0560b4ddaa1fc | 37,503 | py | Python | src/ebay_rest/api/sell_marketing/api/promotion_api.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | ["MIT"] | null | null | null | src/ebay_rest/api/sell_marketing/api/promotion_api.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | ["MIT"] | null | null | null | src/ebay_rest/api/sell_marketing/api/promotion_api.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | ["MIT"] | null | null | null |
# coding: utf-8
"""
Marketing API
<p>The <i>Marketing API </i> offers two platforms that sellers can use to promote and advertise their products:</p> <ul><li><b>Promoted Listings</b> is an eBay ad service that lets sellers set up <i>ad campaigns </i> for the products they want to promote. eBay displays the ads in search results and in other marketing modules as <b>SPONSORED</b> listings. If an item in a Promoted Listings campaign sells, the seller is assessed a Promoted Listings fee, which is a seller-specified percentage applied to the sales price. For complete details, see <a href=\"/api-docs/sell/static/marketing/promoted-listings.html\">Promoted Listings</a>.</li> <li><b>Promotions Manager</b> gives sellers a way to offer discounts on specific items as a way to attract buyers to their inventory. Sellers can set up discounts (such as \"20% off\" and other types of offers) on specific items or on an entire customer order. To further attract buyers, eBay prominently displays promotion <i>teasers</i> throughout buyer flows. For complete details, see <a href=\"/api-docs/sell/static/marketing/promotions-manager.html\">Promotions Manager</a>.</li></ul> <p><b>Marketing reports</b>, on both the Promoted Listings and Promotions Manager platforms, give sellers information that shows the effectiveness of their marketing strategies. The data gives sellers the ability to review and fine tune their marketing efforts.</p> <p class=\"tablenote\"><b>Important!</b> Sellers must have an active eBay Store subscription, and they must accept the <b>Terms and Conditions</b> before they can make requests to these APIs in the Production environment. There are also site-specific listings requirements and restrictions associated with these marketing tools, as listed in the \"requirements and restrictions\" sections for <a href=\"/api-docs/sell/marketing/static/overview.html#PL-requirements\">Promoted Listings</a> and <a href=\"/api-docs/sell/marketing/static/overview.html#PM-requirements\">Promotions Manager</a>.</p> <p>The table below lists all the Marketing API calls grouped by resource.</p> # noqa: E501
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ...sell_marketing.api_client import ApiClient
class PromotionApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_listing_set(self, promotion_id, **kwargs): # noqa: E501
"""get_listing_set # noqa: E501
<p>This method returns the set of listings associated with the <b>promotion_id</b> specified in the path parameter. Call <a href=\"/api-docs/sell/marketing/resources/promotion/methods/getPromotions\">getPromotions</a> to retrieve the IDs of a seller's promotions. <p>The listing details are returned in a paginated set and you can control and results returned using the following query parameters: <b>limit</b>, <b>offset</b>, <b>q</b>, <b>sort</b>, and <b>status</b>.</p> <ul><li><b>Maximum associated listings returned:</b> 200</li> <li><b>Default number of listings returned:</b> 200</li></ul> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_listing_set(promotion_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str promotion_id: This path parameter takes a concatenation of the ID of the promotion you want to get plus the marketplace ID on which the promotion is hosted. Concatenate the two values by separating them with an \"at sign\" (<b>@</b>). <br><br>The ID of the promotion (<b>promotionId</b>) is a unique eBay-assigned value that's generated when the promotion is created. The Marketplace ID is the ENUM value of eBay marketplace where the promotion is hosted. <br><br><b>Example:</b> <code>1********5@EBAY_US</code> (required)
:param str limit: Specifies the maximum number of promotions returned on a page from the result set. <br><br><b>Default:</b> 200<br><b>Maximum:</b> 200
:param str offset: Specifies the number of promotions to skip in the result set before returning the first promotion in the paginated response. <p>Combine <b>offset</b> with the <b>limit</b> query parameter to control the items returned in the response. For example, if you supply an <b>offset</b> of <code>0</code> and a <b>limit</b> of <code>10</code>, the first page of the response contains the first 10 items from the complete list of items retrieved by the call. If <b>offset</b> is <code>10</code> and <b>limit</b> is <code>20</code>, the first page of the response contains items 11-30 from the complete result set.</p> <p><b>Default:</b> 0</p>
:param str q: Reserved for future use.
:param str sort: Specifies the order in which to sort the associated listings in the response. If you precede the supplied value with a dash, the response is sorted in reverse order. <br><br><b>Example:</b> <br> <code>sort=PRICE</code> - Sorts the associated listings by their current price in ascending order <br> <code>sort=-TITLE</code> - Sorts the associated listings by their title in descending alphabetical order (Z-Az-a) <br><br><b>Valid values</b>:<ul class=\"compact\"><li>AVAILABLE</li> <li>PRICE</li> <li>TITLE</li></ul> For implementation help, refer to eBay API documentation at https://developer.ebay.com/api-docs/sell/marketing/types/csb:SortField
:param str status: This query parameter applies only to markdown promotions. It filters the response based on the indicated status of the promotion. Currently, the only supported value for this parameter is <code>MARKED_DOWN</code>, which indicates active markdown promotions. For implementation help, refer to eBay API documentation at https://developer.ebay.com/api-docs/sell/marketing/types/sme:ItemMarkdownStatusEnum
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_listing_set_with_http_info(promotion_id, **kwargs) # noqa: E501
else:
(data) = self.get_listing_set_with_http_info(promotion_id, **kwargs) # noqa: E501
return data
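# Illustrative usage only (the promotion ID below is the placeholder from the
# docstring, not real data); this mirrors the synchronous and async_req
# patterns documented above:
#
#   api = PromotionApi()
#   api.get_listing_set('1********5@EBAY_US', limit='20', offset='0')
#
#   thread = api.get_listing_set('1********5@EBAY_US', async_req=True)
#   result = thread.get()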
def get_listing_set_with_http_info(self, promotion_id, **kwargs): # noqa: E501
"""get_listing_set # noqa: E501
<p>This method returns the set of listings associated with the <b>promotion_id</b> specified in the path parameter. Call <a href=\"/api-docs/sell/marketing/resources/promotion/methods/getPromotions\">getPromotions</a> to retrieve the IDs of a seller's promotions. <p>The listing details are returned in a paginated set and you can control and results returned using the following query parameters: <b>limit</b>, <b>offset</b>, <b>q</b>, <b>sort</b>, and <b>status</b>.</p> <ul><li><b>Maximum associated listings returned:</b> 200</li> <li><b>Default number of listings returned:</b> 200</li></ul> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_listing_set_with_http_info(promotion_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str promotion_id: This path parameter takes a concatenation of the ID of the promotion you want to get plus the marketplace ID on which the promotion is hosted. Concatenate the two values by separating them with an \"at sign\" (<b>@</b>). <br><br>The ID of the promotion (<b>promotionId</b>) is a unique eBay-assigned value that's generated when the promotion is created. The Marketplace ID is the ENUM value of eBay marketplace where the promotion is hosted. <br><br><b>Example:</b> <code>1********5@EBAY_US</code> (required)
:param str limit: Specifies the maximum number of promotions returned on a page from the result set. <br><br><b>Default:</b> 200<br><b>Maximum:</b> 200
:param str offset: Specifies the number of promotions to skip in the result set before returning the first promotion in the paginated response. <p>Combine <b>offset</b> with the <b>limit</b> query parameter to control the items returned in the response. For example, if you supply an <b>offset</b> of <code>0</code> and a <b>limit</b> of <code>10</code>, the first page of the response contains the first 10 items from the complete list of items retrieved by the call. If <b>offset</b> is <code>10</code> and <b>limit</b> is <code>20</code>, the first page of the response contains items 11-30 from the complete result set.</p> <p><b>Default:</b> 0</p>
:param str q: Reserved for future use.
:param str sort: Specifies the order in which to sort the associated listings in the response. If you precede the supplied value with a dash, the response is sorted in reverse order. <br><br><b>Example:</b> <br> <code>sort=PRICE</code> - Sorts the associated listings by their current price in ascending order <br> <code>sort=-TITLE</code> - Sorts the associated listings by their title in descending alphabetical order (Z-Az-a) <br><br><b>Valid values</b>:<ul class=\"compact\"><li>AVAILABLE</li> <li>PRICE</li> <li>TITLE</li></ul> For implementation help, refer to eBay API documentation at https://developer.ebay.com/api-docs/sell/marketing/types/csb:SortField
:param str status: This query parameter applies only to markdown promotions. It filters the response based on the indicated status of the promotion. Currently, the only supported value for this parameter is <code>MARKED_DOWN</code>, which indicates active markdown promotions. For implementation help, refer to eBay API documentation at https://developer.ebay.com/api-docs/sell/marketing/types/sme:ItemMarkdownStatusEnum
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['promotion_id', 'limit', 'offset', 'q', 'sort', 'status'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_listing_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'promotion_id' is set
if ('promotion_id' not in params or
params['promotion_id'] is None):
raise ValueError("Missing the required parameter `promotion_id` when calling `get_listing_set`") # noqa: E501
collection_formats = {}
path_params = {}
if 'promotion_id' in params:
path_params['promotion_id'] = params['promotion_id'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'status' in params:
query_params.append(('status', params['status'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/promotion/{promotion_id}/get_listing_set', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_promotions(self, marketplace_id, **kwargs): # noqa: E501
"""get_promotions # noqa: E501
This method returns a list of a seller's undeleted promotions. <p>The call returns up to 200 currently-available promotions on the specified marketplace. While the response body does not include the promotion's <b>discountRules</b> or <b>inventoryCriterion</b> containers, it does include the <b>promotionHref</b> (which you can use to retrieve the complete details of the promotion).</p> <p>Use query parameters to sort and filter the results by the number of promotions to return, the promotion state or type, and the eBay marketplace. You can also supply keywords to limit the response to the promotions that contain that keywords in the title of the promotion.</p> <p><b>Maximum returned:</b> 200</p> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_promotions(marketplace_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str marketplace_id: The eBay marketplace ID of the site where the promotion is hosted. <p><b>Valid values:</b></p> <ul><li><code>EBAY_AU</code> = Australia</li> <li><code>EBAY_DE</code> = Germany</li> <li><code>EBAY_ES</code> = Spain</li> <li><code>EBAY_FR</code> = France</li> <li><code>EBAY_GB</code> = Great Britain</li> <li><code>EBAY_IT</code> = Italy</li> <li><code>EBAY_US</code> = United States</li></ul> (required)
:param str limit: Specifies the maximum number of promotions returned on a page from the result set. <br><br><b>Default:</b> 200 <br><b>Maximum:</b> 200
:param str offset: Specifies the number of promotions to skip in the result set before returning the first promotion in the paginated response. <p>Combine <b>offset</b> with the <b>limit</b> query parameter to control the items returned in the response. For example, if you supply an <b>offset</b> of <code>0</code> and a <b>limit</b> of <code>10</code>, the first page of the response contains the first 10 items from the complete list of items retrieved by the call. If <b>offset</b> is <code>10</code> and <b>limit</b> is <code>20</code>, the first page of the response contains items 11-30 from the complete result set.</p> <p><b>Default:</b> 0</p>
:param str promotion_status: Specifies the promotion state by which you want to filter the results. The response contains only those promotions that match the state you specify. <br><br><b>Valid values:</b> <ul><li><code>DRAFT</code></li> <li><code>SCHEDULED</code></li> <li><code>RUNNING</code></li> <li><code>PAUSED</code></li> <li><code>ENDED</code></li></ul><b>Maximum number of input values:</b> 1
:param str promotion_type: Filters the returned promotions based on their campaign promotion type. Specify one of the following values to indicate the promotion type you want returned: <ul><li><code>CODED_COUPON</code> – A coupon code promotion set with <b>createItemPromotion</b>.</li> <li><code>MARKDOWN_SALE</code> – A markdown promotion set with <b>createItemPriceMarkdownPromotion</b>.</li> <li><code>ORDER_DISCOUNT</code> – A threshold promotion set with <b>createItemPromotion</b>.</li> <li><code>VOLUME_DISCOUNT</code> – A volume pricing promotion set with <b>createItemPromotion</b>.</li></ul>
:param str q: A string consisting of one or more <i>keywords</i>. eBay filters the response by returning only the promotions that contain the supplied keywords in the promotion title. <br><br><b>Example:</b> \"iPhone\" or \"Harry Potter.\" <br><br>Commas that separate keywords are ignored. For example, a keyword string of \"iPhone, iPad\" equals \"iPhone iPad\", and each results in a response that contains promotions with both \"iPhone\" and \"iPad\" in the title.
:param str sort: Specifies the order in which to sort the response. If you precede the supplied value with a dash, the response is sorted in reverse order. <br><br><b>Example:</b> <br> <code>sort=END_DATE</code> Sorts the promotions in the response by their end dates in ascending order <br> <code>sort=-PROMOTION_NAME</code> Sorts the promotions by their promotion name in descending alphabetical order (Z-Az-a) <br><br><b>Valid values</b>:<ul><li><code>START_DATE</code></li> <li><code>END_DATE</code></li> <li><code>PROMOTION_NAME</code></li></ul> For implementation help, refer to eBay API documentation at https://developer.ebay.com/api-docs/sell/marketing/types/csb:SortField
:return: PromotionsPagedCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_promotions_with_http_info(marketplace_id, **kwargs) # noqa: E501
else:
(data) = self.get_promotions_with_http_info(marketplace_id, **kwargs) # noqa: E501
return data
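# Illustrative pagination sketch (assumes `api` is an instance of this API
# class). It mirrors the offset/limit example in the docstring above:
# offset='10' with limit='20' returns items 11-30 of the result set.
#
#     page = api.get_promotions('EBAY_US', limit='20', offset='10',
#                               promotion_status='RUNNING', sort='END_DATE')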
def get_promotions_with_http_info(self, marketplace_id, **kwargs): # noqa: E501
"""get_promotions # noqa: E501
This method returns a list of a seller's undeleted promotions. <p>The call returns up to 200 currently-available promotions on the specified marketplace. While the response body does not include the promotion's <b>discountRules</b> or <b>inventoryCriterion</b> containers, it does include the <b>promotionHref</b> (which you can use to retrieve the complete details of the promotion).</p> <p>Use query parameters to sort and filter the results by the number of promotions to return, the promotion state or type, and the eBay marketplace. You can also supply keywords to limit the response to the promotions that contain those keywords in the title of the promotion.</p> <p><b>Maximum returned:</b> 200</p> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_promotions_with_http_info(marketplace_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str marketplace_id: The eBay marketplace ID of the site where the promotion is hosted. <p><b>Valid values:</b></p> <ul><li><code>EBAY_AU</code> = Australia</li> <li><code>EBAY_DE</code> = Germany</li> <li><code>EBAY_ES</code> = Spain</li> <li><code>EBAY_FR</code> = France</li> <li><code>EBAY_GB</code> = Great Britain</li> <li><code>EBAY_IT</code> = Italy</li> <li><code>EBAY_US</code> = United States</li></ul> (required)
:param str limit: Specifies the maximum number of promotions returned on a page from the result set. <br><br><b>Default:</b> 200 <br><b>Maximum:</b> 200
:param str offset: Specifies the number of promotions to skip in the result set before returning the first promotion in the paginated response. <p>Combine <b>offset</b> with the <b>limit</b> query parameter to control the items returned in the response. For example, if you supply an <b>offset</b> of <code>0</code> and a <b>limit</b> of <code>10</code>, the first page of the response contains the first 10 items from the complete list of items retrieved by the call. If <b>offset</b> is <code>10</code> and <b>limit</b> is <code>20</code>, the first page of the response contains items 11-30 from the complete result set.</p> <p><b>Default:</b> 0</p>
:param str promotion_status: Specifies the promotion state by which you want to filter the results. The response contains only those promotions that match the state you specify. <br><br><b>Valid values:</b> <ul><li><code>DRAFT</code></li> <li><code>SCHEDULED</code></li> <li><code>RUNNING</code></li> <li><code>PAUSED</code></li> <li><code>ENDED</code></li></ul><b>Maximum number of input values:</b> 1
:param str promotion_type: Filters the returned promotions based on their campaign promotion type. Specify one of the following values to indicate the promotion type you want returned: <ul><li><code>CODED_COUPON</code> – A coupon code promotion set with <b>createItemPromotion</b>.</li> <li><code>MARKDOWN_SALE</code> – A markdown promotion set with <b>createItemPriceMarkdownPromotion</b>.</li> <li><code>ORDER_DISCOUNT</code> – A threshold promotion set with <b>createItemPromotion</b>.</li> <li><code>VOLUME_DISCOUNT</code> – A volume pricing promotion set with <b>createItemPromotion</b>.</li></ul>
:param str q: A string consisting of one or more <i>keywords</i>. eBay filters the response by returning only the promotions that contain the supplied keywords in the promotion title. <br><br><b>Example:</b> \"iPhone\" or \"Harry Potter.\" <br><br>Commas that separate keywords are ignored. For example, a keyword string of \"iPhone, iPad\" equals \"iPhone iPad\", and each results in a response that contains promotions with both \"iPhone\" and \"iPad\" in the title.
:param str sort: Specifies the order in which to sort the response. If you precede the supplied value with a dash, the response is sorted in reverse order. <br><br><b>Example:</b> <br> <code>sort=END_DATE</code> Sorts the promotions in the response by their end dates in ascending order <br> <code>sort=-PROMOTION_NAME</code> Sorts the promotions by their promotion name in descending alphabetical order (Z-Az-a) <br><br><b>Valid values</b>:<ul><li><code>START_DATE</code></li> <li><code>END_DATE</code></li> <li><code>PROMOTION_NAME</code></li></ul> For implementation help, refer to eBay API documentation at https://developer.ebay.com/api-docs/sell/marketing/types/csb:SortField
:return: PromotionsPagedCollection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['marketplace_id', 'limit', 'offset', 'promotion_status', 'promotion_type', 'q', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_promotions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'marketplace_id' is set
if ('marketplace_id' not in params or
params['marketplace_id'] is None):
raise ValueError("Missing the required parameter `marketplace_id` when calling `get_promotions`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'marketplace_id' in params:
query_params.append(('marketplace_id', params['marketplace_id'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'promotion_status' in params:
query_params.append(('promotion_status', params['promotion_status'])) # noqa: E501
if 'promotion_type' in params:
query_params.append(('promotion_type', params['promotion_type'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/promotion', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PromotionsPagedCollection', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def pause_promotion(self, promotion_id, **kwargs): # noqa: E501
"""pause_promotion # noqa: E501
This method pauses a currently-active (RUNNING) threshold promotion and changes the state of the promotion from <code>RUNNING</code> to <code>PAUSED</code>. Pausing a promotion makes the promotion temporarily unavailable to buyers and any currently-incomplete transactions will not receive the promotional offer until the promotion is resumed. Also, promotion teasers are not displayed when a promotion is paused. <br><br>Pass the ID of the promotion you want to pause using the <b>promotion_id</b> path parameter. Call <a href=\"/api-docs/sell/marketing/resources/promotion/methods/getPromotions\">getPromotions</a> to retrieve the IDs of the seller's promotions. <br><br><b>Note:</b> You can only pause threshold promotions (you cannot pause markdown promotions). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.pause_promotion(promotion_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str promotion_id: This path parameter takes a concatenation of the ID of the promotion you want to pause plus the marketplace ID on which the promotion is hosted. Concatenate the two values by separating them with an \"at sign\" (<b>@</b>). <br><br>The ID of the promotion (<b>promotionId</b>) is a unique eBay-assigned value that's generated when the promotion is created. The Marketplace ID is the ENUM value of the eBay marketplace where the promotion is hosted. <br><br><b>Example:</b> <code>1********5@EBAY_US</code> (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.pause_promotion_with_http_info(promotion_id, **kwargs) # noqa: E501
else:
(data) = self.pause_promotion_with_http_info(promotion_id, **kwargs) # noqa: E501
return data
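# Illustrative sketch (assumes `api` is an instance of this API class; the
# promotion ID is a hypothetical placeholder). A threshold promotion is paused
# and later resumed with the same '<promotionId>@<MARKETPLACE_ID>' path value:
#
#     api.pause_promotion('<promotionId>@EBAY_US')
#     api.resume_promotion('<promotionId>@EBAY_US')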
def pause_promotion_with_http_info(self, promotion_id, **kwargs): # noqa: E501
"""pause_promotion # noqa: E501
This method pauses a currently-active (RUNNING) threshold promotion and changes the state of the promotion from <code>RUNNING</code> to <code>PAUSED</code>. Pausing a promotion makes the promotion temporarily unavailable to buyers and any currently-incomplete transactions will not receive the promotional offer until the promotion is resumed. Also, promotion teasers are not displayed when a promotion is paused. <br><br>Pass the ID of the promotion you want to pause using the <b>promotion_id</b> path parameter. Call <a href=\"/api-docs/sell/marketing/resources/promotion/methods/getPromotions\">getPromotions</a> to retrieve the IDs of the seller's promotions. <br><br><b>Note:</b> You can only pause threshold promotions (you cannot pause markdown promotions). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.pause_promotion_with_http_info(promotion_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str promotion_id: This path parameter takes a concatenation of the ID of the promotion you want to pause plus the marketplace ID on which the promotion is hosted. Concatenate the two values by separating them with an \"at sign\" (<b>@</b>). <br><br>The ID of the promotion (<b>promotionId</b>) is a unique eBay-assigned value that's generated when the promotion is created. The Marketplace ID is the ENUM value of the eBay marketplace where the promotion is hosted. <br><br><b>Example:</b> <code>1********5@EBAY_US</code> (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['promotion_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method pause_promotion" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'promotion_id' is set
if ('promotion_id' not in params or
params['promotion_id'] is None):
raise ValueError("Missing the required parameter `promotion_id` when calling `pause_promotion`") # noqa: E501
collection_formats = {}
path_params = {}
if 'promotion_id' in params:
path_params['promotion_id'] = params['promotion_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/promotion/{promotion_id}/pause', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def resume_promotion(self, promotion_id, **kwargs): # noqa: E501
"""resume_promotion # noqa: E501
This method restarts a threshold promotion that was previously paused and changes the state of the promotion from <code>PAUSED</code> to <code>RUNNING</code>. Only promotions that have been previously paused can be resumed. Resuming a promotion reinstates the promotional teasers and any transactions that were in motion before the promotion was paused will again be eligible for the promotion. <br><br>Pass the ID of the promotion you want to resume using the <b>promotion_id</b> path parameter. Call <a href=\"/api-docs/sell/marketing/resources/promotion/methods/getPromotions\">getPromotions</a> to retrieve the IDs of the seller's promotions. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resume_promotion(promotion_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str promotion_id: This path parameter takes a concatenation of the ID of the promotion you want to resume plus the marketplace ID on which the promotion is hosted. Concatenate the two values by separating them with an \"at sign\" (<b>@</b>). <br><br>The ID of the promotion (<b>promotionId</b>) is a unique eBay-assigned value that's generated when the promotion is created. The Marketplace ID is the ENUM value of the eBay marketplace where the promotion is hosted. <br><br><b>Example:</b> <code>1********5@EBAY_US</code> (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.resume_promotion_with_http_info(promotion_id, **kwargs) # noqa: E501
else:
(data) = self.resume_promotion_with_http_info(promotion_id, **kwargs) # noqa: E501
return data
def resume_promotion_with_http_info(self, promotion_id, **kwargs): # noqa: E501
"""resume_promotion # noqa: E501
This method restarts a threshold promotion that was previously paused and changes the state of the promotion from <code>PAUSED</code> to <code>RUNNING</code>. Only promotions that have been previously paused can be resumed. Resuming a promotion reinstates the promotional teasers and any transactions that were in motion before the promotion was paused will again be eligible for the promotion. <br><br>Pass the ID of the promotion you want to resume using the <b>promotion_id</b> path parameter. Call <a href=\"/api-docs/sell/marketing/resources/promotion/methods/getPromotions\">getPromotions</a> to retrieve the IDs of the seller's promotions. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resume_promotion_with_http_info(promotion_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str promotion_id: This path parameter takes a concatenation of the ID of the promotion you want to resume plus the marketplace ID on which the promotion is hosted. Concatenate the two values by separating them with an \"at sign\" (<b>@</b>). <br><br>The ID of the promotion (<b>promotionId</b>) is a unique eBay-assigned value that's generated when the promotion is created. The Marketplace ID is the ENUM value of the eBay marketplace where the promotion is hosted. <br><br><b>Example:</b> <code>1********5@EBAY_US</code> (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['promotion_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method resume_promotion" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'promotion_id' is set
if ('promotion_id' not in params or
params['promotion_id'] is None):
raise ValueError("Missing the required parameter `promotion_id` when calling `resume_promotion`") # noqa: E501
collection_formats = {}
path_params = {}
if 'promotion_id' in params:
path_params['promotion_id'] = params['promotion_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/promotion/{promotion_id}/resume', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 84.087444
| 2,092
| 0.687918
| 5,412
| 37,503
| 4.66796
| 0.084257
| 0.020583
| 0.0095
| 0.010133
| 0.908206
| 0.904366
| 0.897597
| 0.893401
| 0.892333
| 0.886474
| 0
| 0.01091
| 0.205664
| 37,503
| 445
| 2,093
| 84.276404
| 0.837127
| 0.690451
| 0
| 0.759657
| 0
| 0
| 0.192293
| 0.037393
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038627
| false
| 0
| 0.017167
| 0
| 0.111588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
76d8cbf800338c9da42a6c927dff877e2e0ea9fe
| 5,319
|
py
|
Python
|
src/python/turicreate/meta/decompiler/tests/test_comprehensions.py
|
pappasG/turicreate
|
494e313957a6c01333628b182a7d5bc6efea18f8
|
[
"BSD-3-Clause"
] | 2
|
2019-02-08T08:45:27.000Z
|
2020-09-07T05:55:18.000Z
|
src/python/turicreate/meta/decompiler/tests/test_comprehensions.py
|
pappasG/turicreate
|
494e313957a6c01333628b182a7d5bc6efea18f8
|
[
"BSD-3-Clause"
] | 3
|
2022-02-15T04:42:24.000Z
|
2022-03-12T01:05:15.000Z
|
src/python/turicreate/meta/decompiler/tests/test_comprehensions.py
|
pappasG/turicreate
|
494e313957a6c01333628b182a7d5bc6efea18f8
|
[
"BSD-3-Clause"
] | 1
|
2019-11-23T09:47:24.000Z
|
2019-11-23T09:47:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
'''
Created on Nov 6, 2011
@author: sean
'''
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import unittest
from ...decompiler.tests import Base
class ListComprehension(Base):
def test_comp1(self):
stmnt = '[a for b in c]'
self.statement(stmnt)
def test_comp2(self):
stmnt = '[a() +1 for b in c]'
self.statement(stmnt)
def test_comp3(self):
stmnt = 'y = [a() +1 for b in c]'
self.statement(stmnt)
def test_comp_ifs(self):
stmnt = 'y = [a() +1 for b in c if asdf]'
self.statement(stmnt)
def test_comp_ifs1(self):
stmnt = 'y = [a() +1 for b in c if asdf if asd]'
self.statement(stmnt)
def test_comp_ifs2(self):
stmnt = 'y = [a() +1 for b in c if asdf if not asd]'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp1(self):
stmnt = '[a for b in c for d in e]'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp2(self):
stmnt = '[a() +1 for b in c for d in e]'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp3(self):
stmnt = 'y = [a() +1 for b in c for d in e]'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs(self):
stmnt = 'y = [a() +1 for b in c if asdf for d in e]'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs1(self):
stmnt = 'y = [a() +1 for b in c if asdf if asd for d in e if this]'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs2(self):
stmnt = 'y = [a() +1 for b in c for d in e if adsf]'
self.statement(stmnt)
class SetComprehension(Base):
def test_comp1(self):
stmnt = '{a for b in c}'
self.statement(stmnt)
def test_comp2(self):
stmnt = '{a() +1 for b in c}'
self.statement(stmnt)
def test_comp3(self):
stmnt = 'y = {a() +1 for b in c}'
self.statement(stmnt)
def test_comp_ifs(self):
stmnt = 'y = {a() +1 for b in c if asdf}'
self.statement(stmnt)
def test_comp_ifs1(self):
stmnt = 'y = {a() +1 for b in c if asdf if asd}'
self.statement(stmnt)
def test_comp_ifs2(self):
stmnt = 'y = {a() +1 for b in c if asdf if not asd}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp1(self):
stmnt = '{a for b in c for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp2(self):
stmnt = '{a() +1 for b in c for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp3(self):
stmnt = 'y = {a() +1 for b in c for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs(self):
stmnt = 'y = {a() +1 for b in c if asdf for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs1(self):
stmnt = 'y = {a() +1 for b in c if asdf if asd for d in e if this}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs2(self):
stmnt = 'y = {a() +1 for b in c for d in e if adsf}'
self.statement(stmnt)
class DictComprehension(Base):
def test_comp1(self):
stmnt = '{a:q for b in c}'
self.statement(stmnt)
def test_comp2(self):
stmnt = '{a() +1:q for b in c}'
self.statement(stmnt)
def test_comp3(self):
stmnt = 'y = {a() +1:q for b in c}'
self.statement(stmnt)
def test_comp_ifs(self):
stmnt = 'y = {a() +1:q for b in c if asdf}'
self.statement(stmnt)
def test_comp_ifs1(self):
stmnt = 'y = {a() +1:q for b in c if asdf if asd}'
self.statement(stmnt)
def test_comp_ifs2(self):
stmnt = 'y = {a() +1:q for b in c if asdf if not asd}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp1(self):
stmnt = '{a:q for b in c for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp2(self):
stmnt = '{a():q +1 for b in c for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp3(self):
stmnt = 'y = {a() +1:q for b in c for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs(self):
stmnt = 'y = {a() +1:q for b in c if asdf for d in e}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs1(self):
stmnt = 'y = {a() +1:q for b in c if asdf if asd for d in e if this}'
self.statement(stmnt)
@unittest.expectedFailure
def test_multi_comp_ifs2(self):
stmnt = 'y = {a() +1:q for b in c for d in e if adsf}'
self.statement(stmnt)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 27.848168
| 85
| 0.595789
| 834
| 5,319
| 3.684652
| 0.111511
| 0.082005
| 0.07029
| 0.082005
| 0.858119
| 0.858119
| 0.858119
| 0.85454
| 0.85454
| 0.849658
| 0
| 0.018967
| 0.286332
| 5,319
| 190
| 86
| 27.994737
| 0.790306
| 0.057718
| 0
| 0.661765
| 0
| 0.022059
| 0.244649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.264706
| false
| 0
| 0.036765
| 0
| 0.323529
| 0.007353
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
76fd94b9430ed0128ef8ba0ca29f558d0b0a1311
| 423
|
py
|
Python
|
CCC/ccc01j1.py
|
crackersamdjam/DMOJ-Solutions
|
97992566595e2c7bf41b5da9217d8ef61bdd1d71
|
[
"MIT"
] | null | null | null |
CCC/ccc01j1.py
|
crackersamdjam/DMOJ-Solutions
|
97992566595e2c7bf41b5da9217d8ef61bdd1d71
|
[
"MIT"
] | null | null | null |
CCC/ccc01j1.py
|
crackersamdjam/DMOJ-Solutions
|
97992566595e2c7bf41b5da9217d8ef61bdd1d71
|
[
"MIT"
] | null | null | null |
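# Reads an integer n and prints a symmetric pattern 2*n characters wide: each
# row holds i asterisks at both ends separated by 2*(n - i) spaces, with i
# stepping up by 2 from 1 to at most n and then back down by 2 from n-2.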
n = int(input())
for i in range(1, n+1, 2):
for j in range(i):
print("*", end="")
for j in range(n+n-i-i):
print(" ", end="")
for j in range(i):
print("*", end="")
print()
for i in range(n-2, 0, -2):
for j in range(i):
print("*", end="")
for j in range(n+n-i-i):
print(" ", end="")
for j in range(i):
print("*", end="")
print()
| 24.882353
| 29
| 0.413712
| 69
| 423
| 2.536232
| 0.188406
| 0.32
| 0.205714
| 0.377143
| 0.788571
| 0.788571
| 0.788571
| 0.788571
| 0.788571
| 0.788571
| 0
| 0.021978
| 0.35461
| 423
| 17
| 30
| 24.882353
| 0.619048
| 0
| 0
| 0.823529
| 0
| 0
| 0.014706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.470588
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
0a0330d9ae37f832164caf3efc632b4ffb9d8af7
| 59
|
py
|
Python
|
src/python/some_file.py
|
davidjstevenson/cpp-embedded-python
|
798e99bde8bb969fda18335f36e286e2dfcbf9e2
|
[
"MIT"
] | null | null | null |
src/python/some_file.py
|
davidjstevenson/cpp-embedded-python
|
798e99bde8bb969fda18335f36e286e2dfcbf9e2
|
[
"MIT"
] | null | null | null |
src/python/some_file.py
|
davidjstevenson/cpp-embedded-python
|
798e99bde8bb969fda18335f36e286e2dfcbf9e2
|
[
"MIT"
] | null | null | null |
def some_file():
print("some_file.py:some_file()")
| 14.75
| 38
| 0.627119
| 9
| 59
| 3.777778
| 0.555556
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186441
| 59
| 3
| 39
| 19.666667
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0.436364
| 0.436364
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
0a2ee86ecd7e85c1e9bb767099c6638e0aa65e4b
| 9,345
|
py
|
Python
|
src/genie/libs/parser/iosxr/tests/ShowRouteIpv6/cli/equal/golden_outpu_4_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxr/tests/ShowRouteIpv6/cli/equal/golden_outpu_4_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxr/tests/ShowRouteIpv6/cli/equal/golden_outpu_4_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
'vrf': {
'default': {
'address_family': {
'ipv6': {
'routes': {
'2001:0:10:204:0:30:0:2/128': {
'active': True,
'next_hop': {
'outgoing_interface': {
'Bundle-Ether10': {
'outgoing_interface': 'Bundle-Ether10',
'updated': '00:54:06'
}
}
},
'route': '2001:0:10:204:0:30:0:2/128',
'source_protocol': 'local',
'source_protocol_codes': 'L'
},
'2001:0:10:204:0:30::/126': {
'active': True,
'next_hop': {
'outgoing_interface': {
'Bundle-Ether10': {
'outgoing_interface': 'Bundle-Ether10',
'updated': '00:54:06'
}
}
},
'route': '2001:0:10:204:0:30::/126',
'source_protocol': 'connected',
'source_protocol_codes': 'C'
},
'2001:0:10:204:0:33::/126': {
'active': True,
'metric': 11,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': 'fe80::226:88ff:fe55:6f17',
'outgoing_interface': 'TenGigE0/0/0/1',
'updated': '00:53:18'
}
}
},
'route': '2001:0:10:204:0:33::/126',
'route_preference': 115,
'source_protocol': 'isis',
'source_protocol_codes': 'i '
'L2'
},
'2001:db8:1b7f:8e5c::8/128': {
'active': True,
'metric': 11,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': 'fe80::226:88ff:fe55:6f17',
'outgoing_interface': 'TenGigE0/0/0/1',
'updated': '00:53:18'
}
}
},
'route': '2001:db8:1b7f:8e5c::8/128',
'route_preference': 115,
'source_protocol': 'isis',
'source_protocol_codes': 'i '
'L2'
},
'2001:db8:4:4::1/128': {
'active': True,
'next_hop': {
'outgoing_interface': {
'Loopback60': {
'outgoing_interface': 'Loopback60',
'updated': '00:54:19'
}
}
},
'route': '2001:db8:4:4::1/128',
'source_protocol': 'local',
'source_protocol_codes': 'L'
},
'::/0': {
'active': True,
'metric': 11,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': 'fe80::226:88ff:fe55:6f17',
'outgoing_interface': 'TenGigE0/0/0/1',
'updated': '00:00:10'
}
}
},
'route': '::/0',
'route_preference': 115,
'source_protocol': 'isis',
'source_protocol_codes': 'i* '
'L2'
},
'fc00:a0:1:216::1/128': {
'active': True,
'metric': 20,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': 'fe80::464c:a8ff:fe96:a25f',
'outgoing_interface': 'Bundle-Ether10',
'updated': '00:53:55'
}
}
},
'route': 'fc00:a0:1:216::1/128',
'route_preference': 115,
'source_protocol': 'isis',
'source_protocol_codes': 'i '
'L2'
},
'fc00:a0:1::/64': {
'active': True,
'next_hop': {
'outgoing_interface': {
'TenGigE0/0/0/0': {
'outgoing_interface': 'TenGigE0/0/0/0',
'updated': '00:54:18'
}
}
},
'route': 'fc00:a0:1::/64',
'source_protocol': 'connected',
'source_protocol_codes': 'C'
},
'fc00:a0:1::2/128': {
'active': True,
'next_hop': {
'outgoing_interface': {
'TenGigE0/0/0/0': {
'outgoing_interface': 'TenGigE0/0/0/0',
'updated': '00:54:18'
}
}
},
'route': 'fc00:a0:1::2/128',
'source_protocol': 'local',
'source_protocol_codes': 'L'
},
'fc00:a0:2::/64': {
'active': True,
'metric': 11,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': 'fe80::226:88ff:fe55:6f17',
'outgoing_interface': 'TenGigE0/0/0/1',
'updated': '00:53:18'
}
}
},
'route': 'fc00:a0:2::/64',
'route_preference': 115,
'source_protocol': 'isis',
'source_protocol_codes': 'i '
'L2'
},
'fc00:a0:5::/64': {
'active': True,
'next_hop': {
'outgoing_interface': {
'TenGigE0/0/0/1': {
'outgoing_interface': 'TenGigE0/0/0/1',
'updated': '00:54:18'
}
}
},
'route': 'fc00:a0:5::/64',
'source_protocol': 'connected',
'source_protocol_codes': 'C'
},
'fc00:a0:5::2/128': {
'active': True,
'next_hop': {
'outgoing_interface': {
'TenGigE0/0/0/1': {
'outgoing_interface': 'TenGigE0/0/0/1',
'updated': '00:54:18'
}
}
},
'route': 'fc00:a0:5::2/128',
'source_protocol': 'local',
'source_protocol_codes': 'L'
}
}
}
},
'last_resort': {
'gateway': 'fe80::226:88ff:fe55:6f17',
'to_network': '::'
}
}
}
}
| 44.080189
| 79
| 0.243018
| 531
| 9,345
| 4.105461
| 0.146893
| 0.154128
| 0.104587
| 0.143119
| 0.907798
| 0.898165
| 0.855046
| 0.796789
| 0.777523
| 0.709633
| 0
| 0.147372
| 0.647833
| 9,345
| 211
| 80
| 44.2891
| 0.515041
| 0
| 0
| 0.54067
| 0
| 0
| 0.258589
| 0.063684
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a602ac85f69e9daef9f41df0dd1311b88d2e305
| 4,523
|
py
|
Python
|
misc/data_loader.py
|
minfanzhang/noisy-K-FAC
|
5c7dd24d09ec13bce0f427f38324d6d684f0e998
|
[
"Apache-2.0"
] | null | null | null |
misc/data_loader.py
|
minfanzhang/noisy-K-FAC
|
5c7dd24d09ec13bce0f427f38324d6d684f0e998
|
[
"Apache-2.0"
] | null | null | null |
misc/data_loader.py
|
minfanzhang/noisy-K-FAC
|
5c7dd24d09ec13bce0f427f38324d6d684f0e998
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torchvision
import torchvision.transforms as transforms
class Flatten(object):
def __call__(self, tensor):
return tensor.view(-1)
def __repr__(self):
return self.__class__.__name__
class Transpose(object):
def __call__(self, tensor):
return tensor.permute(1, 2, 0)
def __repr__(self):
return self.__class__.__name__
def load_pytorch(config, batch_size=None):
if config.dataset == 'cifar10':
if config.data_aug:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
Transpose()
])
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
Transpose()
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
Transpose()
])
trainset = torchvision.datasets.CIFAR10(root=config.data_path, train=True, download=True, transform=train_transform)
testset = torchvision.datasets.CIFAR10(root=config.data_path, train=False, download=True, transform=test_transform)
elif config.dataset == 'cifar100':
if config.data_aug:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
Transpose()
])
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
Transpose()
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
Transpose()
])
trainset = torchvision.datasets.CIFAR100(root=config.data_path, train=True, download=True, transform=train_transform)
testset = torchvision.datasets.CIFAR100(root=config.data_path, train=False, download=True, transform=test_transform)
elif config.dataset == 'mnist':
transform = transforms.Compose([
transforms.ToTensor(),
Flatten(),
])
trainset = torchvision.datasets.MNIST(root=config.data_path, train=True, download=True, transform=transform)
testset = torchvision.datasets.MNIST(root=config.data_path, train=False, download=True, transform=transform)
elif config.dataset == 'fmnist':
transform = transforms.Compose([
transforms.ToTensor(),
Flatten(),
])
trainset = torchvision.datasets.FashionMNIST(root=config.data_path, train=True, download=True, transform=transform)
testset = torchvision.datasets.FashionMNIST(root=config.data_path, train=False, download=True, transform=transform)
else:
raise ValueError("Unsupported dataset!")
if batch_size:
config.batch_size = batch_size
if config.check_grad:
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
drop_last=True)
else:
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
drop_last=True)
testloader = torch.utils.data.DataLoader(testset,
batch_size=config.test_batch_size,
shuffle=False,
num_workers=config.num_workers)
return trainloader, testloader
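# Illustrative usage sketch (assumes `config` exposes the attributes read
# above: dataset, data_aug, data_path, batch_size, test_batch_size,
# num_workers and check_grad):
#
#     trainloader, testloader = load_pytorch(config, batch_size=128)
#     for images, labels in trainloader:
#         ...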
| 43.490385
| 124
| 0.563343
| 430
| 4,523
| 5.755814
| 0.193023
| 0.036364
| 0.08404
| 0.116364
| 0.854949
| 0.845253
| 0.845253
| 0.792727
| 0.758788
| 0.664242
| 0
| 0.067397
| 0.33407
| 4,523
| 103
| 125
| 43.912621
| 0.754316
| 0
| 0
| 0.731183
| 0
| 0
| 0.01017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053763
| false
| 0
| 0.032258
| 0.043011
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ab0c1109fc2e04eead391dc4bb1e7056cf708c3
| 165
|
py
|
Python
|
Utils/__init__.py
|
whkwls2653/Pytorch_Face_Recognition-
|
60f3849def589957d9080457a1a9833112a71f6c
|
[
"MIT"
] | 62
|
2020-08-26T05:42:39.000Z
|
2022-03-31T04:25:50.000Z
|
Utils/__init__.py
|
whkwls2653/Pytorch_Face_Recognition-
|
60f3849def589957d9080457a1a9833112a71f6c
|
[
"MIT"
] | 10
|
2020-08-27T06:46:10.000Z
|
2021-09-29T03:36:07.000Z
|
Utils/__init__.py
|
whkwls2653/Pytorch_Face_Recognition-
|
60f3849def589957d9080457a1a9833112a71f6c
|
[
"MIT"
] | 13
|
2020-08-30T00:27:37.000Z
|
2021-12-09T02:56:07.000Z
|
from Utils.Other_Utils.ChangeTimeFormat import ChangeTimeFormat
from Utils.Other_Utils.Logging import init_logger
from Utils.Other_Utils.Visualizer import Visualizer
| 55
| 63
| 0.89697
| 22
| 165
| 6.545455
| 0.409091
| 0.1875
| 0.291667
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 165
| 3
| 64
| 55
| 0.935065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6acc1cb544307572f81b72c04516b2bd92e9cf19
| 374,355
|
py
|
Python
|
sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/models/_models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AlertingResultQuery(msrest.serialization.Model):
"""AlertingResultQuery.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. start time.
:type start_time: ~datetime.datetime
:param end_time: Required. end time.
:type end_time: ~datetime.datetime
:param time_mode: Required. time mode. Possible values include: "AnomalyTime", "CreatedTime",
"ModifiedTime".
:type time_mode: str or ~azure.ai.metricsadvisor.models.TimeMode
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
'time_mode': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_mode': {'key': 'timeMode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AlertingResultQuery, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.time_mode = kwargs['time_mode']
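# Illustrative construction sketch (not part of the generated module): the
# model is populated from keyword arguments named after its attributes, e.g.
#
#     import datetime
#     query = AlertingResultQuery(
#         start_time=datetime.datetime(2021, 1, 1),
#         end_time=datetime.datetime(2021, 1, 2),
#         time_mode="AnomalyTime",
#     )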
class AlertResult(msrest.serialization.Model):
"""AlertResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar alert_id: alert id.
:vartype alert_id: str
:ivar timestamp: anomaly time.
:vartype timestamp: ~datetime.datetime
:ivar created_time: created time.
:vartype created_time: ~datetime.datetime
:ivar modified_time: modified time.
:vartype modified_time: ~datetime.datetime
"""
_validation = {
'alert_id': {'readonly': True},
'timestamp': {'readonly': True},
'created_time': {'readonly': True},
'modified_time': {'readonly': True},
}
_attribute_map = {
'alert_id': {'key': 'alertId', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(AlertResult, self).__init__(**kwargs)
self.alert_id = None
self.timestamp = None
self.created_time = None
self.modified_time = None
class AlertResultList(msrest.serialization.Model):
"""AlertResultList.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar next_link:
:vartype next_link: str
:param value: Required.
:type value: list[~azure.ai.metricsadvisor.models.AlertResult]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[AlertResult]'},
}
def __init__(
self,
**kwargs
):
super(AlertResultList, self).__init__(**kwargs)
self.next_link = None
self.value = kwargs['value']
class AlertSnoozeCondition(msrest.serialization.Model):
"""AlertSnoozeCondition.
All required parameters must be populated in order to send to Azure.
:param auto_snooze: Required. snooze point count, value range: [0, +∞).
:type auto_snooze: int
:param snooze_scope: Required. snooze scope. Possible values include: "Metric", "Series".
:type snooze_scope: str or ~azure.ai.metricsadvisor.models.SnoozeScope
:param only_for_successive: Required. only snooze for successive anomalies.
:type only_for_successive: bool
"""
_validation = {
'auto_snooze': {'required': True},
'snooze_scope': {'required': True},
'only_for_successive': {'required': True},
}
_attribute_map = {
'auto_snooze': {'key': 'autoSnooze', 'type': 'int'},
'snooze_scope': {'key': 'snoozeScope', 'type': 'str'},
'only_for_successive': {'key': 'onlyForSuccessive', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(AlertSnoozeCondition, self).__init__(**kwargs)
self.auto_snooze = kwargs['auto_snooze']
self.snooze_scope = kwargs['snooze_scope']
self.only_for_successive = kwargs['only_for_successive']
class AnomalyAlertingConfiguration(msrest.serialization.Model):
"""AnomalyAlertingConfiguration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar anomaly_alerting_configuration_id: anomaly alerting configuration unique id.
:vartype anomaly_alerting_configuration_id: str
:param name: Required. anomaly alerting configuration name.
:type name: str
:param description: anomaly alerting configuration description.
:type description: str
:param cross_metrics_operator: cross metrics operator
should be specified when setting up multiple metric alerting configurations. Possible values
include: "AND", "OR", "XOR".
:type cross_metrics_operator: str or
~azure.ai.metricsadvisor.models.AnomalyAlertingConfigurationLogicType
:param split_alert_by_dimensions: dimensions used to split alert.
:type split_alert_by_dimensions: list[str]
:param hook_ids: Required. hook unique ids.
:type hook_ids: list[str]
:param metric_alerting_configurations: Required. Anomaly alerting configurations.
:type metric_alerting_configurations:
list[~azure.ai.metricsadvisor.models.MetricAlertingConfiguration]
"""
_validation = {
'anomaly_alerting_configuration_id': {'readonly': True},
'name': {'required': True},
'split_alert_by_dimensions': {'unique': True},
'hook_ids': {'required': True, 'unique': True},
'metric_alerting_configurations': {'required': True, 'unique': True},
}
_attribute_map = {
'anomaly_alerting_configuration_id': {'key': 'anomalyAlertingConfigurationId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'cross_metrics_operator': {'key': 'crossMetricsOperator', 'type': 'str'},
'split_alert_by_dimensions': {'key': 'splitAlertByDimensions', 'type': '[str]'},
'hook_ids': {'key': 'hookIds', 'type': '[str]'},
'metric_alerting_configurations': {'key': 'metricAlertingConfigurations', 'type': '[MetricAlertingConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyAlertingConfiguration, self).__init__(**kwargs)
self.anomaly_alerting_configuration_id = None
self.name = kwargs['name']
self.description = kwargs.get('description', "")
self.cross_metrics_operator = kwargs.get('cross_metrics_operator', None)
self.split_alert_by_dimensions = kwargs.get('split_alert_by_dimensions', None)
self.hook_ids = kwargs['hook_ids']
self.metric_alerting_configurations = kwargs['metric_alerting_configurations']
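# Illustrative construction sketch (hypothetical values): the required keyword
# arguments are 'name', 'hook_ids' and 'metric_alerting_configurations', while
# fields such as 'description' fall back to the defaults set in __init__ above.
#
#     config = AnomalyAlertingConfiguration(
#         name="example alerting configuration",
#         hook_ids=["<hookId>"],
#         metric_alerting_configurations=[metric_alerting_configuration],
#     )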
class AnomalyAlertingConfigurationList(msrest.serialization.Model):
"""AnomalyAlertingConfigurationList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.AnomalyAlertingConfiguration]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AnomalyAlertingConfiguration]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyAlertingConfigurationList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class AnomalyAlertingConfigurationPatch(msrest.serialization.Model):
"""AnomalyAlertingConfigurationPatch.
:param name: Anomaly alerting configuration name.
:type name: str
:param description: anomaly alerting configuration description.
:type description: str
:param cross_metrics_operator: cross metrics operator. Possible values include: "AND", "OR",
"XOR".
:type cross_metrics_operator: str or
~azure.ai.metricsadvisor.models.AnomalyAlertingConfigurationLogicType
:param split_alert_by_dimensions: dimensions used to split alert.
:type split_alert_by_dimensions: list[str]
:param hook_ids: hook unique ids.
:type hook_ids: list[str]
:param metric_alerting_configurations: Anomaly alerting configurations.
:type metric_alerting_configurations:
list[~azure.ai.metricsadvisor.models.MetricAlertingConfiguration]
"""
_validation = {
'split_alert_by_dimensions': {'unique': True},
'hook_ids': {'unique': True},
'metric_alerting_configurations': {'unique': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'cross_metrics_operator': {'key': 'crossMetricsOperator', 'type': 'str'},
'split_alert_by_dimensions': {'key': 'splitAlertByDimensions', 'type': '[str]'},
'hook_ids': {'key': 'hookIds', 'type': '[str]'},
'metric_alerting_configurations': {'key': 'metricAlertingConfigurations', 'type': '[MetricAlertingConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyAlertingConfigurationPatch, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', "")
self.cross_metrics_operator = kwargs.get('cross_metrics_operator', None)
self.split_alert_by_dimensions = kwargs.get('split_alert_by_dimensions', None)
self.hook_ids = kwargs.get('hook_ids', None)
self.metric_alerting_configurations = kwargs.get('metric_alerting_configurations', None)
class AnomalyDetectionConfiguration(msrest.serialization.Model):
"""AnomalyDetectionConfiguration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar anomaly_detection_configuration_id: anomaly detection configuration unique id.
:vartype anomaly_detection_configuration_id: str
:param name: Required. anomaly detection configuration name.
:type name: str
:param description: anomaly detection configuration description.
:type description: str
:param metric_id: Required. metric unique id.
:type metric_id: str
:param whole_metric_configuration: Required.
:type whole_metric_configuration: ~azure.ai.metricsadvisor.models.WholeMetricConfiguration
:param dimension_group_override_configurations: detection configuration for series group.
:type dimension_group_override_configurations:
list[~azure.ai.metricsadvisor.models.DimensionGroupConfiguration]
:param series_override_configurations: detection configuration for specific series.
:type series_override_configurations: list[~azure.ai.metricsadvisor.models.SeriesConfiguration]
"""
_validation = {
'anomaly_detection_configuration_id': {'readonly': True},
'name': {'required': True},
'metric_id': {'required': True},
'whole_metric_configuration': {'required': True},
'dimension_group_override_configurations': {'unique': True},
'series_override_configurations': {'unique': True},
}
_attribute_map = {
'anomaly_detection_configuration_id': {'key': 'anomalyDetectionConfigurationId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'whole_metric_configuration': {'key': 'wholeMetricConfiguration', 'type': 'WholeMetricConfiguration'},
'dimension_group_override_configurations': {'key': 'dimensionGroupOverrideConfigurations', 'type': '[DimensionGroupConfiguration]'},
'series_override_configurations': {'key': 'seriesOverrideConfigurations', 'type': '[SeriesConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDetectionConfiguration, self).__init__(**kwargs)
self.anomaly_detection_configuration_id = None
self.name = kwargs['name']
self.description = kwargs.get('description', "")
self.metric_id = kwargs['metric_id']
self.whole_metric_configuration = kwargs['whole_metric_configuration']
self.dimension_group_override_configurations = kwargs.get('dimension_group_override_configurations', None)
self.series_override_configurations = kwargs.get('series_override_configurations', None)
class AnomalyDetectionConfigurationList(msrest.serialization.Model):
"""AnomalyDetectionConfigurationList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.AnomalyDetectionConfiguration]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AnomalyDetectionConfiguration]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDetectionConfigurationList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class AnomalyDetectionConfigurationPatch(msrest.serialization.Model):
"""AnomalyDetectionConfigurationPatch.
:param name: anomaly detection configuration name.
:type name: str
:param description: anomaly detection configuration description.
:type description: str
:param whole_metric_configuration:
:type whole_metric_configuration: ~azure.ai.metricsadvisor.models.WholeMetricConfigurationPatch
:param dimension_group_override_configurations: detection configuration for series group.
:type dimension_group_override_configurations:
list[~azure.ai.metricsadvisor.models.DimensionGroupConfiguration]
:param series_override_configurations: detection configuration for specific series.
:type series_override_configurations: list[~azure.ai.metricsadvisor.models.SeriesConfiguration]
"""
_validation = {
'dimension_group_override_configurations': {'unique': True},
'series_override_configurations': {'unique': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'whole_metric_configuration': {'key': 'wholeMetricConfiguration', 'type': 'WholeMetricConfigurationPatch'},
'dimension_group_override_configurations': {'key': 'dimensionGroupOverrideConfigurations', 'type': '[DimensionGroupConfiguration]'},
'series_override_configurations': {'key': 'seriesOverrideConfigurations', 'type': '[SeriesConfiguration]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDetectionConfigurationPatch, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', "")
self.whole_metric_configuration = kwargs.get('whole_metric_configuration', None)
self.dimension_group_override_configurations = kwargs.get('dimension_group_override_configurations', None)
self.series_override_configurations = kwargs.get('series_override_configurations', None)
class AnomalyDimensionList(msrest.serialization.Model):
"""AnomalyDimensionList.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar next_link:
:vartype next_link: str
:param value: Required.
:type value: list[str]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDimensionList, self).__init__(**kwargs)
self.next_link = None
self.value = kwargs['value']
class AnomalyDimensionQuery(msrest.serialization.Model):
"""AnomalyDimensionQuery.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. start time.
:type start_time: ~datetime.datetime
:param end_time: Required. end time.
:type end_time: ~datetime.datetime
:param dimension_name: Required. dimension to query.
:type dimension_name: str
:param dimension_filter:
:type dimension_filter: ~azure.ai.metricsadvisor.models.DimensionGroupIdentity
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
'dimension_name': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'dimension_name': {'key': 'dimensionName', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'DimensionGroupIdentity'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDimensionQuery, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.dimension_name = kwargs['dimension_name']
self.dimension_filter = kwargs.get('dimension_filter', None)
class MetricFeedback(msrest.serialization.Model):
"""MetricFeedback.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AnomalyFeedback, ChangePointFeedback, CommentFeedback, PeriodFeedback.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param feedback_type: Required. feedback type. Constant filled by server. Possible values
include: "Anomaly", "ChangePoint", "Period", "Comment".
:type feedback_type: str or ~azure.ai.metricsadvisor.models.FeedbackType
:ivar feedback_id: feedback unique id.
:vartype feedback_id: str
:ivar created_time: feedback created time.
:vartype created_time: ~datetime.datetime
:ivar user_principal: user who gives this feedback.
:vartype user_principal: str
:param metric_id: Required. metric unique id.
:type metric_id: str
:param dimension_filter: Required.
:type dimension_filter: ~azure.ai.metricsadvisor.models.FeedbackDimensionFilter
"""
_validation = {
'feedback_type': {'required': True},
'feedback_id': {'readonly': True},
'created_time': {'readonly': True},
'user_principal': {'readonly': True},
'metric_id': {'required': True},
'dimension_filter': {'required': True},
}
_attribute_map = {
'feedback_type': {'key': 'feedbackType', 'type': 'str'},
'feedback_id': {'key': 'feedbackId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'user_principal': {'key': 'userPrincipal', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'FeedbackDimensionFilter'},
}
_subtype_map = {
'feedback_type': {'Anomaly': 'AnomalyFeedback', 'ChangePoint': 'ChangePointFeedback', 'Comment': 'CommentFeedback', 'Period': 'PeriodFeedback'}
}
def __init__(
self,
**kwargs
):
super(MetricFeedback, self).__init__(**kwargs)
self.feedback_type = None # type: Optional[str]
self.feedback_id = None
self.created_time = None
self.user_principal = None
self.metric_id = kwargs['metric_id']
self.dimension_filter = kwargs['dimension_filter']
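# Editor's note (illustrative, not part of the generated module): _subtype_map
# drives polymorphic deserialization -- the "feedbackType" discriminator returned
# by the service selects the concrete subclass. The payload below is a
# hypothetical, heavily truncated example and assumes msrest's standard
# discriminator handling.
def _example_feedback_dispatch():
    payload = {
        "feedbackType": "Comment",
        "metricId": "00000000-0000-0000-0000-000000000000",
        "dimensionFilter": {"dimension": {}},
    }
    # Expected to yield a CommentFeedback instance rather than a bare MetricFeedback.
    return MetricFeedback.deserialize(payload)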
class AnomalyFeedback(MetricFeedback):
"""AnomalyFeedback.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param feedback_type: Required. feedback type. Constant filled by server. Possible values
include: "Anomaly", "ChangePoint", "Period", "Comment".
:type feedback_type: str or ~azure.ai.metricsadvisor.models.FeedbackType
:ivar feedback_id: feedback unique id.
:vartype feedback_id: str
:ivar created_time: feedback created time.
:vartype created_time: ~datetime.datetime
:ivar user_principal: user who gives this feedback.
:vartype user_principal: str
:param metric_id: Required. metric unique id.
:type metric_id: str
:param dimension_filter: Required.
:type dimension_filter: ~azure.ai.metricsadvisor.models.FeedbackDimensionFilter
:param start_time: Required. the start timestamp of feedback time range.
:type start_time: ~datetime.datetime
:param end_time: Required. the end timestamp of feedback time range; when equal to startTime,
only a single timestamp is covered.
:type end_time: ~datetime.datetime
:param value: Required.
:type value: ~azure.ai.metricsadvisor.models.AnomalyFeedbackValue
:param anomaly_detection_configuration_id: the corresponding anomaly detection configuration of
this feedback.
:type anomaly_detection_configuration_id: str
:param anomaly_detection_configuration_snapshot:
:type anomaly_detection_configuration_snapshot:
~azure.ai.metricsadvisor.models.AnomalyDetectionConfiguration
"""
_validation = {
'feedback_type': {'required': True},
'feedback_id': {'readonly': True},
'created_time': {'readonly': True},
'user_principal': {'readonly': True},
'metric_id': {'required': True},
'dimension_filter': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'feedback_type': {'key': 'feedbackType', 'type': 'str'},
'feedback_id': {'key': 'feedbackId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'user_principal': {'key': 'userPrincipal', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'FeedbackDimensionFilter'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'AnomalyFeedbackValue'},
'anomaly_detection_configuration_id': {'key': 'anomalyDetectionConfigurationId', 'type': 'str'},
'anomaly_detection_configuration_snapshot': {'key': 'anomalyDetectionConfigurationSnapshot', 'type': 'AnomalyDetectionConfiguration'},
}
def __init__(
self,
**kwargs
):
super(AnomalyFeedback, self).__init__(**kwargs)
self.feedback_type = 'Anomaly' # type: str
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.value = kwargs['value']
self.anomaly_detection_configuration_id = kwargs.get('anomaly_detection_configuration_id', None)
self.anomaly_detection_configuration_snapshot = kwargs.get('anomaly_detection_configuration_snapshot', None)
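# Editor's usage sketch (illustrative, not part of the generated module):
# AnomalyFeedback layers a time range and an AnomalyFeedbackValue on top of the
# base MetricFeedback fields; feedback_type is pinned to 'Anomaly' by __init__.
# The identifiers below are hypothetical placeholders, and FeedbackDimensionFilter
# is assumed to accept a ``dimension`` mapping as documented elsewhere in this
# module.
def _example_anomaly_feedback():
    import datetime
    return AnomalyFeedback(
        metric_id="<metric-id>",
        dimension_filter=FeedbackDimensionFilter(dimension={"city": "Seattle"}),
        start_time=datetime.datetime(2023, 3, 1),
        end_time=datetime.datetime(2023, 3, 1),
        value=AnomalyFeedbackValue(anomaly_value="NotAnomaly"),
    )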
class AnomalyFeedbackValue(msrest.serialization.Model):
"""AnomalyFeedbackValue.
All required parameters must be populated in order to send to Azure.
:param anomaly_value: Required. Possible values include: "AutoDetect", "Anomaly",
"NotAnomaly".
:type anomaly_value: str or ~azure.ai.metricsadvisor.models.AnomalyValue
"""
_validation = {
'anomaly_value': {'required': True},
}
_attribute_map = {
'anomaly_value': {'key': 'anomalyValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyFeedbackValue, self).__init__(**kwargs)
self.anomaly_value = kwargs['anomaly_value']
class AnomalyProperty(msrest.serialization.Model):
"""AnomalyProperty.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param anomaly_severity: Required. anomaly severity. Possible values include: "Low", "Medium",
"High".
:type anomaly_severity: str or ~azure.ai.metricsadvisor.models.Severity
:ivar anomaly_status: anomaly status;
only returned for alerting anomaly results. Possible values include: "Active", "Resolved".
:vartype anomaly_status: str or ~azure.ai.metricsadvisor.models.AnomalyStatus
:ivar value: value of the anomaly.
:vartype value: float
:ivar expected_value: expected value of the anomaly given by smart detector.
:vartype expected_value: float
"""
_validation = {
'anomaly_severity': {'required': True},
'anomaly_status': {'readonly': True},
'value': {'readonly': True},
'expected_value': {'readonly': True},
}
_attribute_map = {
'anomaly_severity': {'key': 'anomalySeverity', 'type': 'str'},
'anomaly_status': {'key': 'anomalyStatus', 'type': 'str'},
'value': {'key': 'value', 'type': 'float'},
'expected_value': {'key': 'expectedValue', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(AnomalyProperty, self).__init__(**kwargs)
self.anomaly_severity = kwargs['anomaly_severity']
self.anomaly_status = None
self.value = None
self.expected_value = None
class AnomalyResult(msrest.serialization.Model):
"""AnomalyResult.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar data_feed_id: data feed unique id;
only returned for alerting anomaly results.
:vartype data_feed_id: str
:ivar metric_id: metric unique id;
only returned for alerting anomaly results.
:vartype metric_id: str
:ivar anomaly_detection_configuration_id: anomaly detection configuration unique id;
only returned for alerting anomaly results.
:vartype anomaly_detection_configuration_id: str
:param timestamp: Required. anomaly time.
:type timestamp: ~datetime.datetime
:ivar created_time: created time;
only returned for alerting results.
:vartype created_time: ~datetime.datetime
:ivar modified_time: modified time;
only returned for alerting results.
:vartype modified_time: ~datetime.datetime
:param dimension: Required. dimension specified for series.
:type dimension: dict[str, str]
:param property: Required.
:type property: ~azure.ai.metricsadvisor.models.AnomalyProperty
"""
_validation = {
'data_feed_id': {'readonly': True},
'metric_id': {'readonly': True},
'anomaly_detection_configuration_id': {'readonly': True},
'timestamp': {'required': True},
'created_time': {'readonly': True},
'modified_time': {'readonly': True},
'dimension': {'required': True},
'property': {'required': True},
}
_attribute_map = {
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'anomaly_detection_configuration_id': {'key': 'anomalyDetectionConfigurationId', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
'dimension': {'key': 'dimension', 'type': '{str}'},
'property': {'key': 'property', 'type': 'AnomalyProperty'},
}
def __init__(
self,
**kwargs
):
super(AnomalyResult, self).__init__(**kwargs)
self.data_feed_id = None
self.metric_id = None
self.anomaly_detection_configuration_id = None
self.timestamp = kwargs['timestamp']
self.created_time = None
self.modified_time = None
self.dimension = kwargs['dimension']
self.property = kwargs['property']
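# Editor's usage sketch (illustrative, not part of the generated module): only
# timestamp, dimension and property are client-settable on AnomalyResult; the id
# and time fields are readonly and populated by the service. Values below are
# hypothetical.
def _example_anomaly_result():
    import datetime
    return AnomalyResult(
        timestamp=datetime.datetime(2023, 5, 4, 12, 0, 0),
        dimension={"city": "Redmond"},
        property=AnomalyProperty(anomaly_severity="High"),
    )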
class AnomalyResultList(msrest.serialization.Model):
"""AnomalyResultList.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar next_link:
:vartype next_link: str
:param value: Required.
:type value: list[~azure.ai.metricsadvisor.models.AnomalyResult]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[AnomalyResult]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyResultList, self).__init__(**kwargs)
self.next_link = None
self.value = kwargs['value']
class DataFeedDetail(msrest.serialization.Model):
"""DataFeedDetail.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureApplicationInsightsDataFeed, AzureBlobDataFeed, AzureCosmosDBDataFeed, AzureDataExplorerDataFeed, AzureDataLakeStorageGen2DataFeed, AzureEventHubsDataFeed, AzureLogAnalyticsDataFeed, AzureTableDataFeed, InfluxDBDataFeed, MongoDBDataFeed, MySqlDataFeed, PostgreSqlDataFeed, SQLServerDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
}
_subtype_map = {
'data_source_type': {'AzureApplicationInsights': 'AzureApplicationInsightsDataFeed', 'AzureBlob': 'AzureBlobDataFeed', 'AzureCosmosDB': 'AzureCosmosDBDataFeed', 'AzureDataExplorer': 'AzureDataExplorerDataFeed', 'AzureDataLakeStorageGen2': 'AzureDataLakeStorageGen2DataFeed', 'AzureEventHubs': 'AzureEventHubsDataFeed', 'AzureLogAnalytics': 'AzureLogAnalyticsDataFeed', 'AzureTable': 'AzureTableDataFeed', 'InfluxDB': 'InfluxDBDataFeed', 'MongoDB': 'MongoDBDataFeed', 'MySql': 'MySqlDataFeed', 'PostgreSql': 'PostgreSqlDataFeed', 'SqlServer': 'SQLServerDataFeed'}
}
def __init__(
self,
**kwargs
):
super(DataFeedDetail, self).__init__(**kwargs)
self.data_source_type = None # type: Optional[str]
self.data_feed_id = None
self.data_feed_name = kwargs['data_feed_name']
self.data_feed_description = kwargs.get('data_feed_description', "")
self.granularity_name = kwargs['granularity_name']
self.granularity_amount = kwargs.get('granularity_amount', None)
self.metrics = kwargs['metrics']
self.dimension = kwargs.get('dimension', None)
self.timestamp_column = kwargs.get('timestamp_column', "")
self.data_start_from = kwargs['data_start_from']
self.start_offset_in_seconds = kwargs.get('start_offset_in_seconds', 0)
self.max_concurrency = kwargs.get('max_concurrency', -1)
self.min_retry_interval_in_seconds = kwargs.get('min_retry_interval_in_seconds', -1)
self.stop_retry_after_in_seconds = kwargs.get('stop_retry_after_in_seconds', -1)
self.need_rollup = kwargs.get('need_rollup', None)
self.roll_up_method = kwargs.get('roll_up_method', None)
self.roll_up_columns = kwargs.get('roll_up_columns', None)
self.all_up_identification = kwargs.get('all_up_identification', None)
self.fill_missing_point_type = kwargs.get('fill_missing_point_type', None)
self.fill_missing_point_value = kwargs.get('fill_missing_point_value', None)
self.view_mode = kwargs.get('view_mode', None)
self.admins = kwargs.get('admins', None)
self.viewers = kwargs.get('viewers', None)
self.is_admin = None
self.creator = None
self.status = None
self.created_time = None
self.action_link_template = kwargs.get('action_link_template', "")
self.authentication_type = kwargs.get('authentication_type', None)
self.credential_id = kwargs.get('credential_id', None)
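# Editor's note (illustrative, not part of the generated module): DataFeedDetail is
# polymorphic as well -- _subtype_map routes on "dataSourceType", so deserializing
# a service payload yields the matching concrete feed class. The payload below is
# a hypothetical, heavily truncated example and assumes msrest's standard
# discriminator handling; fields absent from the payload deserialize as None.
def _example_data_feed_dispatch():
    payload = {"dataSourceType": "AzureBlob", "dataFeedName": "sample feed"}
    return DataFeedDetail.deserialize(payload)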
class AzureApplicationInsightsDataFeed(DataFeedDetail):
"""AzureApplicationInsightsDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureApplicationInsightsParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureApplicationInsightsParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureApplicationInsightsDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureApplicationInsights' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
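# Editor's usage sketch (illustrative, not part of the generated module): a minimal
# AzureApplicationInsightsDataFeed; data_feed_name, granularity_name, metrics,
# data_start_from and data_source_parameter are required by _validation. All
# values are hypothetical, and Metric/AzureApplicationInsightsParameter are
# assumed to be the models of those names defined elsewhere in this module.
def _example_app_insights_feed():
    import datetime
    return AzureApplicationInsightsDataFeed(
        data_feed_name="requests per region",
        granularity_name="Hourly",
        metrics=[Metric(metric_name="requestCount")],
        data_start_from=datetime.datetime(2023, 1, 1),
        data_source_parameter=AzureApplicationInsightsParameter(
            query="requests | summarize count() by bin(timestamp, 1h)",
        ),
    )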
class DataFeedDetailPatch(msrest.serialization.Model):
"""DataFeedDetailPatch.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureApplicationInsightsDataFeedPatch, AzureBlobDataFeedPatch, AzureCosmosDBDataFeedPatch, AzureDataExplorerDataFeedPatch, AzureDataLakeStorageGen2DataFeedPatch, AzureEventHubsDataFeedPatch, AzureLogAnalyticsDataFeedPatch, AzureTableDataFeedPatch, InfluxDBDataFeedPatch, MongoDBDataFeedPatch, MySqlDataFeedPatch, PostgreSqlDataFeedPatch, SQLServerDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
}
_subtype_map = {
'data_source_type': {'AzureApplicationInsights': 'AzureApplicationInsightsDataFeedPatch', 'AzureBlob': 'AzureBlobDataFeedPatch', 'AzureCosmosDB': 'AzureCosmosDBDataFeedPatch', 'AzureDataExplorer': 'AzureDataExplorerDataFeedPatch', 'AzureDataLakeStorageGen2': 'AzureDataLakeStorageGen2DataFeedPatch', 'AzureEventHubs': 'AzureEventHubsDataFeedPatch', 'AzureLogAnalytics': 'AzureLogAnalyticsDataFeedPatch', 'AzureTable': 'AzureTableDataFeedPatch', 'InfluxDB': 'InfluxDBDataFeedPatch', 'MongoDB': 'MongoDBDataFeedPatch', 'MySql': 'MySqlDataFeedPatch', 'PostgreSql': 'PostgreSqlDataFeedPatch', 'SqlServer': 'SQLServerDataFeedPatch'}
}
def __init__(
self,
**kwargs
):
super(DataFeedDetailPatch, self).__init__(**kwargs)
self.data_source_type = None # type: Optional[str]
self.data_feed_name = kwargs.get('data_feed_name', None)
self.data_feed_description = kwargs.get('data_feed_description', None)
self.timestamp_column = kwargs.get('timestamp_column', None)
self.data_start_from = kwargs.get('data_start_from', None)
self.start_offset_in_seconds = kwargs.get('start_offset_in_seconds', None)
self.max_concurrency = kwargs.get('max_concurrency', None)
self.min_retry_interval_in_seconds = kwargs.get('min_retry_interval_in_seconds', None)
self.stop_retry_after_in_seconds = kwargs.get('stop_retry_after_in_seconds', None)
self.need_rollup = kwargs.get('need_rollup', None)
self.roll_up_method = kwargs.get('roll_up_method', None)
self.roll_up_columns = kwargs.get('roll_up_columns', None)
self.all_up_identification = kwargs.get('all_up_identification', None)
self.fill_missing_point_type = kwargs.get('fill_missing_point_type', None)
self.fill_missing_point_value = kwargs.get('fill_missing_point_value', None)
self.view_mode = kwargs.get('view_mode', None)
self.admins = kwargs.get('admins', None)
self.viewers = kwargs.get('viewers', None)
self.status = kwargs.get('status', None)
self.action_link_template = kwargs.get('action_link_template', None)
self.authentication_type = kwargs.get('authentication_type', None)
self.credential_id = kwargs.get('credential_id', None)
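# Editor's usage sketch (illustrative, not part of the generated module): every
# field on a *Patch model is optional, so only the properties being changed are
# set; serialize() should then emit just those keys plus the discriminator
# (assuming msrest omits None-valued attributes). AzureBlobDataFeedPatch, defined
# further below, is used as the concrete subclass here.
def _example_data_feed_patch():
    patch = AzureBlobDataFeedPatch(data_feed_description="owned by the data team")
    return patch.serialize()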
class AzureApplicationInsightsDataFeedPatch(DataFeedDetailPatch):
"""AzureApplicationInsightsDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter:
~azure.ai.metricsadvisor.models.AzureApplicationInsightsParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureApplicationInsightsParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureApplicationInsightsDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureApplicationInsights' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureApplicationInsightsParameter(msrest.serialization.Model):
"""AzureApplicationInsightsParameter.
All required parameters must be populated in order to send to Azure.
:param azure_cloud: The Azure cloud that this Azure Application Insights instance is in.
:type azure_cloud: str
:param application_id: The application id of this Azure Application Insights.
:type application_id: str
:param api_key: The API Key that can access this Azure Application Insights.
:type api_key: str
:param query: Required. The statement to query this Azure Application Insights.
:type query: str
"""
_validation = {
'query': {'required': True},
}
_attribute_map = {
'azure_cloud': {'key': 'azureCloud', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
'api_key': {'key': 'apiKey', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureApplicationInsightsParameter, self).__init__(**kwargs)
self.azure_cloud = kwargs.get('azure_cloud', None)
self.application_id = kwargs.get('application_id', None)
self.api_key = kwargs.get('api_key', None)
self.query = kwargs['query']
class AzureApplicationInsightsParameterPatch(msrest.serialization.Model):
"""AzureApplicationInsightsParameterPatch.
:param azure_cloud: The Azure cloud that this Azure Application Insights instance is in.
:type azure_cloud: str
:param application_id: The application id of this Azure Application Insights.
:type application_id: str
:param api_key: The API Key that can access this Azure Application Insights.
:type api_key: str
:param query: The statement to query this Azure Application Insights.
:type query: str
"""
_attribute_map = {
'azure_cloud': {'key': 'azureCloud', 'type': 'str'},
'application_id': {'key': 'applicationId', 'type': 'str'},
'api_key': {'key': 'apiKey', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureApplicationInsightsParameterPatch, self).__init__(**kwargs)
self.azure_cloud = kwargs.get('azure_cloud', None)
self.application_id = kwargs.get('application_id', None)
self.api_key = kwargs.get('api_key', None)
self.query = kwargs.get('query', None)
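# Editor's usage sketch (illustrative, not part of the generated module): the
# *ParameterPatch variants drop the 'required' constraints, which allows rotating
# a single secret without resending the query. The key below is a hypothetical
# placeholder.
def _example_rotate_app_insights_key():
    return AzureApplicationInsightsParameterPatch(api_key="<new-api-key>")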
class AzureBlobDataFeed(DataFeedDetail):
"""AzureBlobDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureBlobParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureBlobParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureBlobDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureBlob' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class AzureBlobDataFeedPatch(DataFeedDetailPatch):
"""AzureBlobDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureBlobParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureBlobParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureBlobDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureBlob' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureBlobParameter(msrest.serialization.Model):
"""AzureBlobParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this Azure Blob.
:type connection_string: str
:param container: Required. The container name in this Azure Blob.
:type container: str
:param blob_template: Required. The path template in this container.
:type blob_template: str
"""
_validation = {
'container': {'required': True},
'blob_template': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container': {'key': 'container', 'type': 'str'},
'blob_template': {'key': 'blobTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureBlobParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.container = kwargs['container']
self.blob_template = kwargs['blob_template']
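# Editor's usage sketch (illustrative, not part of the generated module): wiring an
# AzureBlobParameter into an AzureBlobDataFeed; container and blob_template are
# required by _validation, while the connection string stays optional (for example
# when a managed identity is used instead). All values are hypothetical, and the
# Metric model is assumed to be the one defined elsewhere in this module.
def _example_blob_feed():
    import datetime
    return AzureBlobDataFeed(
        data_feed_name="daily sales",
        granularity_name="Daily",
        metrics=[Metric(metric_name="revenue")],
        data_start_from=datetime.datetime(2023, 1, 1),
        data_source_parameter=AzureBlobParameter(
            container="sales",
            blob_template="sales/%Y/%m/%d/data.json",
        ),
    )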
class AzureBlobParameterPatch(msrest.serialization.Model):
"""AzureBlobParameterPatch.
:param connection_string: The connection string of this Azure Blob.
:type connection_string: str
:param container: The container name in this Azure Blob.
:type container: str
:param blob_template: The path template in this container.
:type blob_template: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container': {'key': 'container', 'type': 'str'},
'blob_template': {'key': 'blobTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureBlobParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.container = kwargs.get('container', None)
self.blob_template = kwargs.get('blob_template', None)
class AzureCosmosDBDataFeed(DataFeedDetail):
"""AzureCosmosDBDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureCosmosDBParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureCosmosDBParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureCosmosDBDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureCosmosDB' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
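def _example_azure_cosmos_db_data_feed(metric_list):
    # Illustrative sketch only; values are placeholders and ``metric_list`` is
    # assumed to be a list of Metric models built elsewhere. data_source_type is
    # fixed to 'AzureCosmosDB' by __init__, so callers never pass it, while the
    # remaining required fields must be supplied as kwargs. AzureCosmosDBParameter
    # is defined later in this module; the name resolves when the function is called.
    import datetime  # local import keeps the sketch self-contained

    return AzureCosmosDBDataFeed(
        data_feed_name="cosmos-sales-feed",
        granularity_name="Daily",
        metrics=metric_list,
        data_start_from=datetime.datetime(2023, 1, 1),
        data_source_parameter=AzureCosmosDBParameter(
            connection_string="AccountEndpoint=https://example.documents.azure.com:443/;AccountKey=<placeholder>",
            sql_query="SELECT * FROM c WHERE c.timestamp >= @StartTime",
            database="sales",
            collection_id="daily-metrics",
        ),
    )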
class AzureCosmosDBDataFeedPatch(DataFeedDetailPatch):
"""AzureCosmosDBDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureCosmosDBParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureCosmosDBParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureCosmosDBDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureCosmosDB' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureCosmosDBParameter(msrest.serialization.Model):
"""AzureCosmosDBParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this Azure CosmosDB.
:type connection_string: str
:param sql_query: Required. The statement to query this collection.
:type sql_query: str
:param database: Required. A database name in this Azure CosmosDB.
:type database: str
:param collection_id: Required. A collection id in this database.
:type collection_id: str
"""
_validation = {
'sql_query': {'required': True},
'database': {'required': True},
'collection_id': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'sql_query': {'key': 'sqlQuery', 'type': 'str'},
'database': {'key': 'database', 'type': 'str'},
'collection_id': {'key': 'collectionId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureCosmosDBParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.sql_query = kwargs['sql_query']
self.database = kwargs['database']
self.collection_id = kwargs['collection_id']
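def _example_cosmos_parameter_wire_format():
    # Illustrative sketch only. _attribute_map drives msrest serialization, mapping
    # the snake_case attributes above onto camelCase wire keys; Model.serialize()
    # is the standard msrest helper for producing that payload, and attributes left
    # as None are skipped. Values are placeholders.
    param = AzureCosmosDBParameter(
        sql_query="SELECT * FROM c",
        database="sales",
        collection_id="daily-metrics",
    )
    payload = param.serialize()
    # payload == {'sqlQuery': 'SELECT * FROM c', 'database': 'sales',
    #             'collectionId': 'daily-metrics'}
    return payload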
class AzureCosmosDBParameterPatch(msrest.serialization.Model):
"""AzureCosmosDBParameterPatch.
:param connection_string: The connection string of this Azure CosmosDB.
:type connection_string: str
:param sql_query: The statement to query this collection.
:type sql_query: str
:param database: A database name in this Azure CosmosDB.
:type database: str
:param collection_id: A collection id in this database.
:type collection_id: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'sql_query': {'key': 'sqlQuery', 'type': 'str'},
'database': {'key': 'database', 'type': 'str'},
'collection_id': {'key': 'collectionId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureCosmosDBParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.sql_query = kwargs.get('sql_query', None)
self.database = kwargs.get('database', None)
self.collection_id = kwargs.get('collection_id', None)
class AzureDataExplorerDataFeed(DataFeedDetail):
"""AzureDataExplorerDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SqlSourceParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SqlSourceParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureDataExplorerDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureDataExplorer' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class AzureDataExplorerDataFeedPatch(DataFeedDetailPatch):
"""AzureDataExplorerDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SQLSourceParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SQLSourceParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureDataExplorerDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureDataExplorer' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
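def _example_data_explorer_feed_patch():
    # Illustrative sketch only; values are placeholders. A DataFeedDetailPatch
    # subclass is meant for partial updates: per its _validation block only
    # data_source_type is required, and __init__ fills that in, so this object
    # carries nothing but the fields being changed.
    return AzureDataExplorerDataFeedPatch(
        data_feed_description="Now ingesting the cleaned telemetry table",
        fill_missing_point_type="PreviousValue",
    )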
class AzureDataLakeStorageGen2DataFeed(DataFeedDetail):
"""AzureDataLakeStorageGen2DataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureDataLakeStorageGen2Parameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureDataLakeStorageGen2Parameter'},
}
def __init__(
self,
**kwargs
):
super(AzureDataLakeStorageGen2DataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureDataLakeStorageGen2' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class AzureDataLakeStorageGen2DataFeedPatch(DataFeedDetailPatch):
"""AzureDataLakeStorageGen2DataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter:
~azure.ai.metricsadvisor.models.AzureDataLakeStorageGen2ParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureDataLakeStorageGen2ParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureDataLakeStorageGen2DataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureDataLakeStorageGen2' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureDataLakeStorageGen2Parameter(msrest.serialization.Model):
"""AzureDataLakeStorageGen2Parameter.
All required parameters must be populated in order to send to Azure.
:param account_name: The account name of this Azure Data Lake.
:type account_name: str
:param account_key: The account key that can access this Azure Data Lake.
:type account_key: str
:param file_system_name: Required. The file system (container) name in this Azure Data Lake.
:type file_system_name: str
:param directory_template: Required. The directory template under this file system.
:type directory_template: str
:param file_template: Required. The file template.
:type file_template: str
"""
_validation = {
'file_system_name': {'required': True},
'directory_template': {'required': True},
'file_template': {'required': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'account_key': {'key': 'accountKey', 'type': 'str'},
'file_system_name': {'key': 'fileSystemName', 'type': 'str'},
'directory_template': {'key': 'directoryTemplate', 'type': 'str'},
'file_template': {'key': 'fileTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureDataLakeStorageGen2Parameter, self).__init__(**kwargs)
self.account_name = kwargs.get('account_name', None)
self.account_key = kwargs.get('account_key', None)
self.file_system_name = kwargs['file_system_name']
self.directory_template = kwargs['directory_template']
self.file_template = kwargs['file_template']
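def _example_adls_gen2_parameter_from_dict():
    # Illustrative sketch only; values are placeholders. msrest's Model.from_dict
    # accepts either the Python attribute names or the REST (camelCase) keys, which
    # is convenient when rebuilding a model from a stored JSON payload. The payload
    # below includes every required field, so __init__'s kwargs lookups succeed.
    payload = {
        "accountName": "exampleaccount",
        "fileSystemName": "metrics",
        "directoryTemplate": "%Y/%m/%d",
        "fileTemplate": "metrics_%h.json",
    }
    return AzureDataLakeStorageGen2Parameter.from_dict(payload)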
class AzureDataLakeStorageGen2ParameterPatch(msrest.serialization.Model):
"""AzureDataLakeStorageGen2ParameterPatch.
:param account_name: The account name of this Azure Data Lake.
:type account_name: str
:param account_key: The account key that can access this Azure Data Lake.
:type account_key: str
:param file_system_name: The file system (container) name in this Azure Data Lake.
:type file_system_name: str
:param directory_template: The directory template under this file system.
:type directory_template: str
:param file_template: The file template.
:type file_template: str
"""
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'account_key': {'key': 'accountKey', 'type': 'str'},
'file_system_name': {'key': 'fileSystemName', 'type': 'str'},
'directory_template': {'key': 'directoryTemplate', 'type': 'str'},
'file_template': {'key': 'fileTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureDataLakeStorageGen2ParameterPatch, self).__init__(**kwargs)
self.account_name = kwargs.get('account_name', None)
self.account_key = kwargs.get('account_key', None)
self.file_system_name = kwargs.get('file_system_name', None)
self.directory_template = kwargs.get('directory_template', None)
self.file_template = kwargs.get('file_template', None)
class AzureEventHubsDataFeed(DataFeedDetail):
"""AzureEventHubsDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureEventHubsParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureEventHubsParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureEventHubsDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureEventHubs' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class AzureEventHubsDataFeedPatch(DataFeedDetailPatch):
"""AzureEventHubsDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the offset, in seconds, by which the start of the data
ingestion task is delayed for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureEventHubsParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureEventHubsParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureEventHubsDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureEventHubs' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureEventHubsParameter(msrest.serialization.Model):
"""AzureEventHubsParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this Azure Event Hubs.
:type connection_string: str
:param consumer_group: Required. The consumer group to be used in this data feed.
:type consumer_group: str
"""
_validation = {
'consumer_group': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'consumer_group': {'key': 'consumerGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureEventHubsParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.consumer_group = kwargs['consumer_group']
class AzureEventHubsParameterPatch(msrest.serialization.Model):
"""AzureEventHubsParameterPatch.
:param connection_string: The connection string of this Azure Event Hubs.
:type connection_string: str
:param consumer_group: The consumer group to be used in this data feed.
:type consumer_group: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'consumer_group': {'key': 'consumerGroup', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureEventHubsParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.consumer_group = kwargs.get('consumer_group', None)
class AzureLogAnalyticsDataFeed(DataFeedDetail):
"""AzureLogAnalyticsDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: required if granularity is custom.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time by which the start of the data ingestion task is
delayed for every data slice, according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion after this many seconds from
the data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureLogAnalyticsParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureLogAnalyticsParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureLogAnalyticsDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureLogAnalytics' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class AzureLogAnalyticsDataFeedPatch(DataFeedDetailPatch):
"""AzureLogAnalyticsDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time by which the start of the data ingestion task is
delayed for every data slice, according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion after this many seconds from
the data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureLogAnalyticsParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureLogAnalyticsParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureLogAnalyticsDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureLogAnalytics' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureLogAnalyticsParameter(msrest.serialization.Model):
"""AzureLogAnalyticsParameter.
All required parameters must be populated in order to send to Azure.
:param tenant_id: The tenant id of the service principal that has access to this Log Analytics.
:type tenant_id: str
:param client_id: The client id of the service principal that has access to this Log Analytics.
:type client_id: str
:param client_secret: The client secret of the service principal that has access to this Log
Analytics.
:type client_secret: str
:param workspace_id: Required. The workspace id of this Log Analytics.
:type workspace_id: str
:param query: Required. The KQL (Kusto Query Language) query to fetch data from this Log
Analytics.
:type query: str
"""
_validation = {
'workspace_id': {'required': True},
'query': {'required': True},
}
_attribute_map = {
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureLogAnalyticsParameter, self).__init__(**kwargs)
self.tenant_id = kwargs.get('tenant_id', None)
self.client_id = kwargs.get('client_id', None)
self.client_secret = kwargs.get('client_secret', None)
self.workspace_id = kwargs['workspace_id']
self.query = kwargs['query']
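# Example (illustrative sketch, not part of the generated model definitions): constructing an
# AzureLogAnalyticsParameter with service-principal credentials and a KQL query. All values
# below are placeholders; workspace_id and query are required by _validation.
def _example_azure_log_analytics_parameter():
    return AzureLogAnalyticsParameter(
        tenant_id="<tenant-id>",
        client_id="<client-id>",
        client_secret="<client-secret>",
        workspace_id="<log-analytics-workspace-id>",
        query="Heartbeat | summarize count() by bin(TimeGenerated, 1h)",  # placeholder KQL
    )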
class AzureLogAnalyticsParameterPatch(msrest.serialization.Model):
"""AzureLogAnalyticsParameterPatch.
:param tenant_id: The tenant id of the service principal that has access to this Log Analytics.
:type tenant_id: str
:param client_id: The client id of the service principal that has access to this Log Analytics.
:type client_id: str
:param client_secret: The client secret of the service principal that has access to this Log
Analytics.
:type client_secret: str
:param workspace_id: The workspace id of this Log Analytics.
:type workspace_id: str
:param query: The KQL (Kusto Query Language) query to fetch data from this Log Analytics.
:type query: str
"""
_attribute_map = {
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureLogAnalyticsParameterPatch, self).__init__(**kwargs)
self.tenant_id = kwargs.get('tenant_id', None)
self.client_id = kwargs.get('client_id', None)
self.client_secret = kwargs.get('client_secret', None)
self.workspace_id = kwargs.get('workspace_id', None)
self.query = kwargs.get('query', None)
class DataSourceCredential(msrest.serialization.Model):
"""DataSourceCredential.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureSQLConnectionStringCredential, DataLakeGen2SharedKeyCredential, ServicePrincipalCredential, ServicePrincipalInKVCredential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:ivar data_source_credential_id: Unique id of data source credential.
:vartype data_source_credential_id: str
:param data_source_credential_name: Required. Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
"""
_validation = {
'data_source_credential_type': {'required': True},
'data_source_credential_id': {'readonly': True},
'data_source_credential_name': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_id': {'key': 'dataSourceCredentialId', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
}
_subtype_map = {
'data_source_credential_type': {'AzureSQLConnectionString': 'AzureSQLConnectionStringCredential', 'DataLakeGen2SharedKey': 'DataLakeGen2SharedKeyCredential', 'ServicePrincipal': 'ServicePrincipalCredential', 'ServicePrincipalInKV': 'ServicePrincipalInKVCredential'}
}
def __init__(
self,
**kwargs
):
super(DataSourceCredential, self).__init__(**kwargs)
self.data_source_credential_type = None # type: Optional[str]
self.data_source_credential_id = None
self.data_source_credential_name = kwargs['data_source_credential_name']
self.data_source_credential_description = kwargs.get('data_source_credential_description', None)
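# Example (illustrative sketch, not part of the generated model definitions): the _subtype_map
# above lets msrest dispatch a generic DataSourceCredential payload to the matching subclass
# based on the "dataSourceCredentialType" discriminator. This assumes msrest's standard
# Model.deserialize behaviour; raw_payload is a dict using the REST wire names.
def _example_polymorphic_credential_deserialization(raw_payload):
    # e.g. raw_payload = {"dataSourceCredentialType": "ServicePrincipal",
    #                     "dataSourceCredentialName": "<credential-name>"}
    return DataSourceCredential.deserialize(raw_payload)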
class AzureSQLConnectionStringCredential(DataSourceCredential):
"""AzureSQLConnectionStringCredential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:ivar data_source_credential_id: Unique id of data source credential.
:vartype data_source_credential_id: str
:param data_source_credential_name: Required. Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters: Required.
:type parameters: ~azure.ai.metricsadvisor.models.AzureSQLConnectionStringParam
"""
_validation = {
'data_source_credential_type': {'required': True},
'data_source_credential_id': {'readonly': True},
'data_source_credential_name': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_id': {'key': 'dataSourceCredentialId', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'AzureSQLConnectionStringParam'},
}
def __init__(
self,
**kwargs
):
super(AzureSQLConnectionStringCredential, self).__init__(**kwargs)
self.data_source_credential_type = 'AzureSQLConnectionString' # type: str
self.parameters = kwargs['parameters']
class DataSourceCredentialPatch(msrest.serialization.Model):
"""DataSourceCredentialPatch.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureSQLConnectionStringCredentialPatch, DataLakeGen2SharedKeyCredentialPatch, ServicePrincipalCredentialPatch, ServicePrincipalInKVCredentialPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:param data_source_credential_name: Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
"""
_validation = {
'data_source_credential_type': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
}
_subtype_map = {
'data_source_credential_type': {'AzureSQLConnectionString': 'AzureSQLConnectionStringCredentialPatch', 'DataLakeGen2SharedKey': 'DataLakeGen2SharedKeyCredentialPatch', 'ServicePrincipal': 'ServicePrincipalCredentialPatch', 'ServicePrincipalInKV': 'ServicePrincipalInKVCredentialPatch'}
}
def __init__(
self,
**kwargs
):
super(DataSourceCredentialPatch, self).__init__(**kwargs)
self.data_source_credential_type = None # type: Optional[str]
self.data_source_credential_name = kwargs.get('data_source_credential_name', None)
self.data_source_credential_description = kwargs.get('data_source_credential_description', None)
class AzureSQLConnectionStringCredentialPatch(DataSourceCredentialPatch):
"""AzureSQLConnectionStringCredentialPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:param data_source_credential_name: Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters:
:type parameters: ~azure.ai.metricsadvisor.models.AzureSQLConnectionStringParamPatch
"""
_validation = {
'data_source_credential_type': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'AzureSQLConnectionStringParamPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureSQLConnectionStringCredentialPatch, self).__init__(**kwargs)
self.data_source_credential_type = 'AzureSQLConnectionString' # type: str
self.parameters = kwargs.get('parameters', None)
class AzureSQLConnectionStringParam(msrest.serialization.Model):
"""AzureSQLConnectionStringParam.
:param connection_string: The connection string to access the Azure SQL.
:type connection_string: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureSQLConnectionStringParam, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
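# Example (illustrative sketch, not part of the generated model definitions): an
# AzureSQLConnectionStringCredential wraps an AzureSQLConnectionStringParam in its required
# "parameters" field; the name and connection string below are placeholders.
def _example_azure_sql_connection_string_credential():
    return AzureSQLConnectionStringCredential(
        data_source_credential_name="my-sql-credential",
        data_source_credential_description="connection string credential for Azure SQL",
        parameters=AzureSQLConnectionStringParam(
            connection_string="<azure-sql-connection-string>",
        ),
    )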
class AzureSQLConnectionStringParamPatch(msrest.serialization.Model):
"""AzureSQLConnectionStringParamPatch.
:param connection_string: The connection string to access the Azure SQL.
:type connection_string: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureSQLConnectionStringParamPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
class AzureTableDataFeed(DataFeedDetail):
"""AzureTableDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: required if granularity is custom.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time by which the start of the data ingestion task is
delayed for every data slice, according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion after this many seconds from
the data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureTableParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureTableParameter'},
}
def __init__(
self,
**kwargs
):
super(AzureTableDataFeed, self).__init__(**kwargs)
self.data_source_type = 'AzureTable' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class AzureTableDataFeedPatch(DataFeedDetailPatch):
"""AzureTableDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time by which the start of the data ingestion task is
delayed for every data slice, according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion after this many seconds from
the data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.AzureTableParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'AzureTableParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(AzureTableDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'AzureTable' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class AzureTableParameter(msrest.serialization.Model):
"""AzureTableParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this Azure Table.
:type connection_string: str
:param table: Required. A table name in this Azure Table.
:type table: str
:param query: Required. The statement to query this table. See the Azure Table documentation
for query syntax and details.
:type query: str
"""
_validation = {
'table': {'required': True},
'query': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'table': {'key': 'table', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureTableParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.table = kwargs['table']
self.query = kwargs['query']
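# Example (illustrative sketch, not part of the generated model definitions): constructing an
# AzureTableParameter. table and query are required by _validation; all values below are
# placeholders.
def _example_azure_table_parameter():
    return AzureTableParameter(
        connection_string="<azure-table-connection-string>",
        table="MetricsTable",
        query="<azure-table-query-filter>",
    )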
class AzureTableParameterPatch(msrest.serialization.Model):
"""AzureTableParameterPatch.
:param connection_string: The connection string of this Azure Table.
:type connection_string: str
:param table: A table name in this Azure Table.
:type table: str
:param query: The statement to query this table. See the Azure Table documentation for query
syntax and details.
:type query: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'table': {'key': 'table', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureTableParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.table = kwargs.get('table', None)
self.query = kwargs.get('query', None)
class ChangePointFeedback(MetricFeedback):
"""ChangePointFeedback.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param feedback_type: Required. feedback type. Constant filled by server. Possible values
include: "Anomaly", "ChangePoint", "Period", "Comment".
:type feedback_type: str or ~azure.ai.metricsadvisor.models.FeedbackType
:ivar feedback_id: feedback unique id.
:vartype feedback_id: str
:ivar created_time: feedback created time.
:vartype created_time: ~datetime.datetime
:ivar user_principal: user who gives this feedback.
:vartype user_principal: str
:param metric_id: Required. metric unique id.
:type metric_id: str
:param dimension_filter: Required.
:type dimension_filter: ~azure.ai.metricsadvisor.models.FeedbackDimensionFilter
:param start_time: Required. the start timestamp of feedback time range.
:type start_time: ~datetime.datetime
:param end_time: Required. the end timestamp of the feedback time range; when equal to
startTime, only a single timestamp is covered.
:type end_time: ~datetime.datetime
:param value: Required.
:type value: ~azure.ai.metricsadvisor.models.ChangePointFeedbackValue
"""
_validation = {
'feedback_type': {'required': True},
'feedback_id': {'readonly': True},
'created_time': {'readonly': True},
'user_principal': {'readonly': True},
'metric_id': {'required': True},
'dimension_filter': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'feedback_type': {'key': 'feedbackType', 'type': 'str'},
'feedback_id': {'key': 'feedbackId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'user_principal': {'key': 'userPrincipal', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'FeedbackDimensionFilter'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'ChangePointFeedbackValue'},
}
def __init__(
self,
**kwargs
):
super(ChangePointFeedback, self).__init__(**kwargs)
self.feedback_type = 'ChangePoint' # type: str
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.value = kwargs['value']
class ChangePointFeedbackValue(msrest.serialization.Model):
"""ChangePointFeedbackValue.
All required parameters must be populated in order to send to Azure.
:param change_point_value: Required. Possible values include: "AutoDetect", "ChangePoint",
"NotChangePoint".
:type change_point_value: str or ~azure.ai.metricsadvisor.models.ChangePointValue
"""
_validation = {
'change_point_value': {'required': True},
}
_attribute_map = {
'change_point_value': {'key': 'changePointValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ChangePointFeedbackValue, self).__init__(**kwargs)
self.change_point_value = kwargs['change_point_value']
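# Example (illustrative sketch, not part of the generated model definitions): constructing a
# ChangePointFeedback. This assumes FeedbackDimensionFilter (defined elsewhere in this module)
# accepts a ``dimension`` keyword holding a name/value mapping; the metric id, dimension value
# and timestamps below are placeholders.
def _example_change_point_feedback():
    import datetime
    return ChangePointFeedback(
        metric_id="<metric-id>",
        dimension_filter=FeedbackDimensionFilter(dimension={"city": "Seattle"}),
        start_time=datetime.datetime(2021, 1, 1),
        end_time=datetime.datetime(2021, 1, 2),
        value=ChangePointFeedbackValue(change_point_value="ChangePoint"),
    )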
class ChangeThresholdCondition(msrest.serialization.Model):
"""ChangeThresholdCondition.
All required parameters must be populated in order to send to Azure.
:param change_percentage: Required. change percentage, value range : [0, +∞).
:type change_percentage: float
:param shift_point: Required. shift point, value range : [1, +∞).
:type shift_point: int
:param within_range: Required. if withinRange = true, detected data is abnormal when the
value falls within the range (in this case anomalyDetectorDirection must be Both);
if withinRange = false, detected data is abnormal when the value falls outside the range.
:type within_range: bool
:param anomaly_detector_direction: Required. detection direction. Possible values include:
"Both", "Down", "Up".
:type anomaly_detector_direction: str or
~azure.ai.metricsadvisor.models.AnomalyDetectorDirection
:param suppress_condition: Required.
:type suppress_condition: ~azure.ai.metricsadvisor.models.SuppressCondition
"""
_validation = {
'change_percentage': {'required': True},
'shift_point': {'required': True},
'within_range': {'required': True},
'anomaly_detector_direction': {'required': True},
'suppress_condition': {'required': True},
}
_attribute_map = {
'change_percentage': {'key': 'changePercentage', 'type': 'float'},
'shift_point': {'key': 'shiftPoint', 'type': 'int'},
'within_range': {'key': 'withinRange', 'type': 'bool'},
'anomaly_detector_direction': {'key': 'anomalyDetectorDirection', 'type': 'str'},
'suppress_condition': {'key': 'suppressCondition', 'type': 'SuppressCondition'},
}
def __init__(
self,
**kwargs
):
super(ChangeThresholdCondition, self).__init__(**kwargs)
self.change_percentage = kwargs['change_percentage']
self.shift_point = kwargs['shift_point']
self.within_range = kwargs['within_range']
self.anomaly_detector_direction = kwargs['anomaly_detector_direction']
self.suppress_condition = kwargs['suppress_condition']
class ChangeThresholdConditionPatch(msrest.serialization.Model):
"""ChangeThresholdConditionPatch.
:param change_percentage: change percentage, value range : [0, +∞).
:type change_percentage: float
:param shift_point: shift point, value range : [1, +∞).
:type shift_point: int
:param within_range: if withinRange = true, detected data is abnormal when the value falls
within the range (in this case anomalyDetectorDirection must be Both);
if withinRange = false, detected data is abnormal when the value falls outside the range.
:type within_range: bool
:param anomaly_detector_direction: detection direction. Possible values include: "Both",
"Down", "Up".
:type anomaly_detector_direction: str or
~azure.ai.metricsadvisor.models.AnomalyDetectorDirection
:param suppress_condition:
:type suppress_condition: ~azure.ai.metricsadvisor.models.SuppressConditionPatch
"""
_attribute_map = {
'change_percentage': {'key': 'changePercentage', 'type': 'float'},
'shift_point': {'key': 'shiftPoint', 'type': 'int'},
'within_range': {'key': 'withinRange', 'type': 'bool'},
'anomaly_detector_direction': {'key': 'anomalyDetectorDirection', 'type': 'str'},
'suppress_condition': {'key': 'suppressCondition', 'type': 'SuppressConditionPatch'},
}
def __init__(
self,
**kwargs
):
super(ChangeThresholdConditionPatch, self).__init__(**kwargs)
self.change_percentage = kwargs.get('change_percentage', None)
self.shift_point = kwargs.get('shift_point', None)
self.within_range = kwargs.get('within_range', None)
self.anomaly_detector_direction = kwargs.get('anomaly_detector_direction', None)
self.suppress_condition = kwargs.get('suppress_condition', None)
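# Example (illustrative sketch, not part of the generated model definitions): a
# ChangeThresholdConditionPatch only carries the fields being updated, so a partial update
# such as raising the change percentage can omit everything else.
def _example_change_threshold_condition_patch():
    return ChangeThresholdConditionPatch(
        change_percentage=20.0,
        anomaly_detector_direction="Both",
    )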
class CommentFeedback(MetricFeedback):
"""CommentFeedback.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param feedback_type: Required. feedback type. Constant filled by server. Possible values
include: "Anomaly", "ChangePoint", "Period", "Comment".
:type feedback_type: str or ~azure.ai.metricsadvisor.models.FeedbackType
:ivar feedback_id: feedback unique id.
:vartype feedback_id: str
:ivar created_time: feedback created time.
:vartype created_time: ~datetime.datetime
:ivar user_principal: user who gives this feedback.
:vartype user_principal: str
:param metric_id: Required. metric unique id.
:type metric_id: str
:param dimension_filter: Required.
:type dimension_filter: ~azure.ai.metricsadvisor.models.FeedbackDimensionFilter
:param start_time: the start timestamp of feedback time range.
:type start_time: ~datetime.datetime
:param end_time: the end timestamp of the feedback time range; when equal to startTime, only
a single timestamp is covered.
:type end_time: ~datetime.datetime
:param value: Required.
:type value: ~azure.ai.metricsadvisor.models.CommentFeedbackValue
"""
_validation = {
'feedback_type': {'required': True},
'feedback_id': {'readonly': True},
'created_time': {'readonly': True},
'user_principal': {'readonly': True},
'metric_id': {'required': True},
'dimension_filter': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'feedback_type': {'key': 'feedbackType', 'type': 'str'},
'feedback_id': {'key': 'feedbackId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'user_principal': {'key': 'userPrincipal', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'FeedbackDimensionFilter'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'CommentFeedbackValue'},
}
def __init__(
self,
**kwargs
):
super(CommentFeedback, self).__init__(**kwargs)
self.feedback_type = 'Comment' # type: str
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.value = kwargs['value']
class CommentFeedbackValue(msrest.serialization.Model):
"""CommentFeedbackValue.
All required parameters must be populated in order to send to Azure.
:param comment_value: Required. the comment string.
:type comment_value: str
"""
_validation = {
'comment_value': {'required': True},
}
_attribute_map = {
'comment_value': {'key': 'commentValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CommentFeedbackValue, self).__init__(**kwargs)
self.comment_value = kwargs['comment_value']
class DataFeedIngestionProgress(msrest.serialization.Model):
"""DataFeedIngestionProgress.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar latest_success_timestamp: the timestamp of the latest successful ingestion job.
null indicates not available.
:vartype latest_success_timestamp: ~datetime.datetime
:ivar latest_active_timestamp: the timestamp of the latest ingestion job with a status update.
null indicates not available.
:vartype latest_active_timestamp: ~datetime.datetime
"""
_validation = {
'latest_success_timestamp': {'readonly': True},
'latest_active_timestamp': {'readonly': True},
}
_attribute_map = {
'latest_success_timestamp': {'key': 'latestSuccessTimestamp', 'type': 'iso-8601'},
'latest_active_timestamp': {'key': 'latestActiveTimestamp', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(DataFeedIngestionProgress, self).__init__(**kwargs)
self.latest_success_timestamp = None
self.latest_active_timestamp = None
class DataFeedList(msrest.serialization.Model):
"""DataFeedList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.DataFeedDetail]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DataFeedDetail]'},
}
def __init__(
self,
**kwargs
):
super(DataFeedList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class DataLakeGen2SharedKeyCredential(DataSourceCredential):
"""DataLakeGen2SharedKeyCredential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:ivar data_source_credential_id: Unique id of data source credential.
:vartype data_source_credential_id: str
:param data_source_credential_name: Required. Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters: Required.
:type parameters: ~azure.ai.metricsadvisor.models.DataLakeGen2SharedKeyParam
"""
_validation = {
'data_source_credential_type': {'required': True},
'data_source_credential_id': {'readonly': True},
'data_source_credential_name': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_id': {'key': 'dataSourceCredentialId', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'DataLakeGen2SharedKeyParam'},
}
def __init__(
self,
**kwargs
):
super(DataLakeGen2SharedKeyCredential, self).__init__(**kwargs)
self.data_source_credential_type = 'DataLakeGen2SharedKey' # type: str
self.parameters = kwargs['parameters']
class DataLakeGen2SharedKeyCredentialPatch(DataSourceCredentialPatch):
"""DataLakeGen2SharedKeyCredentialPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:param data_source_credential_name: Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters:
:type parameters: ~azure.ai.metricsadvisor.models.DataLakeGen2SharedKeyParamPatch
"""
_validation = {
'data_source_credential_type': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'DataLakeGen2SharedKeyParamPatch'},
}
def __init__(
self,
**kwargs
):
super(DataLakeGen2SharedKeyCredentialPatch, self).__init__(**kwargs)
self.data_source_credential_type = 'DataLakeGen2SharedKey' # type: str
self.parameters = kwargs.get('parameters', None)
class DataLakeGen2SharedKeyParam(msrest.serialization.Model):
"""DataLakeGen2SharedKeyParam.
:param account_key: The account key to access the Azure Data Lake Storage Gen2.
:type account_key: str
"""
_attribute_map = {
'account_key': {'key': 'accountKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataLakeGen2SharedKeyParam, self).__init__(**kwargs)
self.account_key = kwargs.get('account_key', None)
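# Example (illustrative sketch, not part of the generated model definitions): a
# DataLakeGen2SharedKeyCredential wraps a DataLakeGen2SharedKeyParam in its required
# "parameters" field; the name and account key below are placeholders.
def _example_data_lake_gen2_shared_key_credential():
    return DataLakeGen2SharedKeyCredential(
        data_source_credential_name="my-datalake-credential",
        parameters=DataLakeGen2SharedKeyParam(account_key="<storage-account-key>"),
    )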
class DataLakeGen2SharedKeyParamPatch(msrest.serialization.Model):
"""DataLakeGen2SharedKeyParamPatch.
:param account_key: The account key to access the Azure Data Lake Storage Gen2.
:type account_key: str
"""
_attribute_map = {
'account_key': {'key': 'accountKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataLakeGen2SharedKeyParamPatch, self).__init__(**kwargs)
self.account_key = kwargs.get('account_key', None)
class DataSourceCredentialList(msrest.serialization.Model):
"""DataSourceCredentialList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.DataSourceCredential]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True, 'unique': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DataSourceCredential]'},
}
def __init__(
self,
**kwargs
):
super(DataSourceCredentialList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class DetectionAnomalyFilterCondition(msrest.serialization.Model):
"""DetectionAnomalyFilterCondition.
:param dimension_filter: dimension filter.
:type dimension_filter: list[~azure.ai.metricsadvisor.models.DimensionGroupIdentity]
:param severity_filter:
:type severity_filter: ~azure.ai.metricsadvisor.models.SeverityFilterCondition
"""
_validation = {
'dimension_filter': {'unique': True},
}
_attribute_map = {
'dimension_filter': {'key': 'dimensionFilter', 'type': '[DimensionGroupIdentity]'},
'severity_filter': {'key': 'severityFilter', 'type': 'SeverityFilterCondition'},
}
def __init__(
self,
**kwargs
):
super(DetectionAnomalyFilterCondition, self).__init__(**kwargs)
self.dimension_filter = kwargs.get('dimension_filter', None)
self.severity_filter = kwargs.get('severity_filter', None)
class DetectionAnomalyResultQuery(msrest.serialization.Model):
"""DetectionAnomalyResultQuery.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. start time.
:type start_time: ~datetime.datetime
:param end_time: Required. end time.
:type end_time: ~datetime.datetime
:param filter:
:type filter: ~azure.ai.metricsadvisor.models.DetectionAnomalyFilterCondition
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'filter': {'key': 'filter', 'type': 'DetectionAnomalyFilterCondition'},
}
def __init__(
self,
**kwargs
):
super(DetectionAnomalyResultQuery, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.filter = kwargs.get('filter', None)
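# Example (illustrative sketch, not part of the generated model definitions): a
# DetectionAnomalyResultQuery only requires the time range; the optional filter (by dimension
# group and/or severity) can be omitted for an unfiltered query. Timestamps are placeholders.
def _example_detection_anomaly_result_query():
    import datetime
    return DetectionAnomalyResultQuery(
        start_time=datetime.datetime(2021, 1, 1),
        end_time=datetime.datetime(2021, 1, 8),
    )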
class DetectionIncidentFilterCondition(msrest.serialization.Model):
"""DetectionIncidentFilterCondition.
:param dimension_filter: dimension filter.
:type dimension_filter: list[~azure.ai.metricsadvisor.models.DimensionGroupIdentity]
"""
_validation = {
'dimension_filter': {'unique': True},
}
_attribute_map = {
'dimension_filter': {'key': 'dimensionFilter', 'type': '[DimensionGroupIdentity]'},
}
def __init__(
self,
**kwargs
):
super(DetectionIncidentFilterCondition, self).__init__(**kwargs)
self.dimension_filter = kwargs.get('dimension_filter', None)
class DetectionIncidentResultQuery(msrest.serialization.Model):
"""DetectionIncidentResultQuery.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. start time.
:type start_time: ~datetime.datetime
:param end_time: Required. end time.
:type end_time: ~datetime.datetime
:param filter:
:type filter: ~azure.ai.metricsadvisor.models.DetectionIncidentFilterCondition
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'filter': {'key': 'filter', 'type': 'DetectionIncidentFilterCondition'},
}
def __init__(
self,
**kwargs
):
super(DetectionIncidentResultQuery, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.filter = kwargs.get('filter', None)
class DetectionSeriesQuery(msrest.serialization.Model):
"""DetectionSeriesQuery.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. This is inclusive. The maximum number of data points (series
number * time range) is 10000.
:type start_time: ~datetime.datetime
:param end_time: Required. This is exclusive. The maximum number of data points (series number
* time range) is 10000.
:type end_time: ~datetime.datetime
:param series: Required. The series to be queried. The identity must be able to define one
single time series instead of a group of time series. The maximum number of series is 100.
:type series: list[~azure.ai.metricsadvisor.models.SeriesIdentity]
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
'series': {'required': True, 'unique': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'series': {'key': 'series', 'type': '[SeriesIdentity]'},
}
def __init__(
self,
**kwargs
):
super(DetectionSeriesQuery, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.series = kwargs['series']
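# Illustrative sketch (not part of the generated code): a DetectionSeriesQuery over a
# single series. SeriesIdentity is the dimension-keyed identity model defined elsewhere
# in this module, and the timestamps are placeholders.
#
#     import datetime
#     series_detection_query = DetectionSeriesQuery(
#         start_time=datetime.datetime(2024, 1, 1),   # inclusive
#         end_time=datetime.datetime(2024, 1, 2),     # exclusive
#         series=[SeriesIdentity(dimension={"region": "west"})],
#     )
#     payload = series_detection_query.serialize()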
class Dimension(msrest.serialization.Model):
"""Dimension.
All required parameters must be populated in order to send to Azure.
:param dimension_name: Required. dimension name.
:type dimension_name: str
:param dimension_display_name: dimension display name.
:type dimension_display_name: str
"""
_validation = {
'dimension_name': {'required': True},
'dimension_display_name': {'pattern': r'[.a-zA-Z0-9_-]+'},
}
_attribute_map = {
'dimension_name': {'key': 'dimensionName', 'type': 'str'},
'dimension_display_name': {'key': 'dimensionDisplayName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Dimension, self).__init__(**kwargs)
self.dimension_name = kwargs['dimension_name']
self.dimension_display_name = kwargs.get('dimension_display_name', None)
class DimensionGroupConfiguration(msrest.serialization.Model):
"""DimensionGroupConfiguration.
All required parameters must be populated in order to send to Azure.
:param group: Required.
:type group: ~azure.ai.metricsadvisor.models.DimensionGroupIdentity
:param condition_operator: condition operator
should be specified when combining multiple detection conditions. Possible values include:
"AND", "OR".
:type condition_operator: str or
~azure.ai.metricsadvisor.models.AnomalyDetectionConfigurationLogicType
:param smart_detection_condition:
:type smart_detection_condition: ~azure.ai.metricsadvisor.models.SmartDetectionCondition
:param hard_threshold_condition:
:type hard_threshold_condition: ~azure.ai.metricsadvisor.models.HardThresholdCondition
:param change_threshold_condition:
:type change_threshold_condition: ~azure.ai.metricsadvisor.models.ChangeThresholdCondition
"""
_validation = {
'group': {'required': True},
}
_attribute_map = {
'group': {'key': 'group', 'type': 'DimensionGroupIdentity'},
'condition_operator': {'key': 'conditionOperator', 'type': 'str'},
'smart_detection_condition': {'key': 'smartDetectionCondition', 'type': 'SmartDetectionCondition'},
'hard_threshold_condition': {'key': 'hardThresholdCondition', 'type': 'HardThresholdCondition'},
'change_threshold_condition': {'key': 'changeThresholdCondition', 'type': 'ChangeThresholdCondition'},
}
def __init__(
self,
**kwargs
):
super(DimensionGroupConfiguration, self).__init__(**kwargs)
self.group = kwargs['group']
self.condition_operator = kwargs.get('condition_operator', None)
self.smart_detection_condition = kwargs.get('smart_detection_condition', None)
self.hard_threshold_condition = kwargs.get('hard_threshold_condition', None)
self.change_threshold_condition = kwargs.get('change_threshold_condition', None)
class DimensionGroupIdentity(msrest.serialization.Model):
"""DimensionGroupIdentity.
All required parameters must be populated in order to send to Azure.
:param dimension: Required. dimension specified for series group.
:type dimension: dict[str, str]
"""
_validation = {
'dimension': {'required': True},
}
_attribute_map = {
'dimension': {'key': 'dimension', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(DimensionGroupIdentity, self).__init__(**kwargs)
self.dimension = kwargs['dimension']
class HookInfo(msrest.serialization.Model):
"""HookInfo.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EmailHookInfo, WebhookHookInfo.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param hook_type: Required. hook type.Constant filled by server. Possible values include:
"Webhook", "Email".
:type hook_type: str or ~azure.ai.metricsadvisor.models.HookType
:ivar hook_id: Hook unique id.
:vartype hook_id: str
:param hook_name: Required. hook unique name.
:type hook_name: str
:param description: hook description.
:type description: str
:param external_link: hook external link.
:type external_link: str
:param admins: hook administrators.
:type admins: list[str]
"""
_validation = {
'hook_type': {'required': True},
'hook_id': {'readonly': True},
'hook_name': {'required': True},
'admins': {'unique': True},
}
_attribute_map = {
'hook_type': {'key': 'hookType', 'type': 'str'},
'hook_id': {'key': 'hookId', 'type': 'str'},
'hook_name': {'key': 'hookName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'external_link': {'key': 'externalLink', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
}
_subtype_map = {
'hook_type': {'Email': 'EmailHookInfo', 'Webhook': 'WebhookHookInfo'}
}
def __init__(
self,
**kwargs
):
super(HookInfo, self).__init__(**kwargs)
self.hook_type = None # type: Optional[str]
self.hook_id = None
self.hook_name = kwargs['hook_name']
self.description = kwargs.get('description', "")
self.external_link = kwargs.get('external_link', "")
self.admins = kwargs.get('admins', None)
class EmailHookInfo(HookInfo):
"""EmailHookInfo.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param hook_type: Required. hook type.Constant filled by server. Possible values include:
"Webhook", "Email".
:type hook_type: str or ~azure.ai.metricsadvisor.models.HookType
:ivar hook_id: Hook unique id.
:vartype hook_id: str
:param hook_name: Required. hook unique name.
:type hook_name: str
:param description: hook description.
:type description: str
:param external_link: hook external link.
:type external_link: str
:param admins: hook administrators.
:type admins: list[str]
:param hook_parameter: Required.
:type hook_parameter: ~azure.ai.metricsadvisor.models.EmailHookParameter
"""
_validation = {
'hook_type': {'required': True},
'hook_id': {'readonly': True},
'hook_name': {'required': True},
'admins': {'unique': True},
'hook_parameter': {'required': True},
}
_attribute_map = {
'hook_type': {'key': 'hookType', 'type': 'str'},
'hook_id': {'key': 'hookId', 'type': 'str'},
'hook_name': {'key': 'hookName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'external_link': {'key': 'externalLink', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'hook_parameter': {'key': 'hookParameter', 'type': 'EmailHookParameter'},
}
def __init__(
self,
**kwargs
):
super(EmailHookInfo, self).__init__(**kwargs)
self.hook_type = 'Email' # type: str
self.hook_parameter = kwargs['hook_parameter']
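# Illustrative sketch (not part of the generated code): the hook models are polymorphic
# on hookType, so constructing the EmailHookInfo subclass fixes hook_type to 'Email'
# automatically; hook_name and hook_parameter are required. EmailHookParameter is
# defined later in this module, and the address below is a placeholder.
#
#     hook = EmailHookInfo(
#         hook_name="ops-email-hook",
#         hook_parameter=EmailHookParameter(to_list=["oncall@example.com"]),
#     )
#     hook.hook_type      # 'Email', set by the subclass constructor
#     body = hook.serialize()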
class HookInfoPatch(msrest.serialization.Model):
"""HookInfoPatch.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EmailHookInfoPatch, WebhookHookInfoPatch.
All required parameters must be populated in order to send to Azure.
:param hook_type: Required. hook type.Constant filled by server. Possible values include:
"Webhook", "Email".
:type hook_type: str or ~azure.ai.metricsadvisor.models.HookType
:param hook_name: hook unique name.
:type hook_name: str
:param description: hook description.
:type description: str
:param external_link: hook external link.
:type external_link: str
:param admins: hook administrators.
:type admins: list[str]
"""
_validation = {
'hook_type': {'required': True},
'admins': {'unique': True},
}
_attribute_map = {
'hook_type': {'key': 'hookType', 'type': 'str'},
'hook_name': {'key': 'hookName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'external_link': {'key': 'externalLink', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
}
_subtype_map = {
'hook_type': {'Email': 'EmailHookInfoPatch', 'Webhook': 'WebhookHookInfoPatch'}
}
def __init__(
self,
**kwargs
):
super(HookInfoPatch, self).__init__(**kwargs)
self.hook_type = None # type: Optional[str]
self.hook_name = kwargs.get('hook_name', None)
self.description = kwargs.get('description', None)
self.external_link = kwargs.get('external_link', None)
self.admins = kwargs.get('admins', None)
class EmailHookInfoPatch(HookInfoPatch):
"""EmailHookInfoPatch.
All required parameters must be populated in order to send to Azure.
:param hook_type: Required. hook type.Constant filled by server. Possible values include:
"Webhook", "Email".
:type hook_type: str or ~azure.ai.metricsadvisor.models.HookType
:param hook_name: hook unique name.
:type hook_name: str
:param description: hook description.
:type description: str
:param external_link: hook external link.
:type external_link: str
:param admins: hook administrators.
:type admins: list[str]
:param hook_parameter:
:type hook_parameter: ~azure.ai.metricsadvisor.models.EmailHookParameterPatch
"""
_validation = {
'hook_type': {'required': True},
'admins': {'unique': True},
}
_attribute_map = {
'hook_type': {'key': 'hookType', 'type': 'str'},
'hook_name': {'key': 'hookName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'external_link': {'key': 'externalLink', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'hook_parameter': {'key': 'hookParameter', 'type': 'EmailHookParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(EmailHookInfoPatch, self).__init__(**kwargs)
self.hook_type = 'Email' # type: str
self.hook_parameter = kwargs.get('hook_parameter', None)
class EmailHookParameter(msrest.serialization.Model):
"""EmailHookParameter.
All required parameters must be populated in order to send to Azure.
:param to_list: Required. Email TO: list.
:type to_list: list[str]
"""
_validation = {
'to_list': {'required': True, 'unique': True},
}
_attribute_map = {
'to_list': {'key': 'toList', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(EmailHookParameter, self).__init__(**kwargs)
self.to_list = kwargs['to_list']
class EmailHookParameterPatch(msrest.serialization.Model):
"""EmailHookParameterPatch.
:param to_list: Email TO: list.
:type to_list: list[str]
"""
_validation = {
'to_list': {'unique': True},
}
_attribute_map = {
'to_list': {'key': 'toList', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(EmailHookParameterPatch, self).__init__(**kwargs)
self.to_list = kwargs.get('to_list', None)
class EnrichmentStatus(msrest.serialization.Model):
"""EnrichmentStatus.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar timestamp: data slice timestamp.
:vartype timestamp: ~datetime.datetime
:ivar status: latest enrichment status for this data slice.
:vartype status: str
:ivar message: the trimmed message describes details of the enrichment status.
:vartype message: str
"""
_validation = {
'timestamp': {'readonly': True},
'status': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnrichmentStatus, self).__init__(**kwargs)
self.timestamp = None
self.status = None
self.message = None
class EnrichmentStatusList(msrest.serialization.Model):
"""EnrichmentStatusList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.EnrichmentStatus]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[EnrichmentStatus]'},
}
def __init__(
self,
**kwargs
):
super(EnrichmentStatusList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class EnrichmentStatusQueryOption(msrest.serialization.Model):
"""EnrichmentStatusQueryOption.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. the start point of time range to query anomaly detection status.
:type start_time: ~datetime.datetime
:param end_time: Required. the end point of time range to query anomaly detection status.
:type end_time: ~datetime.datetime
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(EnrichmentStatusQueryOption, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
class ErrorCode(msrest.serialization.Model):
"""ErrorCode.
:param message:
:type message: str
:param code:
:type code: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorCode, self).__init__(**kwargs)
self.message = kwargs.get('message', None)
self.code = kwargs.get('code', None)
class FeedbackDimensionFilter(msrest.serialization.Model):
"""FeedbackDimensionFilter.
All required parameters must be populated in order to send to Azure.
:param dimension: Required. metric dimension filter.
:type dimension: dict[str, str]
"""
_validation = {
'dimension': {'required': True},
}
_attribute_map = {
'dimension': {'key': 'dimension', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(FeedbackDimensionFilter, self).__init__(**kwargs)
self.dimension = kwargs['dimension']
class HardThresholdCondition(msrest.serialization.Model):
"""HardThresholdCondition.
All required parameters must be populated in order to send to Azure.
:param lower_bound: lower bound
should be specified when anomalyDetectorDirection is Both or Down.
:type lower_bound: float
:param upper_bound: upper bound
should be specified when anomalyDetectorDirection is Both or Up.
:type upper_bound: float
:param anomaly_detector_direction: Required. detection direction. Possible values include:
"Both", "Down", "Up".
:type anomaly_detector_direction: str or
~azure.ai.metricsadvisor.models.AnomalyDetectorDirection
:param suppress_condition: Required.
:type suppress_condition: ~azure.ai.metricsadvisor.models.SuppressCondition
"""
_validation = {
'anomaly_detector_direction': {'required': True},
'suppress_condition': {'required': True},
}
_attribute_map = {
'lower_bound': {'key': 'lowerBound', 'type': 'float'},
'upper_bound': {'key': 'upperBound', 'type': 'float'},
'anomaly_detector_direction': {'key': 'anomalyDetectorDirection', 'type': 'str'},
'suppress_condition': {'key': 'suppressCondition', 'type': 'SuppressCondition'},
}
def __init__(
self,
**kwargs
):
super(HardThresholdCondition, self).__init__(**kwargs)
self.lower_bound = kwargs.get('lower_bound', None)
self.upper_bound = kwargs.get('upper_bound', None)
self.anomaly_detector_direction = kwargs['anomaly_detector_direction']
self.suppress_condition = kwargs['suppress_condition']
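# Illustrative sketch (not part of the generated code): a hard-threshold detection
# condition that flags values above an upper bound. SuppressCondition is assumed to be
# the min-number/min-ratio model defined elsewhere in this module; the numbers are
# example values only.
#
#     condition = HardThresholdCondition(
#         upper_bound=100.0,                      # used because the direction is "Up"
#         anomaly_detector_direction="Up",
#         suppress_condition=SuppressCondition(min_number=1, min_ratio=100),
#     )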
class HardThresholdConditionPatch(msrest.serialization.Model):
"""HardThresholdConditionPatch.
:param lower_bound: lower bound
should be specified when anomalyDetectorDirection is Both or Down.
:type lower_bound: float
:param upper_bound: upper bound
should be specified when anomalyDetectorDirection is Both or Up.
:type upper_bound: float
:param anomaly_detector_direction: detection direction. Possible values include: "Both",
"Down", "Up".
:type anomaly_detector_direction: str or
~azure.ai.metricsadvisor.models.AnomalyDetectorDirection
:param suppress_condition:
:type suppress_condition: ~azure.ai.metricsadvisor.models.SuppressConditionPatch
"""
_attribute_map = {
'lower_bound': {'key': 'lowerBound', 'type': 'float'},
'upper_bound': {'key': 'upperBound', 'type': 'float'},
'anomaly_detector_direction': {'key': 'anomalyDetectorDirection', 'type': 'str'},
'suppress_condition': {'key': 'suppressCondition', 'type': 'SuppressConditionPatch'},
}
def __init__(
self,
**kwargs
):
super(HardThresholdConditionPatch, self).__init__(**kwargs)
self.lower_bound = kwargs.get('lower_bound', None)
self.upper_bound = kwargs.get('upper_bound', None)
self.anomaly_detector_direction = kwargs.get('anomaly_detector_direction', None)
self.suppress_condition = kwargs.get('suppress_condition', None)
class HookList(msrest.serialization.Model):
"""HookList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.HookInfo]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True, 'unique': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[HookInfo]'},
}
def __init__(
self,
**kwargs
):
super(HookList, self).__init__(**kwargs)
self.next_link = None
self.value = None
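# Illustrative sketch (not part of the generated code): list models such as HookList
# are populated by the service, so they are usually produced by deserialization rather
# than constructed directly. The payload below is a hypothetical response body; the
# hook entry resolves to EmailHookInfo through HookInfo._subtype_map.
#
#     raw = {
#         "@nextLink": None,
#         "value": [{"hookType": "Email", "hookName": "ops-email-hook",
#                    "hookParameter": {"toList": ["oncall@example.com"]}}],
#     }
#     hooks = HookList.deserialize(raw)
#     hooks.value[0]  # an EmailHookInfo instance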
class IncidentProperty(msrest.serialization.Model):
"""IncidentProperty.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param max_severity: Required. max severity of latest anomalies in the incident. Possible
values include: "Low", "Medium", "High".
:type max_severity: str or ~azure.ai.metricsadvisor.models.Severity
:ivar incident_status: incident status
only return for alerting incident result. Possible values include: "Active", "Resolved".
:vartype incident_status: str or ~azure.ai.metricsadvisor.models.IncidentStatus
:ivar value_of_root_node: value of the root node.
:vartype value_of_root_node: float
:ivar expected_value_of_root_node: expected value of the root node given by smart detector.
:vartype expected_value_of_root_node: float
"""
_validation = {
'max_severity': {'required': True},
'incident_status': {'readonly': True},
'value_of_root_node': {'readonly': True},
'expected_value_of_root_node': {'readonly': True},
}
_attribute_map = {
'max_severity': {'key': 'maxSeverity', 'type': 'str'},
'incident_status': {'key': 'incidentStatus', 'type': 'str'},
'value_of_root_node': {'key': 'valueOfRootNode', 'type': 'float'},
'expected_value_of_root_node': {'key': 'expectedValueOfRootNode', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(IncidentProperty, self).__init__(**kwargs)
self.max_severity = kwargs['max_severity']
self.incident_status = None
self.value_of_root_node = None
self.expected_value_of_root_node = None
class IncidentResult(msrest.serialization.Model):
"""IncidentResult.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar data_feed_id: data feed unique id
only return for alerting anomaly result.
:vartype data_feed_id: str
:ivar metric_id: metric unique id
only return for alerting incident result.
:vartype metric_id: str
:ivar anomaly_detection_configuration_id: anomaly detection configuration unique id
only return for alerting incident result.
:vartype anomaly_detection_configuration_id: str
:param incident_id: Required. incident id.
:type incident_id: str
:param start_time: Required. incident start time.
:type start_time: ~datetime.datetime
:param last_time: Required. incident last time.
:type last_time: ~datetime.datetime
:param root_node: Required.
:type root_node: ~azure.ai.metricsadvisor.models.SeriesIdentity
:param property: Required.
:type property: ~azure.ai.metricsadvisor.models.IncidentProperty
"""
_validation = {
'data_feed_id': {'readonly': True},
'metric_id': {'readonly': True},
'anomaly_detection_configuration_id': {'readonly': True},
'incident_id': {'required': True},
'start_time': {'required': True},
'last_time': {'required': True},
'root_node': {'required': True},
'property': {'required': True},
}
_attribute_map = {
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'anomaly_detection_configuration_id': {'key': 'anomalyDetectionConfigurationId', 'type': 'str'},
'incident_id': {'key': 'incidentId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_time': {'key': 'lastTime', 'type': 'iso-8601'},
'root_node': {'key': 'rootNode', 'type': 'SeriesIdentity'},
'property': {'key': 'property', 'type': 'IncidentProperty'},
}
def __init__(
self,
**kwargs
):
super(IncidentResult, self).__init__(**kwargs)
self.data_feed_id = None
self.metric_id = None
self.anomaly_detection_configuration_id = None
self.incident_id = kwargs['incident_id']
self.start_time = kwargs['start_time']
self.last_time = kwargs['last_time']
self.root_node = kwargs['root_node']
self.property = kwargs['property']
class IncidentResultList(msrest.serialization.Model):
"""IncidentResultList.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar next_link:
:vartype next_link: str
:param value: Required.
:type value: list[~azure.ai.metricsadvisor.models.IncidentResult]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[IncidentResult]'},
}
def __init__(
self,
**kwargs
):
super(IncidentResultList, self).__init__(**kwargs)
self.next_link = None
self.value = kwargs['value']
class InfluxDBDataFeed(DataFeedDetail):
"""InfluxDBDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type.Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed need rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: the query user is one of data feed administrator or not.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.InfluxDBParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'InfluxDBParameter'},
}
def __init__(
self,
**kwargs
):
super(InfluxDBDataFeed, self).__init__(**kwargs)
self.data_source_type = 'InfluxDB' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
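# Illustrative sketch (not part of the generated code): a minimal InfluxDBDataFeed
# setting only the fields marked Required above. The feed name, query, and timestamp
# are placeholders; Metric and InfluxDBParameter are the models defined in this module.
#
#     import datetime
#     feed = InfluxDBDataFeed(
#         data_feed_name="sample-influxdb-feed",
#         granularity_name="Daily",
#         metrics=[Metric(metric_name="cost")],
#         data_start_from=datetime.datetime(2024, 1, 1),
#         data_source_parameter=InfluxDBParameter(query="select * from cost_series"),
#     )
#     feed.data_source_type  # 'InfluxDB', set by the subclass constructor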
class InfluxDBDataFeedPatch(DataFeedDetailPatch):
"""InfluxDBDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type.Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed need rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.InfluxDBParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'InfluxDBParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(InfluxDBDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'InfluxDB' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class InfluxDBParameter(msrest.serialization.Model):
"""InfluxDBParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this InfluxDB.
:type connection_string: str
:param database: A database name.
:type database: str
:param user_name: The user name of the account that can access this database.
:type user_name: str
:param password: The password of the account that can access this database.
:type password: str
:param query: Required. The script to query this database.
:type query: str
"""
_validation = {
'query': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'database': {'key': 'database', 'type': 'str'},
'user_name': {'key': 'userName', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(InfluxDBParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.database = kwargs.get('database', None)
self.user_name = kwargs.get('user_name', None)
self.password = kwargs.get('password', None)
self.query = kwargs['query']
class InfluxDBParameterPatch(msrest.serialization.Model):
"""InfluxDBParameterPatch.
:param connection_string: The connection string of this InfluxDB.
:type connection_string: str
:param database: A database name.
:type database: str
:param user_name: The user name of the account that can access this database.
:type user_name: str
:param password: The password of the account that can access this database.
:type password: str
:param query: The script to query this database.
:type query: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'database': {'key': 'database', 'type': 'str'},
'user_name': {'key': 'userName', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(InfluxDBParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.database = kwargs.get('database', None)
self.user_name = kwargs.get('user_name', None)
self.password = kwargs.get('password', None)
self.query = kwargs.get('query', None)
class IngestionProgressResetOptions(msrest.serialization.Model):
"""IngestionProgressResetOptions.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. the start point of time range to reset data ingestion status.
:type start_time: ~datetime.datetime
:param end_time: Required. the end point of time range to reset data ingestion status.
:type end_time: ~datetime.datetime
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(IngestionProgressResetOptions, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
class IngestionStatus(msrest.serialization.Model):
"""IngestionStatus.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar timestamp: data slice timestamp.
:vartype timestamp: ~datetime.datetime
:ivar status: latest ingestion task status for this data slice. Possible values include:
"NotStarted", "Scheduled", "Running", "Succeeded", "Failed", "NoData", "Error", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.IngestionStatusType
:ivar message: the trimmed message of last ingestion job.
:vartype message: str
"""
_validation = {
'timestamp': {'readonly': True},
'status': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IngestionStatus, self).__init__(**kwargs)
self.timestamp = None
self.status = None
self.message = None
class IngestionStatusList(msrest.serialization.Model):
"""IngestionStatusList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.IngestionStatus]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[IngestionStatus]'},
}
def __init__(
self,
**kwargs
):
super(IngestionStatusList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class IngestionStatusQueryOptions(msrest.serialization.Model):
"""IngestionStatusQueryOptions.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. the start point of time range to query data ingestion status.
:type start_time: ~datetime.datetime
:param end_time: Required. the end point of time range to query data ingestion status.
:type end_time: ~datetime.datetime
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(IngestionStatusQueryOptions, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
class Metric(msrest.serialization.Model):
"""Metric.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar metric_id: metric id.
:vartype metric_id: str
:param metric_name: Required. metric name.
:type metric_name: str
:param metric_display_name: metric display name.
:type metric_display_name: str
:param metric_description: metric description.
:type metric_description: str
"""
_validation = {
'metric_id': {'readonly': True},
'metric_name': {'required': True},
'metric_display_name': {'pattern': r'[.a-zA-Z0-9_-]+'},
}
_attribute_map = {
'metric_id': {'key': 'metricId', 'type': 'str'},
'metric_name': {'key': 'metricName', 'type': 'str'},
'metric_display_name': {'key': 'metricDisplayName', 'type': 'str'},
'metric_description': {'key': 'metricDescription', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Metric, self).__init__(**kwargs)
self.metric_id = None
self.metric_name = kwargs['metric_name']
self.metric_display_name = kwargs.get('metric_display_name', None)
self.metric_description = kwargs.get('metric_description', None)
class MetricAlertingConfiguration(msrest.serialization.Model):
"""MetricAlertingConfiguration.
All required parameters must be populated in order to send to Azure.
:param anomaly_detection_configuration_id: Required. Anomaly detection configuration unique id.
:type anomaly_detection_configuration_id: str
:param anomaly_scope_type: Required. Anomaly scope. Possible values include: "All",
"Dimension", "TopN".
:type anomaly_scope_type: str or ~azure.ai.metricsadvisor.models.AnomalyScope
:param negation_operation: Negation operation.
:type negation_operation: bool
:param dimension_anomaly_scope:
:type dimension_anomaly_scope: ~azure.ai.metricsadvisor.models.DimensionGroupIdentity
:param top_n_anomaly_scope:
:type top_n_anomaly_scope: ~azure.ai.metricsadvisor.models.TopNGroupScope
:param severity_filter:
:type severity_filter: ~azure.ai.metricsadvisor.models.SeverityCondition
:param snooze_filter:
:type snooze_filter: ~azure.ai.metricsadvisor.models.AlertSnoozeCondition
:param value_filter:
:type value_filter: ~azure.ai.metricsadvisor.models.ValueCondition
"""
_validation = {
'anomaly_detection_configuration_id': {'required': True},
'anomaly_scope_type': {'required': True},
}
_attribute_map = {
'anomaly_detection_configuration_id': {'key': 'anomalyDetectionConfigurationId', 'type': 'str'},
'anomaly_scope_type': {'key': 'anomalyScopeType', 'type': 'str'},
'negation_operation': {'key': 'negationOperation', 'type': 'bool'},
'dimension_anomaly_scope': {'key': 'dimensionAnomalyScope', 'type': 'DimensionGroupIdentity'},
'top_n_anomaly_scope': {'key': 'topNAnomalyScope', 'type': 'TopNGroupScope'},
'severity_filter': {'key': 'severityFilter', 'type': 'SeverityCondition'},
'snooze_filter': {'key': 'snoozeFilter', 'type': 'AlertSnoozeCondition'},
'value_filter': {'key': 'valueFilter', 'type': 'ValueCondition'},
}
def __init__(
self,
**kwargs
):
super(MetricAlertingConfiguration, self).__init__(**kwargs)
self.anomaly_detection_configuration_id = kwargs['anomaly_detection_configuration_id']
self.anomaly_scope_type = kwargs['anomaly_scope_type']
self.negation_operation = kwargs.get('negation_operation', False)
self.dimension_anomaly_scope = kwargs.get('dimension_anomaly_scope', None)
self.top_n_anomaly_scope = kwargs.get('top_n_anomaly_scope', None)
self.severity_filter = kwargs.get('severity_filter', None)
self.snooze_filter = kwargs.get('snooze_filter', None)
self.value_filter = kwargs.get('value_filter', None)
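# Illustrative sketch (not part of the generated code): an alerting configuration
# scoped to one dimension group. The detection configuration id is a placeholder GUID,
# and DimensionGroupIdentity is the model defined earlier in this module.
#
#     alert_scope = MetricAlertingConfiguration(
#         anomaly_detection_configuration_id="00000000-0000-0000-0000-000000000000",
#         anomaly_scope_type="Dimension",
#         dimension_anomaly_scope=DimensionGroupIdentity(dimension={"city": "Seattle"}),
#     )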
class MetricDataItem(msrest.serialization.Model):
"""MetricDataItem.
Variables are only populated by the server, and will be ignored when sending a request.
:param id:
:type id: ~azure.ai.metricsadvisor.models.MetricSeriesItem
:ivar timestamp_list: timestamps of the data related to this time series.
:vartype timestamp_list: list[~datetime.datetime]
:ivar value_list: values of the data related to this time series.
:vartype value_list: list[float]
"""
_validation = {
'timestamp_list': {'readonly': True},
'value_list': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'MetricSeriesItem'},
'timestamp_list': {'key': 'timestampList', 'type': '[iso-8601]'},
'value_list': {'key': 'valueList', 'type': '[float]'},
}
def __init__(
self,
**kwargs
):
super(MetricDataItem, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.timestamp_list = None
self.value_list = None
class MetricDataList(msrest.serialization.Model):
"""MetricDataList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.MetricDataItem]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MetricDataItem]'},
}
def __init__(
self,
**kwargs
):
super(MetricDataList, self).__init__(**kwargs)
self.value = None
class MetricDataQueryOptions(msrest.serialization.Model):
"""MetricDataQueryOptions.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. start time of query a time series data, and format should be
yyyy-MM-ddThh:mm:ssZ. The maximum number of data points (series number * time range) is 10000.
:type start_time: ~datetime.datetime
:param end_time: Required. end time of query a time series data, and format should be
yyyy-MM-ddThh:mm:ssZ. The maximum number of data points (series number * time range) is 10000.
:type end_time: ~datetime.datetime
:param series: Required. query specific series. The maximum number of series is 100.
:type series: list[dict[str, str]]
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
'series': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'series': {'key': 'series', 'type': '[{str}]'},
}
def __init__(
self,
**kwargs
):
super(MetricDataQueryOptions, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.series = kwargs['series']
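# Illustrative sketch (not part of the generated code): querying raw metric data for
# two series. Each entry in `series` is a dimension-name/value mapping, capped at 100
# series and 10000 data points per the docstring above; the timestamps are examples.
#
#     import datetime
#     data_query = MetricDataQueryOptions(
#         start_time=datetime.datetime(2024, 1, 1),
#         end_time=datetime.datetime(2024, 1, 8),
#         series=[{"city": "Seattle"}, {"city": "Tokyo"}],
#     )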
class MetricDimensionList(msrest.serialization.Model):
"""MetricDimensionList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[str]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True, 'unique': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricDimensionList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class MetricDimensionQueryOptions(msrest.serialization.Model):
"""MetricDimensionQueryOptions.
All required parameters must be populated in order to send to Azure.
:param dimension_name: Required. dimension name.
:type dimension_name: str
:param dimension_value_filter: dimension value to be filtered.
:type dimension_value_filter: str
"""
_validation = {
'dimension_name': {'required': True},
}
_attribute_map = {
'dimension_name': {'key': 'dimensionName', 'type': 'str'},
'dimension_value_filter': {'key': 'dimensionValueFilter', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MetricDimensionQueryOptions, self).__init__(**kwargs)
self.dimension_name = kwargs['dimension_name']
self.dimension_value_filter = kwargs.get('dimension_value_filter', None)
class MetricFeedbackFilter(msrest.serialization.Model):
"""MetricFeedbackFilter.
All required parameters must be populated in order to send to Azure.
:param metric_id: Required. filter feedbacks by metric id.
:type metric_id: str
:param dimension_filter:
:type dimension_filter: ~azure.ai.metricsadvisor.models.FeedbackDimensionFilter
:param feedback_type: filter feedbacks by type. Possible values include: "Anomaly",
"ChangePoint", "Period", "Comment".
:type feedback_type: str or ~azure.ai.metricsadvisor.models.FeedbackType
:param start_time: start time filter under chosen time mode.
:type start_time: ~datetime.datetime
:param end_time: end time filter under chosen time mode.
:type end_time: ~datetime.datetime
:param time_mode: time mode to filter feedback. Possible values include: "MetricTimestamp",
"FeedbackCreatedTime".
:type time_mode: str or ~azure.ai.metricsadvisor.models.FeedbackQueryTimeMode
"""
_validation = {
'metric_id': {'required': True},
}
_attribute_map = {
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'FeedbackDimensionFilter'},
'feedback_type': {'key': 'feedbackType', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_mode': {'key': 'timeMode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MetricFeedbackFilter, self).__init__(**kwargs)
self.metric_id = kwargs['metric_id']
self.dimension_filter = kwargs.get('dimension_filter', None)
self.feedback_type = kwargs.get('feedback_type', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.time_mode = kwargs.get('time_mode', None)
class MetricFeedbackList(msrest.serialization.Model):
"""MetricFeedbackList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.MetricFeedback]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[MetricFeedback]'},
}
def __init__(
self,
**kwargs
):
super(MetricFeedbackList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class MetricSeriesItem(msrest.serialization.Model):
"""MetricSeriesItem.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar metric_id: metric unique id.
:vartype metric_id: str
:ivar dimension: dimension name and value pair.
:vartype dimension: dict[str, str]
"""
_validation = {
'metric_id': {'readonly': True},
'dimension': {'readonly': True},
}
_attribute_map = {
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension': {'key': 'dimension', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(MetricSeriesItem, self).__init__(**kwargs)
self.metric_id = None
self.dimension = None
class MetricSeriesList(msrest.serialization.Model):
"""MetricSeriesList.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar next_link:
:vartype next_link: str
:ivar value:
:vartype value: list[~azure.ai.metricsadvisor.models.MetricSeriesItem]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'next_link': {'key': '@nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[MetricSeriesItem]'},
}
def __init__(
self,
**kwargs
):
super(MetricSeriesList, self).__init__(**kwargs)
self.next_link = None
self.value = None
class MetricSeriesQueryOptions(msrest.serialization.Model):
"""MetricSeriesQueryOptions.
All required parameters must be populated in order to send to Azure.
:param active_since: Required. query series ingested after this time, the format should be
yyyy-MM-ddTHH:mm:ssZ.
:type active_since: ~datetime.datetime
:param dimension_filter: filter specific dimension name and values.
:type dimension_filter: dict[str, list[str]]
"""
_validation = {
'active_since': {'required': True},
}
_attribute_map = {
'active_since': {'key': 'activeSince', 'type': 'iso-8601'},
'dimension_filter': {'key': 'dimensionFilter', 'type': '{[str]}'},
}
def __init__(
self,
**kwargs
):
super(MetricSeriesQueryOptions, self).__init__(**kwargs)
self.active_since = kwargs['active_since']
self.dimension_filter = kwargs.get('dimension_filter', None)
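# Illustrative sketch (not part of the generated code): listing series active since a
# given time, optionally narrowed by dimension values; the filter maps a dimension name
# to the values of interest. Inputs are examples only.
#
#     import datetime
#     series_query = MetricSeriesQueryOptions(
#         active_since=datetime.datetime(2024, 1, 1),
#         dimension_filter={"city": ["Seattle", "Tokyo"]},
#     )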
class MongoDBDataFeed(DataFeedDetail):
"""MongoDBDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type.Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed need rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: the query user is one of data feed administrator or not.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.MongoDBParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'MongoDBParameter'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDataFeed, self).__init__(**kwargs)
self.data_source_type = 'MongoDB' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class MongoDBDataFeedPatch(DataFeedDetailPatch):
"""MongoDBDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. If timestampColumn is null, the start
time of every time slice is used as the default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the delay, in seconds, applied to the start of the data
ingestion task for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against the user data
source. 0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrators.
:type admins: list[str]
:param viewers: data feed viewers.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.MongoDBParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'MongoDBParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'MongoDB' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class MongoDBParameter(msrest.serialization.Model):
"""MongoDBParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this MongoDB.
:type connection_string: str
:param database: A database name in this MongoDB.
:type database: str
:param command: Required. The script to query this database.
:type command: str
"""
_validation = {
'command': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'database': {'key': 'database', 'type': 'str'},
'command': {'key': 'command', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MongoDBParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.database = kwargs.get('database', None)
self.command = kwargs['command']
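# Illustrative sketch (not part of the generated surface): how a MongoDBParameter might be paired
# with a MongoDBDataFeed when building a create request. The connection values are placeholders,
# and the Metric kwarg name below is an assumption about the Metric model defined elsewhere in
# this module.
#
#     import datetime
#     source = MongoDBParameter(
#         connection_string="mongodb://<host>:27017",   # placeholder, not a real host
#         database="metrics_db",
#         command='{"find": "revenue", "filter": {}}',
#     )
#     feed = MongoDBDataFeed(
#         data_feed_name="mongo-revenue-feed",
#         granularity_name="Daily",
#         metrics=[Metric(metric_name="revenue")],      # assumed kwarg name
#         data_start_from=datetime.datetime(2021, 1, 1),
#         data_source_parameter=source,
#     )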
class MongoDBParameterPatch(msrest.serialization.Model):
"""MongoDBParameterPatch.
:param connection_string: The connection string of this MongoDB.
:type connection_string: str
:param database: A database name in this MongoDB.
:type database: str
:param command: The script to query this database.
:type command: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'database': {'key': 'database', 'type': 'str'},
'command': {'key': 'command', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MongoDBParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.database = kwargs.get('database', None)
self.command = kwargs.get('command', None)
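# Illustrative sketch: a partial update usually carries only the fields being changed. The snippet
# below assumes only the MongoDB command is replaced; every other field is left unset so the
# service keeps its current value.
#
#     patch = MongoDBDataFeedPatch(
#         data_source_parameter=MongoDBParameterPatch(
#             command='{"find": "revenue", "filter": {"region": "US"}}',
#         ),
#     )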
class MySqlDataFeed(DataFeedDetail):
"""MySqlDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: required if the granularity is custom.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. If timestampColumn is null, the start
time of every time slice is used as the default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the delay, in seconds, applied to the start of the data
ingestion task for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against the user data
source. 0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrators.
:type admins: list[str]
:param viewers: data feed viewers.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SqlSourceParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SqlSourceParameter'},
}
def __init__(
self,
**kwargs
):
super(MySqlDataFeed, self).__init__(**kwargs)
self.data_source_type = 'MySql' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
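# Illustrative sketch: MySqlDataFeed reuses the generic SqlSourceParameter model defined elsewhere
# in this module. The example assumes SqlSourceParameter accepts ``connection_string`` and
# ``query``; all values are placeholders.
#
#     import datetime
#     feed = MySqlDataFeed(
#         data_feed_name="mysql-orders-feed",
#         granularity_name="Hourly",
#         metrics=[Metric(metric_name="order_count")],  # assumed kwarg name
#         data_start_from=datetime.datetime(2021, 1, 1),
#         data_source_parameter=SqlSourceParameter(
#             connection_string="Server=<host>;Database=sales;",  # placeholder
#             query="SELECT timestamp, order_count FROM orders WHERE timestamp = @StartTime",
#         ),
#     )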
class MySqlDataFeedPatch(DataFeedDetailPatch):
"""MySqlDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. If timestampColumn is null, the start
time of every time slice is used as the default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the delay, in seconds, applied to the start of the data
ingestion task for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against the user data
source. 0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrators.
:type admins: list[str]
:param viewers: data feed viewers.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SQLSourceParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SQLSourceParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(MySqlDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'MySql' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class PeriodFeedback(MetricFeedback):
"""PeriodFeedback.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param feedback_type: Required. feedback type. Constant filled by server. Possible values
include: "Anomaly", "ChangePoint", "Period", "Comment".
:type feedback_type: str or ~azure.ai.metricsadvisor.models.FeedbackType
:ivar feedback_id: feedback unique id.
:vartype feedback_id: str
:ivar created_time: feedback created time.
:vartype created_time: ~datetime.datetime
:ivar user_principal: user who gives this feedback.
:vartype user_principal: str
:param metric_id: Required. metric unique id.
:type metric_id: str
:param dimension_filter: Required.
:type dimension_filter: ~azure.ai.metricsadvisor.models.FeedbackDimensionFilter
:param value: Required.
:type value: ~azure.ai.metricsadvisor.models.PeriodFeedbackValue
"""
_validation = {
'feedback_type': {'required': True},
'feedback_id': {'readonly': True},
'created_time': {'readonly': True},
'user_principal': {'readonly': True},
'metric_id': {'required': True},
'dimension_filter': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'feedback_type': {'key': 'feedbackType', 'type': 'str'},
'feedback_id': {'key': 'feedbackId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'user_principal': {'key': 'userPrincipal', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'dimension_filter': {'key': 'dimensionFilter', 'type': 'FeedbackDimensionFilter'},
'value': {'key': 'value', 'type': 'PeriodFeedbackValue'},
}
def __init__(
self,
**kwargs
):
super(PeriodFeedback, self).__init__(**kwargs)
self.feedback_type = 'Period' # type: str
self.value = kwargs['value']
class PeriodFeedbackValue(msrest.serialization.Model):
"""PeriodFeedbackValue.
All required parameters must be populated in order to send to Azure.
:param period_type: Required. the type of setting period. Possible values include:
"AutoDetect", "AssignValue".
:type period_type: str or ~azure.ai.metricsadvisor.models.PeriodType
:param period_value: Required. the number of intervals a period contains; set to 0 when there is
no period.
:type period_value: int
"""
_validation = {
'period_type': {'required': True},
'period_value': {'required': True},
}
_attribute_map = {
'period_type': {'key': 'periodType', 'type': 'str'},
'period_value': {'key': 'periodValue', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(PeriodFeedbackValue, self).__init__(**kwargs)
self.period_type = kwargs['period_type']
self.period_value = kwargs['period_value']
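# Illustrative sketch: submitting period feedback pairs a PeriodFeedbackValue with the metric and
# dimension filter it applies to. FeedbackDimensionFilter is defined elsewhere in this module and
# is assumed here to accept a ``dimension`` mapping; the metric id is a placeholder.
#
#     feedback = PeriodFeedback(
#         metric_id="<metric-guid>",
#         dimension_filter=FeedbackDimensionFilter(dimension={"city": "Seattle"}),
#         value=PeriodFeedbackValue(period_type="AssignValue", period_value=7),
#     )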
class PostgreSqlDataFeed(DataFeedDetail):
"""PostgreSqlDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: required if the granularity is custom.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. If timestampColumn is null, the start
time of every time slice is used as the default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the delay, in seconds, applied to the start of the data
ingestion task for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against the user data
source. 0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrators.
:type admins: list[str]
:param viewers: data feed viewers.
:type viewers: list[str]
:ivar is_admin: whether the query user is one of the data feed administrators.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SqlSourceParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SqlSourceParameter'},
}
def __init__(
self,
**kwargs
):
super(PostgreSqlDataFeed, self).__init__(**kwargs)
self.data_source_type = 'PostgreSql' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
class PostgreSqlDataFeedPatch(DataFeedDetailPatch):
"""PostgreSqlDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. If timestampColumn is null, the start
time of every time slice is used as the default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the delay, in seconds, applied to the start of the data
ingestion task for every data slice.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against the user data
source. 0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retrying data ingestion this many seconds after the
data slice's first scheduled time.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark whether the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrators.
:type admins: list[str]
:param viewers: data feed viewers.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SQLSourceParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SQLSourceParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(PostgreSqlDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'PostgreSql' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
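# Illustrative sketch: a patch can also flip feed-level settings without touching the data source.
# This one only pauses the feed; every other property is left unset.
#
#     patch = PostgreSqlDataFeedPatch(status="Paused")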
class RootCause(msrest.serialization.Model):
"""RootCause.
All required parameters must be populated in order to send to Azure.
:param root_cause: Required.
:type root_cause: ~azure.ai.metricsadvisor.models.DimensionGroupIdentity
:param path: Required. drill-down path from the queried anomaly to the root cause.
:type path: list[str]
:param score: Required. score of the root cause.
:type score: float
:param description: Required. description of the root cause.
:type description: str
"""
_validation = {
'root_cause': {'required': True},
'path': {'required': True},
'score': {'required': True},
'description': {'required': True},
}
_attribute_map = {
'root_cause': {'key': 'rootCause', 'type': 'DimensionGroupIdentity'},
'path': {'key': 'path', 'type': '[str]'},
'score': {'key': 'score', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RootCause, self).__init__(**kwargs)
self.root_cause = kwargs['root_cause']
self.path = kwargs['path']
self.score = kwargs['score']
self.description = kwargs['description']
class RootCauseList(msrest.serialization.Model):
"""RootCauseList.
All required parameters must be populated in order to send to Azure.
:param value: Required.
:type value: list[~azure.ai.metricsadvisor.models.RootCause]
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RootCause]'},
}
def __init__(
self,
**kwargs
):
super(RootCauseList, self).__init__(**kwargs)
self.value = kwargs['value']
class SeriesConfiguration(msrest.serialization.Model):
"""SeriesConfiguration.
All required parameters must be populated in order to send to Azure.
:param series: Required.
:type series: ~azure.ai.metricsadvisor.models.SeriesIdentity
:param condition_operator: condition operator
should be specified when combining multiple detection conditions. Possible values include:
"AND", "OR".
:type condition_operator: str or
~azure.ai.metricsadvisor.models.AnomalyDetectionConfigurationLogicType
:param smart_detection_condition:
:type smart_detection_condition: ~azure.ai.metricsadvisor.models.SmartDetectionCondition
:param hard_threshold_condition:
:type hard_threshold_condition: ~azure.ai.metricsadvisor.models.HardThresholdCondition
:param change_threshold_condition:
:type change_threshold_condition: ~azure.ai.metricsadvisor.models.ChangeThresholdCondition
"""
_validation = {
'series': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': 'SeriesIdentity'},
'condition_operator': {'key': 'conditionOperator', 'type': 'str'},
'smart_detection_condition': {'key': 'smartDetectionCondition', 'type': 'SmartDetectionCondition'},
'hard_threshold_condition': {'key': 'hardThresholdCondition', 'type': 'HardThresholdCondition'},
'change_threshold_condition': {'key': 'changeThresholdCondition', 'type': 'ChangeThresholdCondition'},
}
def __init__(
self,
**kwargs
):
super(SeriesConfiguration, self).__init__(**kwargs)
self.series = kwargs['series']
self.condition_operator = kwargs.get('condition_operator', None)
self.smart_detection_condition = kwargs.get('smart_detection_condition', None)
self.hard_threshold_condition = kwargs.get('hard_threshold_condition', None)
self.change_threshold_condition = kwargs.get('change_threshold_condition', None)
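# Illustrative sketch: a per-series override combines one or more detection conditions for a
# single series; condition_operator only matters when more than one condition is set.
# SuppressCondition is defined elsewhere in this module and is assumed here to accept
# ``min_number`` and ``min_ratio``.
#
#     override = SeriesConfiguration(
#         series=SeriesIdentity(dimension={"city": "Seattle", "category": "Shoes"}),
#         condition_operator="AND",
#         smart_detection_condition=SmartDetectionCondition(
#             sensitivity=80,
#             anomaly_detector_direction="Both",
#             suppress_condition=SuppressCondition(min_number=5, min_ratio=50),  # assumed kwargs
#         ),
#     )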
class SeriesIdentity(msrest.serialization.Model):
"""SeriesIdentity.
All required parameters must be populated in order to send to Azure.
:param dimension: Required. dimension specified for series.
:type dimension: dict[str, str]
"""
_validation = {
'dimension': {'required': True},
}
_attribute_map = {
'dimension': {'key': 'dimension', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(SeriesIdentity, self).__init__(**kwargs)
self.dimension = kwargs['dimension']
class SeriesResult(msrest.serialization.Model):
"""SeriesResult.
All required parameters must be populated in order to send to Azure.
:param series: Required.
:type series: ~azure.ai.metricsadvisor.models.SeriesIdentity
:param timestamp_list: Required. timestamps of the series.
:type timestamp_list: list[~datetime.datetime]
:param value_list: Required. values of the series.
:type value_list: list[float]
:param is_anomaly_list: Required. whether points of the series are anomalies.
:type is_anomaly_list: list[bool]
:param period_list: Required. period calculated on each point of the series.
:type period_list: list[int]
:param expected_value_list: Required. expected values of the series given by smart detector.
:type expected_value_list: list[float]
:param lower_boundary_list: Required. lower boundary list of the series given by smart
detector.
:type lower_boundary_list: list[float]
:param upper_boundary_list: Required. upper boundary list of the series given by smart
detector.
:type upper_boundary_list: list[float]
"""
_validation = {
'series': {'required': True},
'timestamp_list': {'required': True},
'value_list': {'required': True},
'is_anomaly_list': {'required': True},
'period_list': {'required': True},
'expected_value_list': {'required': True},
'lower_boundary_list': {'required': True},
'upper_boundary_list': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': 'SeriesIdentity'},
'timestamp_list': {'key': 'timestampList', 'type': '[iso-8601]'},
'value_list': {'key': 'valueList', 'type': '[float]'},
'is_anomaly_list': {'key': 'isAnomalyList', 'type': '[bool]'},
'period_list': {'key': 'periodList', 'type': '[int]'},
'expected_value_list': {'key': 'expectedValueList', 'type': '[float]'},
'lower_boundary_list': {'key': 'lowerBoundaryList', 'type': '[float]'},
'upper_boundary_list': {'key': 'upperBoundaryList', 'type': '[float]'},
}
def __init__(
self,
**kwargs
):
super(SeriesResult, self).__init__(**kwargs)
self.series = kwargs['series']
self.timestamp_list = kwargs['timestamp_list']
self.value_list = kwargs['value_list']
self.is_anomaly_list = kwargs['is_anomaly_list']
self.period_list = kwargs['period_list']
self.expected_value_list = kwargs['expected_value_list']
self.lower_boundary_list = kwargs['lower_boundary_list']
self.upper_boundary_list = kwargs['upper_boundary_list']
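# Illustrative sketch: SeriesResult is returned by the service, so client code typically only
# reads it. The parallel lists share one index per timestamp.
#
#     for ts, value, is_anomaly in zip(
#         result.timestamp_list, result.value_list, result.is_anomaly_list
#     ):
#         if is_anomaly:
#             print(f"{ts.isoformat()}: {value} flagged as an anomaly")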
class SeriesResultList(msrest.serialization.Model):
"""SeriesResultList.
All required parameters must be populated in order to send to Azure.
:param value: Required.
:type value: list[~azure.ai.metricsadvisor.models.SeriesResult]
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SeriesResult]'},
}
def __init__(
self,
**kwargs
):
super(SeriesResultList, self).__init__(**kwargs)
self.value = kwargs['value']
class ServicePrincipalCredential(DataSourceCredential):
"""ServicePrincipalCredential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:ivar data_source_credential_id: Unique id of data source credential.
:vartype data_source_credential_id: str
:param data_source_credential_name: Required. Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters: Required.
:type parameters: ~azure.ai.metricsadvisor.models.ServicePrincipalParam
"""
_validation = {
'data_source_credential_type': {'required': True},
'data_source_credential_id': {'readonly': True},
'data_source_credential_name': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_id': {'key': 'dataSourceCredentialId', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'ServicePrincipalParam'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalCredential, self).__init__(**kwargs)
self.data_source_credential_type = 'ServicePrincipal' # type: str
self.parameters = kwargs['parameters']
class ServicePrincipalCredentialPatch(DataSourceCredentialPatch):
"""ServicePrincipalCredentialPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:param data_source_credential_name: Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters:
:type parameters: ~azure.ai.metricsadvisor.models.ServicePrincipalParamPatch
"""
_validation = {
'data_source_credential_type': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'ServicePrincipalParamPatch'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalCredentialPatch, self).__init__(**kwargs)
self.data_source_credential_type = 'ServicePrincipal' # type: str
self.parameters = kwargs.get('parameters', None)
class ServicePrincipalInKVCredential(DataSourceCredential):
"""ServicePrincipalInKVCredential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:ivar data_source_credential_id: Unique id of data source credential.
:vartype data_source_credential_id: str
:param data_source_credential_name: Required. Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters: Required.
:type parameters: ~azure.ai.metricsadvisor.models.ServicePrincipalInKVParam
"""
_validation = {
'data_source_credential_type': {'required': True},
'data_source_credential_id': {'readonly': True},
'data_source_credential_name': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_id': {'key': 'dataSourceCredentialId', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'ServicePrincipalInKVParam'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalInKVCredential, self).__init__(**kwargs)
self.data_source_credential_type = 'ServicePrincipalInKV' # type: str
self.parameters = kwargs['parameters']
class ServicePrincipalInKVCredentialPatch(DataSourceCredentialPatch):
"""ServicePrincipalInKVCredentialPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_credential_type: Required. Type of data source credential. Constant filled by
server. Possible values include: "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type data_source_credential_type: str or
~azure.ai.metricsadvisor.models.DataSourceCredentialType
:param data_source_credential_name: Name of data source credential.
:type data_source_credential_name: str
:param data_source_credential_description: Description of data source credential.
:type data_source_credential_description: str
:param parameters:
:type parameters: ~azure.ai.metricsadvisor.models.ServicePrincipalInKVParamPatch
"""
_validation = {
'data_source_credential_type': {'required': True},
}
_attribute_map = {
'data_source_credential_type': {'key': 'dataSourceCredentialType', 'type': 'str'},
'data_source_credential_name': {'key': 'dataSourceCredentialName', 'type': 'str'},
'data_source_credential_description': {'key': 'dataSourceCredentialDescription', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'ServicePrincipalInKVParamPatch'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalInKVCredentialPatch, self).__init__(**kwargs)
self.data_source_credential_type = 'ServicePrincipalInKV' # type: str
self.parameters = kwargs.get('parameters', None)
class ServicePrincipalInKVParam(msrest.serialization.Model):
"""ServicePrincipalInKVParam.
All required parameters must be populated in order to send to Azure.
:param key_vault_endpoint: Required. The Key Vault endpoint that stores the service principal.
:type key_vault_endpoint: str
:param key_vault_client_id: Required. The Client Id to access the Key Vault.
:type key_vault_client_id: str
:param key_vault_client_secret: The Client Secret to access the Key Vault.
:type key_vault_client_secret: str
:param service_principal_id_name_in_kv: Required. The secret name of the service principal's
client Id in the Key Vault.
:type service_principal_id_name_in_kv: str
:param service_principal_secret_name_in_kv: Required. The secret name of the service
principal's client secret in the Key Vault.
:type service_principal_secret_name_in_kv: str
:param tenant_id: Required. The tenant id of your service principal.
:type tenant_id: str
"""
_validation = {
'key_vault_endpoint': {'required': True},
'key_vault_client_id': {'required': True},
'service_principal_id_name_in_kv': {'required': True},
'service_principal_secret_name_in_kv': {'required': True},
'tenant_id': {'required': True},
}
_attribute_map = {
'key_vault_endpoint': {'key': 'keyVaultEndpoint', 'type': 'str'},
'key_vault_client_id': {'key': 'keyVaultClientId', 'type': 'str'},
'key_vault_client_secret': {'key': 'keyVaultClientSecret', 'type': 'str'},
'service_principal_id_name_in_kv': {'key': 'servicePrincipalIdNameInKV', 'type': 'str'},
'service_principal_secret_name_in_kv': {'key': 'servicePrincipalSecretNameInKV', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalInKVParam, self).__init__(**kwargs)
self.key_vault_endpoint = kwargs['key_vault_endpoint']
self.key_vault_client_id = kwargs['key_vault_client_id']
self.key_vault_client_secret = kwargs.get('key_vault_client_secret', None)
self.service_principal_id_name_in_kv = kwargs['service_principal_id_name_in_kv']
self.service_principal_secret_name_in_kv = kwargs['service_principal_secret_name_in_kv']
self.tenant_id = kwargs['tenant_id']
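# Illustrative sketch: a Key Vault-backed service principal credential wraps
# ServicePrincipalInKVParam in a ServicePrincipalInKVCredential (defined above). The endpoint,
# ids, and secret names below are placeholders, not real values.
#
#     credential = ServicePrincipalInKVCredential(
#         data_source_credential_name="sp-in-kv-credential",
#         parameters=ServicePrincipalInKVParam(
#             key_vault_endpoint="https://<vault-name>.vault.azure.net/",
#             key_vault_client_id="<kv-client-id>",
#             key_vault_client_secret="<kv-client-secret>",
#             service_principal_id_name_in_kv="sp-client-id-secret-name",
#             service_principal_secret_name_in_kv="sp-client-secret-secret-name",
#             tenant_id="<tenant-id>",
#         ),
#     )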
class ServicePrincipalInKVParamPatch(msrest.serialization.Model):
"""ServicePrincipalInKVParamPatch.
:param key_vault_endpoint: The Key Vault endpoint that stores the service principal.
:type key_vault_endpoint: str
:param key_vault_client_id: The Client Id to access the Key Vault.
:type key_vault_client_id: str
:param key_vault_client_secret: The Client Secret to access the Key Vault.
:type key_vault_client_secret: str
:param service_principal_id_name_in_kv: The secret name of the service principal's client Id in
the Key Vault.
:type service_principal_id_name_in_kv: str
:param service_principal_secret_name_in_kv: The secret name of the service principal's client
secret in the Key Vault.
:type service_principal_secret_name_in_kv: str
:param tenant_id: The tenant id of your service principal.
:type tenant_id: str
"""
_attribute_map = {
'key_vault_endpoint': {'key': 'keyVaultEndpoint', 'type': 'str'},
'key_vault_client_id': {'key': 'keyVaultClientId', 'type': 'str'},
'key_vault_client_secret': {'key': 'keyVaultClientSecret', 'type': 'str'},
'service_principal_id_name_in_kv': {'key': 'servicePrincipalIdNameInKV', 'type': 'str'},
'service_principal_secret_name_in_kv': {'key': 'servicePrincipalSecretNameInKV', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalInKVParamPatch, self).__init__(**kwargs)
self.key_vault_endpoint = kwargs.get('key_vault_endpoint', None)
self.key_vault_client_id = kwargs.get('key_vault_client_id', None)
self.key_vault_client_secret = kwargs.get('key_vault_client_secret', None)
self.service_principal_id_name_in_kv = kwargs.get('service_principal_id_name_in_kv', None)
self.service_principal_secret_name_in_kv = kwargs.get('service_principal_secret_name_in_kv', None)
self.tenant_id = kwargs.get('tenant_id', None)
class ServicePrincipalParam(msrest.serialization.Model):
"""ServicePrincipalParam.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. The client id of the service principal.
:type client_id: str
:param client_secret: The client secret of the service principal.
:type client_secret: str
:param tenant_id: Required. The tenant id of the service principal.
:type tenant_id: str
"""
_validation = {
'client_id': {'required': True},
'tenant_id': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalParam, self).__init__(**kwargs)
self.client_id = kwargs['client_id']
self.client_secret = kwargs.get('client_secret', None)
self.tenant_id = kwargs['tenant_id']
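# Illustrative sketch: a plain service principal credential pairs ServicePrincipalParam with
# ServicePrincipalCredential (defined above). The ids and secret are placeholders.
#
#     credential = ServicePrincipalCredential(
#         data_source_credential_name="sp-credential",
#         parameters=ServicePrincipalParam(
#             client_id="<client-id>",
#             client_secret="<client-secret>",
#             tenant_id="<tenant-id>",
#         ),
#     )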
class ServicePrincipalParamPatch(msrest.serialization.Model):
"""ServicePrincipalParamPatch.
:param client_id: The client id of the service principal.
:type client_id: str
:param client_secret: The client secret of the service principal.
:type client_secret: str
:param tenant_id: The tenant id of the service principal.
:type tenant_id: str
"""
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServicePrincipalParamPatch, self).__init__(**kwargs)
self.client_id = kwargs.get('client_id', None)
self.client_secret = kwargs.get('client_secret', None)
self.tenant_id = kwargs.get('tenant_id', None)
class SeverityCondition(msrest.serialization.Model):
"""SeverityCondition.
All required parameters must be populated in order to send to Azure.
:param min_alert_severity: Required. min alert severity. Possible values include: "Low",
"Medium", "High".
:type min_alert_severity: str or ~azure.ai.metricsadvisor.models.Severity
:param max_alert_severity: Required. max alert severity. Possible values include: "Low",
"Medium", "High".
:type max_alert_severity: str or ~azure.ai.metricsadvisor.models.Severity
"""
_validation = {
'min_alert_severity': {'required': True},
'max_alert_severity': {'required': True},
}
_attribute_map = {
'min_alert_severity': {'key': 'minAlertSeverity', 'type': 'str'},
'max_alert_severity': {'key': 'maxAlertSeverity', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SeverityCondition, self).__init__(**kwargs)
self.min_alert_severity = kwargs['min_alert_severity']
self.max_alert_severity = kwargs['max_alert_severity']
class SeverityFilterCondition(msrest.serialization.Model):
"""SeverityFilterCondition.
All required parameters must be populated in order to send to Azure.
:param min: Required. min severity. Possible values include: "Low", "Medium", "High".
:type min: str or ~azure.ai.metricsadvisor.models.Severity
:param max: Required. max severity. Possible values include: "Low", "Medium", "High".
:type max: str or ~azure.ai.metricsadvisor.models.Severity
"""
_validation = {
'min': {'required': True},
'max': {'required': True},
}
_attribute_map = {
'min': {'key': 'min', 'type': 'str'},
'max': {'key': 'max', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SeverityFilterCondition, self).__init__(**kwargs)
self.min = kwargs['min']
self.max = kwargs['max']
class SmartDetectionCondition(msrest.serialization.Model):
"""SmartDetectionCondition.
All required parameters must be populated in order to send to Azure.
:param sensitivity: Required. sensitivity, value range : (0, 100].
:type sensitivity: float
:param anomaly_detector_direction: Required. detection direction. Possible values include:
"Both", "Down", "Up".
:type anomaly_detector_direction: str or
~azure.ai.metricsadvisor.models.AnomalyDetectorDirection
:param suppress_condition: Required.
:type suppress_condition: ~azure.ai.metricsadvisor.models.SuppressCondition
"""
_validation = {
'sensitivity': {'required': True},
'anomaly_detector_direction': {'required': True},
'suppress_condition': {'required': True},
}
_attribute_map = {
'sensitivity': {'key': 'sensitivity', 'type': 'float'},
'anomaly_detector_direction': {'key': 'anomalyDetectorDirection', 'type': 'str'},
'suppress_condition': {'key': 'suppressCondition', 'type': 'SuppressCondition'},
}
def __init__(
self,
**kwargs
):
super(SmartDetectionCondition, self).__init__(**kwargs)
self.sensitivity = kwargs['sensitivity']
self.anomaly_detector_direction = kwargs['anomaly_detector_direction']
self.suppress_condition = kwargs['suppress_condition']
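# Illustrative sketch (not part of the generated models): how a
# SmartDetectionCondition could be built from the required kwargs listed in
# _validation above. SuppressCondition is defined later in this module, which
# is fine because the name is only resolved when the function is called. The
# numeric values are assumptions.
def _example_smart_detection_condition():
    return SmartDetectionCondition(
        sensitivity=63.0,                   # value range (0, 100]
        anomaly_detector_direction="Both",  # "Both", "Down" or "Up"
        suppress_condition=SuppressCondition(min_number=1, min_ratio=100.0),
    )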
class SmartDetectionConditionPatch(msrest.serialization.Model):
"""SmartDetectionConditionPatch.
:param sensitivity: sensitivity, value range : (0, 100].
:type sensitivity: float
:param anomaly_detector_direction: detection direction. Possible values include: "Both",
"Down", "Up".
:type anomaly_detector_direction: str or
~azure.ai.metricsadvisor.models.AnomalyDetectorDirection
:param suppress_condition:
:type suppress_condition: ~azure.ai.metricsadvisor.models.SuppressConditionPatch
"""
_attribute_map = {
'sensitivity': {'key': 'sensitivity', 'type': 'float'},
'anomaly_detector_direction': {'key': 'anomalyDetectorDirection', 'type': 'str'},
'suppress_condition': {'key': 'suppressCondition', 'type': 'SuppressConditionPatch'},
}
def __init__(
self,
**kwargs
):
super(SmartDetectionConditionPatch, self).__init__(**kwargs)
self.sensitivity = kwargs.get('sensitivity', None)
self.anomaly_detector_direction = kwargs.get('anomaly_detector_direction', None)
self.suppress_condition = kwargs.get('suppress_condition', None)
class SQLServerDataFeed(DataFeedDetail):
"""SQLServerDataFeed.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:ivar data_feed_id: data feed unique id.
:vartype data_feed_id: str
:param data_feed_name: Required. data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param granularity_name: Required. granularity of the time series. Possible values include:
"Yearly", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Custom".
:type granularity_name: str or ~azure.ai.metricsadvisor.models.Granularity
:param granularity_amount: if granularity is custom, it is required.
:type granularity_amount: int
:param metrics: Required. measure list.
:type metrics: list[~azure.ai.metricsadvisor.models.Metric]
:param dimension: dimension list.
:type dimension: list[~azure.ai.metricsadvisor.models.Dimension]
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: Required. ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:ivar is_admin: the query user is one of data feed administrator or not.
:vartype is_admin: bool
:ivar creator: data feed creator.
:vartype creator: str
:ivar status: data feed status. Possible values include: "Active", "Paused".
:vartype status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:ivar created_time: data feed created time.
:vartype created_time: ~datetime.datetime
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter: Required.
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SqlSourceParameter
"""
_validation = {
'data_source_type': {'required': True},
'data_feed_id': {'readonly': True},
'data_feed_name': {'required': True},
'granularity_name': {'required': True},
'metrics': {'required': True, 'unique': True},
'dimension': {'unique': True},
'data_start_from': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
'is_admin': {'readonly': True},
'creator': {'readonly': True},
'status': {'readonly': True},
'created_time': {'readonly': True},
'data_source_parameter': {'required': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_id': {'key': 'dataFeedId', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'granularity_name': {'key': 'granularityName', 'type': 'str'},
'granularity_amount': {'key': 'granularityAmount', 'type': 'int'},
'metrics': {'key': 'metrics', 'type': '[Metric]'},
'dimension': {'key': 'dimension', 'type': '[Dimension]'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'is_admin': {'key': 'isAdmin', 'type': 'bool'},
'creator': {'key': 'creator', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SqlSourceParameter'},
}
def __init__(
self,
**kwargs
):
super(SQLServerDataFeed, self).__init__(**kwargs)
self.data_source_type = 'SqlServer' # type: str
self.data_source_parameter = kwargs['data_source_parameter']
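# Illustrative sketch (not part of the generated models): a minimal
# SQLServerDataFeed built from the required kwargs listed in _validation.
# The `metrics` list is taken as a parameter because the constructor of the
# Metric model does not appear in this excerpt; the feed name, granularity and
# query are assumptions. SqlSourceParameter is defined later in this module,
# which is fine because the name is resolved when the function is called.
def _example_sql_server_data_feed(metrics, start_time):
    return SQLServerDataFeed(
        data_feed_name="sample-feed",
        granularity_name="Daily",
        metrics=metrics,             # list of Metric models built elsewhere
        data_start_from=start_time,  # a datetime.datetime
        data_source_parameter=SqlSourceParameter(
            connection_string="<connection-string>",
            query="select * from sample_table where ts = @StartTime",
        ),
    )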
class SQLServerDataFeedPatch(DataFeedDetailPatch):
"""SQLServerDataFeedPatch.
All required parameters must be populated in order to send to Azure.
:param data_source_type: Required. data source type. Constant filled by server. Possible values
include: "AzureApplicationInsights", "AzureBlob", "AzureCosmosDB", "AzureDataExplorer",
"AzureDataLakeStorageGen2", "AzureEventHubs", "AzureLogAnalytics", "AzureTable", "InfluxDB",
"MongoDB", "MySql", "PostgreSql", "SqlServer".
:type data_source_type: str or ~azure.ai.metricsadvisor.models.DataSourceType
:param data_feed_name: data feed name.
:type data_feed_name: str
:param data_feed_description: data feed description.
:type data_feed_description: str
:param timestamp_column: user-defined timestamp column. if timestampColumn is null, start time
of every time slice will be used as default value.
:type timestamp_column: str
:param data_start_from: ingestion start time.
:type data_start_from: ~datetime.datetime
:param start_offset_in_seconds: the time that the beginning of data ingestion task will delay
for every data slice according to this offset.
:type start_offset_in_seconds: long
:param max_concurrency: the max concurrency of data ingestion queries against user data source.
0 means no limitation.
:type max_concurrency: int
:param min_retry_interval_in_seconds: the min retry interval for failed data ingestion tasks.
:type min_retry_interval_in_seconds: long
:param stop_retry_after_in_seconds: stop retry data ingestion after the data slice first
schedule time in seconds.
:type stop_retry_after_in_seconds: long
:param need_rollup: mark if the data feed needs rollup. Possible values include: "NoRollup",
"NeedRollup", "AlreadyRollup".
:type need_rollup: str or ~azure.ai.metricsadvisor.models.NeedRollupEnum
:param roll_up_method: roll up method. Possible values include: "None", "Sum", "Max", "Min",
"Avg", "Count".
:type roll_up_method: str or ~azure.ai.metricsadvisor.models.RollUpMethod
:param roll_up_columns: roll up columns.
:type roll_up_columns: list[str]
:param all_up_identification: the identification value for the row of calculated all-up value.
:type all_up_identification: str
:param fill_missing_point_type: the type of fill missing point for anomaly detection. Possible
values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling".
:type fill_missing_point_type: str or ~azure.ai.metricsadvisor.models.FillMissingPointType
:param fill_missing_point_value: the value of fill missing point for anomaly detection.
:type fill_missing_point_value: float
:param view_mode: data feed access mode, default is Private. Possible values include:
"Private", "Public".
:type view_mode: str or ~azure.ai.metricsadvisor.models.ViewMode
:param admins: data feed administrator.
:type admins: list[str]
:param viewers: data feed viewer.
:type viewers: list[str]
:param status: data feed status. Possible values include: "Active", "Paused".
:type status: str or ~azure.ai.metricsadvisor.models.EntityStatus
:param action_link_template: action link for alert.
:type action_link_template: str
:param authentication_type: authentication type for corresponding data source. Possible values
include: "Basic", "ManagedIdentity", "AzureSQLConnectionString", "DataLakeGen2SharedKey",
"ServicePrincipal", "ServicePrincipalInKV".
:type authentication_type: str or ~azure.ai.metricsadvisor.models.AuthenticationTypeEnum
:param credential_id: The credential entity id.
:type credential_id: str
:param data_source_parameter:
:type data_source_parameter: ~azure.ai.metricsadvisor.models.SQLSourceParameterPatch
"""
_validation = {
'data_source_type': {'required': True},
'roll_up_columns': {'unique': True},
'admins': {'unique': True},
'viewers': {'unique': True},
}
_attribute_map = {
'data_source_type': {'key': 'dataSourceType', 'type': 'str'},
'data_feed_name': {'key': 'dataFeedName', 'type': 'str'},
'data_feed_description': {'key': 'dataFeedDescription', 'type': 'str'},
'timestamp_column': {'key': 'timestampColumn', 'type': 'str'},
'data_start_from': {'key': 'dataStartFrom', 'type': 'iso-8601'},
'start_offset_in_seconds': {'key': 'startOffsetInSeconds', 'type': 'long'},
'max_concurrency': {'key': 'maxConcurrency', 'type': 'int'},
'min_retry_interval_in_seconds': {'key': 'minRetryIntervalInSeconds', 'type': 'long'},
'stop_retry_after_in_seconds': {'key': 'stopRetryAfterInSeconds', 'type': 'long'},
'need_rollup': {'key': 'needRollup', 'type': 'str'},
'roll_up_method': {'key': 'rollUpMethod', 'type': 'str'},
'roll_up_columns': {'key': 'rollUpColumns', 'type': '[str]'},
'all_up_identification': {'key': 'allUpIdentification', 'type': 'str'},
'fill_missing_point_type': {'key': 'fillMissingPointType', 'type': 'str'},
'fill_missing_point_value': {'key': 'fillMissingPointValue', 'type': 'float'},
'view_mode': {'key': 'viewMode', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'viewers': {'key': 'viewers', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'action_link_template': {'key': 'actionLinkTemplate', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'credential_id': {'key': 'credentialId', 'type': 'str'},
'data_source_parameter': {'key': 'dataSourceParameter', 'type': 'SQLSourceParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(SQLServerDataFeedPatch, self).__init__(**kwargs)
self.data_source_type = 'SqlServer' # type: str
self.data_source_parameter = kwargs.get('data_source_parameter', None)
class SqlSourceParameter(msrest.serialization.Model):
"""SqlSourceParameter.
All required parameters must be populated in order to send to Azure.
:param connection_string: The connection string of this database.
:type connection_string: str
:param query: Required. The script to query this database.
:type query: str
"""
_validation = {
'query': {'required': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlSourceParameter, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.query = kwargs['query']
class SQLSourceParameterPatch(msrest.serialization.Model):
"""SQLSourceParameterPatch.
:param connection_string: The connection string of this database.
:type connection_string: str
:param query: The script to query this database.
:type query: str
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'query': {'key': 'query', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SQLSourceParameterPatch, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
self.query = kwargs.get('query', None)
class SuppressCondition(msrest.serialization.Model):
"""SuppressCondition.
All required parameters must be populated in order to send to Azure.
:param min_number: Required. min point number, value range : [1, +∞).
:type min_number: int
:param min_ratio: Required. min point ratio, value range : (0, 100].
:type min_ratio: float
"""
_validation = {
'min_number': {'required': True},
'min_ratio': {'required': True},
}
_attribute_map = {
'min_number': {'key': 'minNumber', 'type': 'int'},
'min_ratio': {'key': 'minRatio', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(SuppressCondition, self).__init__(**kwargs)
self.min_number = kwargs['min_number']
self.min_ratio = kwargs['min_ratio']
class SuppressConditionPatch(msrest.serialization.Model):
"""SuppressConditionPatch.
:param min_number: min point number, value range : [1, +∞).
:type min_number: int
:param min_ratio: min point ratio, value range : (0, 100].
:type min_ratio: float
"""
_attribute_map = {
'min_number': {'key': 'minNumber', 'type': 'int'},
'min_ratio': {'key': 'minRatio', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(SuppressConditionPatch, self).__init__(**kwargs)
self.min_number = kwargs.get('min_number', None)
self.min_ratio = kwargs.get('min_ratio', None)
class TopNGroupScope(msrest.serialization.Model):
"""TopNGroupScope.
All required parameters must be populated in order to send to Azure.
:param top: Required. top N, value range : [1, +∞).
:type top: int
:param period: Required. point count used to look back, value range : [1, +∞).
:type period: int
:param min_top_count: Required. min count should be in top N, value range : [1, +∞)
should be less than or equal to period.
:type min_top_count: int
"""
_validation = {
'top': {'required': True},
'period': {'required': True},
'min_top_count': {'required': True},
}
_attribute_map = {
'top': {'key': 'top', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'min_top_count': {'key': 'minTopCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(TopNGroupScope, self).__init__(**kwargs)
self.top = kwargs['top']
self.period = kwargs['period']
self.min_top_count = kwargs['min_top_count']
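# Illustrative sketch (not part of the generated models): a TopNGroupScope
# that watches the top 10 series over the last 20 points and requires a series
# to stay in the top 10 for at least 5 of them. The values are assumptions;
# per the docstring above, min_top_count should not exceed period.
def _example_top_n_group_scope():
    return TopNGroupScope(top=10, period=20, min_top_count=5)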
class UsageStats(msrest.serialization.Model):
"""UsageStats.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar timestamp: The timestamp of the stats.
:vartype timestamp: ~datetime.datetime
:ivar active_series_count: The active series count.
:vartype active_series_count: int
:ivar all_series_count: All series count under non deleted data feed.
:vartype all_series_count: int
:ivar metrics_count: The metrics count under non deleted data feed.
:vartype metrics_count: int
:ivar data_feed_count: The count of non deleted data feed.
:vartype data_feed_count: int
"""
_validation = {
'timestamp': {'readonly': True},
'active_series_count': {'readonly': True},
'all_series_count': {'readonly': True},
'metrics_count': {'readonly': True},
'data_feed_count': {'readonly': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'active_series_count': {'key': 'activeSeriesCount', 'type': 'int'},
'all_series_count': {'key': 'allSeriesCount', 'type': 'int'},
'metrics_count': {'key': 'metricsCount', 'type': 'int'},
'data_feed_count': {'key': 'dataFeedCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UsageStats, self).__init__(**kwargs)
self.timestamp = None
self.active_series_count = None
self.all_series_count = None
self.metrics_count = None
self.data_feed_count = None
class ValueCondition(msrest.serialization.Model):
"""ValueCondition.
All required parameters must be populated in order to send to Azure.
:param lower: lower bound
should be specified when direction is Both or Down.
:type lower: float
:param upper: upper bound
should be specified when direction is Both or Up.
:type upper: float
:param direction: Required. value filter direction. Possible values include: "Both", "Down",
"Up".
:type direction: str or ~azure.ai.metricsadvisor.models.Direction
:param type: data used to implement value filter. Possible values include: "Value", "Mean".
Default value: "Value".
:type type: str or ~azure.ai.metricsadvisor.models.ValueType
:param metric_id: the other metric unique id used for value filter.
:type metric_id: str
:param trigger_for_missing: trigger alert when the corresponding point is missing in the other
metric
should be specified only when using other metric to filter.
:type trigger_for_missing: bool
"""
_validation = {
'direction': {'required': True},
}
_attribute_map = {
'lower': {'key': 'lower', 'type': 'float'},
'upper': {'key': 'upper', 'type': 'float'},
'direction': {'key': 'direction', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'metric_id': {'key': 'metricId', 'type': 'str'},
'trigger_for_missing': {'key': 'triggerForMissing', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ValueCondition, self).__init__(**kwargs)
self.lower = kwargs.get('lower', None)
self.upper = kwargs.get('upper', None)
self.direction = kwargs['direction']
self.type = kwargs.get('type', "Value")
self.metric_id = kwargs.get('metric_id', None)
self.trigger_for_missing = kwargs.get('trigger_for_missing', None)
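# Illustrative sketch (not part of the generated models): a ValueCondition
# that filters for values dropping below a lower bound. `direction` is the
# only required kwarg; `type` defaults to "Value" as in __init__ above. The
# bound itself is an assumption.
def _example_value_condition():
    return ValueCondition(lower=100.0, direction="Down")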
class WebhookHookInfo(HookInfo):
"""WebhookHookInfo.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param hook_type: Required. hook type. Constant filled by server. Possible values include:
"Webhook", "Email".
:type hook_type: str or ~azure.ai.metricsadvisor.models.HookType
:ivar hook_id: Hook unique id.
:vartype hook_id: str
:param hook_name: Required. hook unique name.
:type hook_name: str
:param description: hook description.
:type description: str
:param external_link: hook external link.
:type external_link: str
:param admins: hook administrators.
:type admins: list[str]
:param hook_parameter: Required.
:type hook_parameter: ~azure.ai.metricsadvisor.models.WebhookHookParameter
"""
_validation = {
'hook_type': {'required': True},
'hook_id': {'readonly': True},
'hook_name': {'required': True},
'admins': {'unique': True},
'hook_parameter': {'required': True},
}
_attribute_map = {
'hook_type': {'key': 'hookType', 'type': 'str'},
'hook_id': {'key': 'hookId', 'type': 'str'},
'hook_name': {'key': 'hookName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'external_link': {'key': 'externalLink', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'hook_parameter': {'key': 'hookParameter', 'type': 'WebhookHookParameter'},
}
def __init__(
self,
**kwargs
):
super(WebhookHookInfo, self).__init__(**kwargs)
self.hook_type = 'Webhook' # type: str
self.hook_parameter = kwargs['hook_parameter']
class WebhookHookInfoPatch(HookInfoPatch):
"""WebhookHookInfoPatch.
All required parameters must be populated in order to send to Azure.
:param hook_type: Required. hook type. Constant filled by server. Possible values include:
"Webhook", "Email".
:type hook_type: str or ~azure.ai.metricsadvisor.models.HookType
:param hook_name: hook unique name.
:type hook_name: str
:param description: hook description.
:type description: str
:param external_link: hook external link.
:type external_link: str
:param admins: hook administrators.
:type admins: list[str]
:param hook_parameter:
:type hook_parameter: ~azure.ai.metricsadvisor.models.WebhookHookParameterPatch
"""
_validation = {
'hook_type': {'required': True},
'admins': {'unique': True},
}
_attribute_map = {
'hook_type': {'key': 'hookType', 'type': 'str'},
'hook_name': {'key': 'hookName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'external_link': {'key': 'externalLink', 'type': 'str'},
'admins': {'key': 'admins', 'type': '[str]'},
'hook_parameter': {'key': 'hookParameter', 'type': 'WebhookHookParameterPatch'},
}
def __init__(
self,
**kwargs
):
super(WebhookHookInfoPatch, self).__init__(**kwargs)
self.hook_type = 'Webhook' # type: str
self.hook_parameter = kwargs.get('hook_parameter', None)
class WebhookHookParameter(msrest.serialization.Model):
"""WebhookHookParameter.
All required parameters must be populated in order to send to Azure.
:param endpoint: Required. API address, will be called when an alert is triggered; only the
POST method via SSL is supported.
:type endpoint: str
:param username: (Deprecated) The username, if using basic authentication.
:type username: str
:param password: (Deprecated) The password, if using basic authentication.
:type password: str
:param headers: custom headers in api call.
:type headers: dict[str, str]
:param certificate_key: The certificate key/URL, if using a client certificate; please read the
documentation for more information.
:type certificate_key: str
:param certificate_password: The certificate password, if using a client certificate; please
read the documentation for more information.
:type certificate_password: str
"""
_validation = {
'endpoint': {'required': True},
}
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'headers': {'key': 'headers', 'type': '{str}'},
'certificate_key': {'key': 'certificateKey', 'type': 'str'},
'certificate_password': {'key': 'certificatePassword', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WebhookHookParameter, self).__init__(**kwargs)
self.endpoint = kwargs['endpoint']
self.username = kwargs.get('username', None)
self.password = kwargs.get('password', None)
self.headers = kwargs.get('headers', None)
self.certificate_key = kwargs.get('certificate_key', None)
self.certificate_password = kwargs.get('certificate_password', None)
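# Illustrative sketch (not part of the generated models): a webhook hook built
# from WebhookHookParameter and wrapped in WebhookHookInfo (defined above).
# The endpoint URL and header values are assumptions; the endpoint must accept
# POST over SSL, as the docstring above notes.
def _example_webhook_hook():
    parameter = WebhookHookParameter(
        endpoint="https://contoso.example/alerts",
        headers={"x-api-key": "<secret>"},
    )
    return WebhookHookInfo(hook_name="sample-webhook", hook_parameter=parameter)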
class WebhookHookParameterPatch(msrest.serialization.Model):
"""WebhookHookParameterPatch.
:param endpoint: API address, will be called when an alert is triggered; only the POST method
via SSL is supported.
:type endpoint: str
:param username: (Deprecated) The username, if using basic authentication.
:type username: str
:param password: (Deprecated) The password, if using basic authentication.
:type password: str
:param headers: custom headers in api call.
:type headers: dict[str, str]
:param certificate_key: The certificate key, if using client certificate.
:type certificate_key: str
:param certificate_password: The certificate password, if using client certificate.
:type certificate_password: str
"""
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'headers': {'key': 'headers', 'type': '{str}'},
'certificate_key': {'key': 'certificateKey', 'type': 'str'},
'certificate_password': {'key': 'certificatePassword', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(WebhookHookParameterPatch, self).__init__(**kwargs)
self.endpoint = kwargs.get('endpoint', None)
self.username = kwargs.get('username', None)
self.password = kwargs.get('password', None)
self.headers = kwargs.get('headers', None)
self.certificate_key = kwargs.get('certificate_key', None)
self.certificate_password = kwargs.get('certificate_password', None)
class WholeMetricConfiguration(msrest.serialization.Model):
"""WholeMetricConfiguration.
:param condition_operator: condition operator
should be specified when combining multiple detection conditions. Possible values include:
"AND", "OR".
:type condition_operator: str or
~azure.ai.metricsadvisor.models.AnomalyDetectionConfigurationLogicType
:param smart_detection_condition:
:type smart_detection_condition: ~azure.ai.metricsadvisor.models.SmartDetectionCondition
:param hard_threshold_condition:
:type hard_threshold_condition: ~azure.ai.metricsadvisor.models.HardThresholdCondition
:param change_threshold_condition:
:type change_threshold_condition: ~azure.ai.metricsadvisor.models.ChangeThresholdCondition
"""
_attribute_map = {
'condition_operator': {'key': 'conditionOperator', 'type': 'str'},
'smart_detection_condition': {'key': 'smartDetectionCondition', 'type': 'SmartDetectionCondition'},
'hard_threshold_condition': {'key': 'hardThresholdCondition', 'type': 'HardThresholdCondition'},
'change_threshold_condition': {'key': 'changeThresholdCondition', 'type': 'ChangeThresholdCondition'},
}
def __init__(
self,
**kwargs
):
super(WholeMetricConfiguration, self).__init__(**kwargs)
self.condition_operator = kwargs.get('condition_operator', None)
self.smart_detection_condition = kwargs.get('smart_detection_condition', None)
self.hard_threshold_condition = kwargs.get('hard_threshold_condition', None)
self.change_threshold_condition = kwargs.get('change_threshold_condition', None)
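# Illustrative sketch (not part of the generated models): a metric-level
# detection configuration that combines a single smart-detection condition
# with a condition operator. Only classes whose constructors appear in this
# excerpt are used; the numeric values are assumptions.
def _example_whole_metric_configuration():
    return WholeMetricConfiguration(
        condition_operator="AND",
        smart_detection_condition=SmartDetectionCondition(
            sensitivity=50.0,
            anomaly_detector_direction="Both",
            suppress_condition=SuppressCondition(min_number=1, min_ratio=50.0),
        ),
    )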
class WholeMetricConfigurationPatch(msrest.serialization.Model):
"""WholeMetricConfigurationPatch.
:param condition_operator: condition operator
should be specified when combining multiple detection conditions. Possible values include:
"AND", "OR".
:type condition_operator: str or
~azure.ai.metricsadvisor.models.AnomalyDetectionConfigurationLogicType
:param smart_detection_condition:
:type smart_detection_condition: ~azure.ai.metricsadvisor.models.SmartDetectionConditionPatch
:param hard_threshold_condition:
:type hard_threshold_condition: ~azure.ai.metricsadvisor.models.HardThresholdConditionPatch
:param change_threshold_condition:
:type change_threshold_condition: ~azure.ai.metricsadvisor.models.ChangeThresholdConditionPatch
"""
_attribute_map = {
'condition_operator': {'key': 'conditionOperator', 'type': 'str'},
'smart_detection_condition': {'key': 'smartDetectionCondition', 'type': 'SmartDetectionConditionPatch'},
'hard_threshold_condition': {'key': 'hardThresholdCondition', 'type': 'HardThresholdConditionPatch'},
'change_threshold_condition': {'key': 'changeThresholdCondition', 'type': 'ChangeThresholdConditionPatch'},
}
def __init__(
self,
**kwargs
):
super(WholeMetricConfigurationPatch, self).__init__(**kwargs)
self.condition_operator = kwargs.get('condition_operator', None)
self.smart_detection_condition = kwargs.get('smart_detection_condition', None)
self.hard_threshold_condition = kwargs.get('hard_threshold_condition', None)
self.change_threshold_condition = kwargs.get('change_threshold_condition', None)
| 44.491918
| 635
| 0.675786
| 40,969
| 374,355
| 5.964754
| 0.01899
| 0.026812
| 0.034374
| 0.044195
| 0.869231
| 0.852756
| 0.844883
| 0.834669
| 0.825883
| 0.816696
| 0
| 0.001865
| 0.194893
| 374,355
| 8,413
| 636
| 44.497207
| 0.808896
| 0.481978
| 0
| 0.736539
| 0
| 0
| 0.38552
| 0.101136
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041035
| false
| 0.003136
| 0.000523
| 0
| 0.16022
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a871e02f56928dcc293dcadcec21c3acb9f66f8
| 1,971
|
py
|
Python
|
Books/GodOfPython/P00_OriginalSource/ch11/House.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | 2
|
2020-12-05T07:42:55.000Z
|
2021-01-06T23:23:18.000Z
|
Books/GodOfPython/P00_OriginalSource/ch11/House.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | null | null | null |
Books/GodOfPython/P00_OriginalSource/ch11/House.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | null | null | null |
# C:\gop\ch11\House.py
class House(object): # Define the House class
def __init__(self, year, acreages, address, price):
self.year = year
self.acreages = acreages
self.address = address
self.price = price
def change_price(self, rate):
self.price = self.price * rate
def show_info(self):
print("This house is built in {}, \
acreages : {}, \
address : {}, \
price : {} "
.format(self.year, self.acreages, self.address, self.price))
if __name__ == "__main__":
house_A = House(1999, 100, "seoul", 777777777) # Create the house_A object
house_A.show_info() # Call a method through the object
class House2(object): # Define the House2 class
Company = "Python Factory" # class attribute
def __init__(self, year, acreages, address, price):
self.year = year
self.acreages = acreages
self.address = address
self.price = price
def show_Company(self):
print(House2.Company)
def change_price(self, rate):
self.price = self.price * rate
def show_info(self):
print("This house was built by {} in {}, \
acreages : {}, \
address : {}, \
price : {} "
.format(House2.Company, self.year, self.acreages, self.address, self.price))
class House3(object): # Define the House3 class
Company = "Python Factory" # class attribute
def __init__(self, year, acreages, address, price):
self.__year = year
self.__acreages = acreages
self.__address = address
self.__price = price
def show_Company(self):
print(House3.Company)
def change_price(self, rate):
self.__price = self.__price * rate
def show_info(self):
print("This house was built by {} in {}, \
acreages : {}, \
address : {}, \
price : {} "
.format(House3.Company, self.__year, self.__acreages, self.__address, self.__price))
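# Illustrative addition (not in the original sample): House3 keeps its data in
# name-mangled "private" attributes (self.__price and friends), so it is used
# through its methods rather than by touching the attributes directly.
if __name__ == "__main__":
    house_B = House3(2005, 80, "busan", 555555555)
    house_B.change_price(1.1)  # raise the price by 10% via the method
    house_B.show_info()        # prints the state without direct attribute access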
| 28.985294
| 98
| 0.568239
| 228
| 1,971
| 4.684211
| 0.214912
| 0.101124
| 0.11236
| 0.042135
| 0.840824
| 0.814607
| 0.814607
| 0.814607
| 0.777154
| 0.689139
| 0
| 0.019188
| 0.312532
| 1,971
| 67
| 99
| 29.41791
| 0.769004
| 0.052258
| 0
| 0.666667
| 0
| 0
| 0.022055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.215686
| false
| 0
| 0
| 0
| 0.313725
| 0.098039
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0adf2eb19a335c35c74d4f6c552ccd0ad4181a00
| 9,890
|
py
|
Python
|
shexer/local_code/test_shexer.py
|
white-gecko/shexerp3
|
afa24192c0c8375f5c6446fbb57bfc533707e97f
|
[
"Apache-2.0"
] | null | null | null |
shexer/local_code/test_shexer.py
|
white-gecko/shexerp3
|
afa24192c0c8375f5c6446fbb57bfc533707e97f
|
[
"Apache-2.0"
] | null | null | null |
shexer/local_code/test_shexer.py
|
white-gecko/shexerp3
|
afa24192c0c8375f5c6446fbb57bfc533707e97f
|
[
"Apache-2.0"
] | null | null | null |
from shexer.shaper import Shaper
from shexer.consts import TURTLE
a_graph = """
@base <http://library.edu/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix bf: <http://id.loc.gov/ontologies/bibframe.rdf/> .
<Instance_1> a bf:Instance ;
bf:seriesStatement "Department of State publication" ;
bf:seriesEnumeration "8961" ;
bf:subseriesStatement "General foreign policy series, 9876-5432" ;
bf:subseriesEnumeration "volume 310" .
bf:Instance_2 a bf:Instance ;
bf:seriesStatement "Department of another stuff" ;
bf:seriesEnumeration "8961" ;
rdfs:label "Cosa wena" ;
bf:subseriesStatement "General foreign policy series, 1232-5674" ;
bf:subseriesEnumeration "volume 311" .
bf:Instance rdfs:subClassOf bf:Concept .
bf:Concept rdfs:subClassOf bf:Class .
"""
namespaces_dict={
"http://id.loc.gov/ontologies/bibframe.rdf/" : "bf",
"http://weso.es/" : "",
"http://www.w3.org/2000/01/rdf-schema#": "rdfs"
}
### raw + all_classes
shaper = Shaper(raw_graph=a_graph,
all_classes_mode=True,
input_format=TURTLE,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### raw + target_classes
print("---------")
target_classes = ["http://id.loc.gov/ontologies/bibframe.rdf/Instance"]
shaper = Shaper(raw_graph=a_graph,
all_classes_mode=False,
target_classes=target_classes,
input_format=TURTLE,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### raw + selectors
from shexer.consts import FIXED_SHAPE_MAP
raw_shape_map = """
{FOCUS rdfs:subClassOf _}@:a_child
bf:Instance_2@:certain_instance
"""
print("---------")
shaper = Shaper(raw_graph=a_graph,
all_classes_mode=False,
target_classes=None,
input_format=TURTLE,
shape_map_raw=raw_shape_map,
shape_map_format=FIXED_SHAPE_MAP,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### raw + selectors + all_classes
print("---------")
shaper = Shaper(raw_graph=a_graph,
all_classes_mode=True,
target_classes=None,
input_format=TURTLE,
shape_map_raw=raw_shape_map,
shape_map_format=FIXED_SHAPE_MAP,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
################################################### File input
print("------------------------------------------------------------"
"")
file_graph_name = "files\\test_shexer_graph.ttl"
### file + all_classes
shaper = Shaper(graph_file_input=file_graph_name,
all_classes_mode=True,
input_format=TURTLE,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### file + target_classes
print("---------")
target_classes = ["http://id.loc.gov/ontologies/bibframe.rdf/Instance"]
shaper = Shaper(graph_file_input=file_graph_name,
all_classes_mode=False,
target_classes=target_classes,
input_format=TURTLE,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### file + selectors
from shexer.consts import FIXED_SHAPE_MAP
raw_shape_map = """
{FOCUS rdfs:subClassOf _}@:a_child
bf:Instance_2@:certain_instance
"""
print("---------")
shaper = Shaper(graph_file_input=file_graph_name,
all_classes_mode=False,
target_classes=None,
input_format=TURTLE,
shape_map_raw=raw_shape_map,
shape_map_format=FIXED_SHAPE_MAP,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### file + selectors + all_classes
print("---------")
shaper = Shaper(graph_file_input=file_graph_name,
all_classes_mode=True,
target_classes=None,
input_format=TURTLE,
shape_map_raw=raw_shape_map,
shape_map_format=FIXED_SHAPE_MAP,
namespaces_dict=namespaces_dict)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
######################################################### endpoint
### endpoint + target
print("---------")
target_classes = ["http://www.wikidata.org/entity/Q44062313", "http://www.wikidata.org/entity/Q54856362"]
endpoint_url = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
instantiation_property = "http://www.wikidata.org/prop/direct/P1344"
shaper = Shaper(all_classes_mode=False,
target_classes=target_classes,
url_endpoint=endpoint_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property,
track_classes_for_entities_at_last_depth_level=False)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### endpoint + selectors
endpoint_url = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
instantiation_property = "http://www.wikidata.org/prop/direct/P31"
raw_shape_map = """
SPARQL 'SELECT ?s WHERE { ?s <http://www.wikidata.org/prop/direct/P1344> <http://www.wikidata.org/entity/Q44062313> ; <http://www.wikidata.org/prop/direct/P19> <http://www.wikidata.org/entity/Q14317> . }'@:wikifreakoviedo
"""
shaper = Shaper(all_classes_mode=False,
shape_map_raw=raw_shape_map,
url_endpoint=endpoint_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property,
track_classes_for_entities_at_last_depth_level=False)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
## endpoint + selectors + all_classes_mode
endpoint_url = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
instantiation_property = "http://www.wikidata.org/prop/direct/P31"
raw_shape_map = """
SPARQL 'SELECT ?s WHERE { ?s <http://www.wikidata.org/prop/direct/P1344> <http://www.wikidata.org/entity/Q44062313> ; <http://www.wikidata.org/prop/direct/P19> <http://www.wikidata.org/entity/Q14317> . }'@:wikifreakoviedo
"""
shaper = Shaper(all_classes_mode=True,
shape_map_raw=raw_shape_map,
url_endpoint=endpoint_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property,
track_classes_for_entities_at_last_depth_level=False)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
###################################################### input_url
### remote url + all_classes
from shexer.consts import RDF_TYPE, RDF_XML
print("---------")
remote_graph_url = "http://xmlns.com/foaf/spec/index.rdf"
instantiation_property = RDF_TYPE
shaper = Shaper(all_classes_mode=True,
input_format=RDF_XML,
url_graph_input=remote_graph_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### remote_url + target_classes
print("---------")
remote_graph_url = "http://xmlns.com/foaf/spec/index.rdf"
instantiation_property = RDF_TYPE
shaper = Shaper(all_classes_mode=False,
target_classes=["http://www.w3.org/2002/07/owl#AnnotationProperty" ],
input_format=RDF_XML,
url_graph_input=remote_graph_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### remote_url + selectors
print("---------")
remote_graph_url = "http://xmlns.com/foaf/spec/index.rdf"
instantiation_property = RDF_TYPE
raw_selector = """
{FOCUS rdfs:isDefinedBy _}@<:soyDenifidoPor>
<http://xmlns.com/foaf/0.1/Project>@<:Proyectico>
"""
shaper = Shaper(all_classes_mode=False,
shape_map_raw=raw_selector,
input_format=RDF_XML,
url_graph_input=remote_graph_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
### remote_url + selectors + all_classes
print("---------")
remote_graph_url = "http://xmlns.com/foaf/spec/index.rdf"
instantiation_property = RDF_TYPE
raw_selector = """
{FOCUS rdfs:isDefinedBy _}@<:soyDenifidoPor>
<http://xmlns.com/foaf/0.1/Project>@<:Proyectico>
"""
shaper = Shaper(all_classes_mode=True,
shape_map_raw=raw_selector,
input_format=RDF_XML,
url_graph_input=remote_graph_url,
namespaces_dict=namespaces_dict,
instantiation_property=instantiation_property)
result = shaper.shex_graph(aceptance_threshold=0.5,
string_output=True)
print(result)
| 29.789157
| 222
| 0.638827
| 1,125
| 9,890
| 5.32
| 0.131556
| 0.072515
| 0.035088
| 0.070175
| 0.895572
| 0.890226
| 0.874185
| 0.85447
| 0.826065
| 0.826065
| 0
| 0.020901
| 0.225986
| 9,890
| 331
| 223
| 29.879154
| 0.760941
| 0.039737
| 0
| 0.859813
| 0
| 0.018692
| 0.272639
| 0.025627
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023364
| 0
| 0.023364
| 0.126168
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0aed366f9287dce958ca77ed4ab3f432e66d58d2
| 110
|
py
|
Python
|
app/back/mongo/data/collect/graticules/__init__.py
|
jgphilpott/polyplot
|
c46861174ee5881dadffbfb2278d555462523547
|
[
"MIT"
] | 5
|
2021-05-17T14:17:14.000Z
|
2021-12-14T12:54:32.000Z
|
app/back/mongo/data/collect/graticules/__init__.py
|
jgphilpott/iGraph
|
2a91ba57e4950856a83d3a109753f8f2badee829
|
[
"MIT"
] | 8
|
2020-02-09T02:48:41.000Z
|
2021-05-16T04:57:02.000Z
|
app/back/mongo/data/collect/graticules/__init__.py
|
jgphilpott/iGraph
|
2a91ba57e4950856a83d3a109753f8f2badee829
|
[
"MIT"
] | 2
|
2016-09-12T03:48:16.000Z
|
2019-05-04T14:15:19.000Z
|
from back.mongo.data.collect.graticules.model import *
from back.mongo.data.collect.graticules.mongo import *
| 36.666667
| 54
| 0.818182
| 16
| 110
| 5.625
| 0.5
| 0.177778
| 0.288889
| 0.377778
| 0.755556
| 0.755556
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 2
| 55
| 55
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e4047a599de7714fc262eabc490c88c6499a2ca9
| 72,721
|
py
|
Python
|
calico/felix/test/test_endpoint.py
|
ozdanborne/felix
|
5eff313e6498b3a7d775aa16cb09fd4578331701
|
[
"Apache-2.0"
] | 6
|
2016-10-18T04:04:25.000Z
|
2016-10-18T04:06:49.000Z
|
calico/felix/test/test_endpoint.py
|
ozdanborne/felix
|
5eff313e6498b3a7d775aa16cb09fd4578331701
|
[
"Apache-2.0"
] | 1
|
2021-06-01T21:45:37.000Z
|
2021-06-01T21:45:37.000Z
|
calico/felix/test/test_endpoint.py
|
ozdanborne/felix
|
5eff313e6498b3a7d775aa16cb09fd4578331701
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 Tigera, Inc. All rights reserved.
# Copyright (c) 2015 Cisco Systems. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Tests of endpoint module.
"""
from collections import OrderedDict
from contextlib import nested
import logging
from netaddr import IPAddress
from calico.felix.dispatch import HostEndpointDispatchChains
from calico.felix.dispatch import WorkloadDispatchChains
from calico.felix.plugins.fiptgenerator import FelixIptablesGenerator
from calico.felix.selectors import parse_selector
from calico.felix.endpoint import EndpointManager, WorkloadEndpoint, \
HostEndpoint
from calico.felix.fetcd import EtcdStatusReporter
from calico.felix.fiptables import IptablesUpdater
from calico.felix.futils import FailedSystemCall
from calico.felix.profilerules import RulesManager
from calico.felix.fipmanager import FloatingIPManager
import mock
from mock import Mock
from calico.felix.test.base import BaseTestCase, load_config
from calico.felix.test import stub_utils
from calico.felix import endpoint
from calico.felix import futils
from calico.datamodel_v1 import WloadEndpointId, TieredPolicyId, HostEndpointId, \
ResolvedHostEndpointId
_log = logging.getLogger(__name__)
mock.patch.object = getattr(mock.patch, "object") # Keep PyCharm linter happy.
ENDPOINT_ID = WloadEndpointId("hostname", "b", "c", "d")
ENDPOINT_ID_2 = WloadEndpointId("hostname", "b", "c1", "d1")
HOST_ENDPOINT_ID = HostEndpointId("hostname", "id0")
class TestEndpointManager(BaseTestCase):
def setUp(self):
super(TestEndpointManager, self).setUp()
self.config = load_config("felix_default.cfg", env_dict={
"FELIX_FELIXHOSTNAME": "hostname"})
self.m_updater = Mock(spec=IptablesUpdater)
self.m_wl_dispatch = Mock(spec=WorkloadDispatchChains)
self.m_host_dispatch = Mock(spec=HostEndpointDispatchChains)
self.m_rules_mgr = Mock(spec=RulesManager)
self.m_fip_manager = Mock(spec=FloatingIPManager)
self.m_status_reporter = Mock(spec=EtcdStatusReporter)
self.mgr = EndpointManager(self.config, "IPv4", self.m_updater,
self.m_wl_dispatch, self.m_host_dispatch,
self.m_rules_mgr, self.m_fip_manager,
self.m_status_reporter)
self.mgr.get_and_incref = Mock()
self.mgr.decref = Mock()
def test_create(self):
obj = self.mgr._create(ENDPOINT_ID)
self.assertTrue(isinstance(obj, WorkloadEndpoint))
def test_create_host_ep(self):
obj = self.mgr._create(HOST_ENDPOINT_ID.resolve("eth0"))
self.assertTrue(isinstance(obj, HostEndpoint))
def test_create_host_ep_unexpected(self):
self.assertRaises(RuntimeError, self.mgr._create, HOST_ENDPOINT_ID)
def test_on_actor_started(self):
with mock.patch.object(self.mgr, "_iface_poll_greenlet") as m_glet:
self.mgr._on_actor_started()
m_glet.start.assert_called_once_with()
def test_on_started(self):
ep = {"name": "tap1234"}
self.mgr.on_endpoint_update(ENDPOINT_ID,
ep,
async=True)
self.step_actor(self.mgr)
m_endpoint = Mock(spec=WorkloadEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
self.mgr._on_object_started(ENDPOINT_ID, m_endpoint)
self.assertEqual(
m_endpoint.on_endpoint_update.mock_calls,
[mock.call(ep, async=True)]
)
def test_on_datamodel_in_sync(self):
ep = {"name": "tap1234"}
self.mgr.on_endpoint_update(ENDPOINT_ID,
ep,
async=True)
host_ep = {"name": "eth1", "expected_ipv4_addrs": ["10.0.0.1"]}
self.mgr.on_host_ep_update(HOST_ENDPOINT_ID,
host_ep,
async=True)
self.step_actor(self.mgr)
self.mgr.on_datamodel_in_sync(async=True)
self.step_actor(self.mgr)
self.assertEqual(
self.m_wl_dispatch.apply_snapshot.mock_calls,
[mock.call(frozenset(["tap1234"]), async=True)]
)
self.assertEqual(
self.m_host_dispatch.apply_snapshot.mock_calls,
[mock.call(frozenset(["eth1"]), async=True)]
)
# Second call should have no effect.
self.m_wl_dispatch.apply_snapshot.reset_mock()
self.mgr.on_datamodel_in_sync(async=True)
self.step_actor(self.mgr)
self.assertEqual(self.m_wl_dispatch.apply_snapshot.mock_calls, [])
def test_tiered_policy_ordering_and_updates(self):
"""
Check that the tier_sequence ordering is updated correctly as we
add and remove tiers and policies.
"""
# Make sure we have an endpoint so that we can check that it gets
# put in the dirty set.
self.mgr.on_datamodel_in_sync(async=True)
self.mgr.on_endpoint_update(ENDPOINT_ID,
{"name": "tap12345"},
async=True)
self.step_actor(self.mgr)
# Pretend that the endpoint is alive so that we'll send updates to it.
m_endpoint = Mock(spec=WorkloadEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
self.mgr._is_starting_or_live = Mock(return_value=True)
# Add a profile into the tier so it'll apply to the endpoint.
pol_id_a = TieredPolicyId("a", "a1")
self.mgr.on_policy_selector_update(pol_id_a, parse_selector("all()"),
10, async=True)
pol_id_b = TieredPolicyId("b", "b1")
self.mgr.on_policy_selector_update(pol_id_b, parse_selector("all()"),
10, async=True)
pol_id_c1 = TieredPolicyId("c1", "c1")
self.mgr.on_policy_selector_update(pol_id_c1, parse_selector("all()"),
10, async=True)
pol_id_c2 = TieredPolicyId("c2", "c2")
self.mgr.on_policy_selector_update(pol_id_c2, parse_selector("all()"),
10, async=True)
pol_id_c3 = TieredPolicyId("c3", "c3")
self.mgr.on_policy_selector_update(pol_id_c3, parse_selector("all()"),
10, async=True)
self.step_actor(self.mgr)
# Since we haven't set the tier ID yet, the policy won't get applied...
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(OrderedDict(), async=True)] * 5)
m_endpoint.on_tiered_policy_update.reset_mock()
# Adding a tier should trigger an update, adding the tier and policy.
self.mgr.on_tier_data_update("a", {"order": 1}, async=True)
self.step_actor(self.mgr)
self.assertEqual(self.mgr.endpoints_with_dirty_policy, set())
tiers = OrderedDict()
tiers["a"] = [pol_id_a]
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)])
m_endpoint.on_tiered_policy_update.reset_mock()
# Idempotent update should get squashed.
self.mgr.on_tier_data_update("a", {"order": 2}, async=True)
self.mgr.on_tier_data_update("a", {"order": 2}, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls, [])
# Adding another tier should trigger an update.
self.mgr.on_tier_data_update("b", {"order": 3}, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
tiers["a"] = [pol_id_a]
tiers["b"] = [pol_id_b]
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)])
m_endpoint.on_tiered_policy_update.reset_mock()
# Swapping the order should trigger an update.
self.mgr.on_tier_data_update("b", {"order": 1}, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
tiers["b"] = [pol_id_b]
tiers["a"] = [pol_id_a]
self.assertEqual(m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)])
m_endpoint.on_tiered_policy_update.reset_mock()
# Check deletion and that it's idempotent.
self.mgr.on_tier_data_update("b", None, async=True)
self.step_actor(self.mgr)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.step_actor(self.mgr)
self.mgr.on_tier_data_update("b", None, async=True)
self.step_actor(self.mgr)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.mgr.on_policy_selector_update(pol_id_b, None, None, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
tiers["a"] = [pol_id_a]
self.assertEqual(
m_endpoint.on_tiered_policy_update.mock_calls,
[mock.call(tiers, async=True)] * 2 # One for policy, one for tier.
)
m_endpoint.on_tiered_policy_update.reset_mock()
# Check lexicographic tie-breaker.
self.mgr.on_tier_data_update("c1", {"order": 0}, async=True)
self.mgr.on_tier_data_update("c2", {"order": 0}, async=True)
self.mgr.on_tier_data_update("c3", {"order": 0}, async=True)
self.step_actor(self.mgr)
tiers = OrderedDict()
# All 'c's should sort before 'a' due to explicit ordering but 'c's
# should sort in lexicographic order.
tiers["c1"] = [pol_id_c1]
tiers["c2"] = [pol_id_c2]
tiers["c3"] = [pol_id_c3]
tiers["a"] = [pol_id_a]
actual_call = m_endpoint.on_tiered_policy_update.mock_calls[-1]
expected_call = mock.call(tiers, async=True)
self.assertEqual(actual_call, expected_call,
msg="\nExpected: %s\n Got: %s" %
(expected_call, actual_call))
m_endpoint.on_tiered_policy_update.reset_mock()
def test_label_inheritance(self):
# Make sure we have an endpoint so that we can check that it gets
# put in the dirty set. These have no labels at all so we test
# that no labels gets translated to an empty dict.
self.mgr.on_endpoint_update(ENDPOINT_ID, {"name": "tap12345",
"profile_ids": ["prof1"]},
async=True)
self.mgr.on_endpoint_update(ENDPOINT_ID_2, {"name": "tap23456",
"profile_ids": ["prof2"]},
async=True)
# And we need a selector to pick out one of the endpoints by the labels
# attached to its parent.
self.mgr.on_policy_selector_update(TieredPolicyId("a", "b"),
parse_selector('a == "b"'),
10,
async=True)
self.step_actor(self.mgr)
with mock.patch.object(self.mgr, "_update_dirty_policy") as m_update:
self.mgr.on_prof_labels_set("prof1", {"a": "b"}, async=True)
self.step_actor(self.mgr)
# Only the first endpoint should end up matching the selector.
self.assertEqual(self.mgr.endpoints_with_dirty_policy,
set([ENDPOINT_ID]))
# And an update should be triggered.
self.assertEqual(m_update.mock_calls, [mock.call()])
def test_endpoint_update_not_our_host(self):
ep = {"name": "tap1234"}
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
self.mgr.on_endpoint_update(
WloadEndpointId("notus", "b", "c", "d"),
ep,
async=True)
self.step_actor(self.mgr)
self.assertFalse(m_sol.called)
def test_endpoint_live_obj(self):
ep = {"name": "tap1234"}
# First send in an update to trigger creation.
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.step_actor(self.mgr)
self.assertEqual(self.mgr.get_and_incref.mock_calls,
[mock.call(ENDPOINT_ID)])
m_endpoint = Mock(spec=WorkloadEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
# Then send a second update to check that it gets passed on to the
# WorkloadEndpoint.
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = True
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)])
self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
[mock.call(ep, force_reprogram=False,
async=True)])
self.assertTrue(ENDPOINT_ID in self.mgr.local_endpoint_ids)
# Finally, send in a deletion.
m_endpoint.on_endpoint_update.reset_mock()
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = True
self.mgr.on_endpoint_update(ENDPOINT_ID, None, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
[mock.call(None, force_reprogram=False,
async=True)])
self.assertEqual(self.mgr.decref.mock_calls, [mock.call(ENDPOINT_ID)])
self.assertFalse(ENDPOINT_ID in self.mgr.local_endpoint_ids)
def test_endpoint_interface_rename(self):
ep = {"name": "tap1234"}
# First send in an update to trigger creation.
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.step_actor(self.mgr)
self.assertEqual(self.mgr.get_and_incref.mock_calls,
[mock.call(ENDPOINT_ID)])
m_endpoint = Mock(spec=WorkloadEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
# Then send an update with a different interface name. This should be
# treated as a delete then an add.
ep2 = {"name": "tap2345"}
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.side_effect = iter([True, False])
self.mgr.on_endpoint_update(ENDPOINT_ID, ep2, async=True)
self.step_actor(self.mgr)
# One call for deletion, one for creation:
self.assertEqual(m_sol.mock_calls, [mock.call(ENDPOINT_ID)] * 2)
# Deletion of old endpoint:
self.assertEqual(m_endpoint.on_endpoint_update.mock_calls,
[mock.call(None, force_reprogram=False,
async=True)])
self.assertEqual(self.mgr.decref.mock_calls, [mock.call(ENDPOINT_ID)])
# Should have another creation:
self.assertEqual(self.mgr.get_and_incref.mock_calls,
[mock.call(ENDPOINT_ID)] * 2)
self.assertTrue(ENDPOINT_ID in self.mgr.local_endpoint_ids)
def test_on_interface_update_unknown(self):
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
self.mgr.on_interface_update("foo", True, async=True)
self.step_actor(self.mgr)
self.assertFalse(m_sol.called)
def test_on_interface_update_known(self):
ep = {"name": "tap1234"}
m_endpoint = Mock(spec=WorkloadEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = True
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.mgr.on_interface_update("tap1234", True, async=True)
self.step_actor(self.mgr)
self.assertEqual(
m_endpoint.on_interface_update.mock_calls,
[mock.call(True, async=True)]
)
def test_on_interface_update_known_but_not_live(self):
ep = {"name": "tap1234"}
m_endpoint = Mock(spec=WorkloadEndpoint)
self.mgr.objects_by_id[ENDPOINT_ID] = m_endpoint
with mock.patch.object(self.mgr, "_is_starting_or_live") as m_sol:
m_sol.return_value = False
self.mgr.on_endpoint_update(ENDPOINT_ID, ep, async=True)
self.mgr.on_interface_update("tap1234", True, async=True)
self.step_actor(self.mgr)
self.assertEqual(m_endpoint.on_interface_update.mock_calls, [])
def test_resolve_host_eps_mainline(self):
ep1 = {"name": "eth0"}
self.mgr.on_host_ep_update(HostEndpointId("hostname", "ep1"),
ep1,
async=True)
ep2 = {"expected_ipv4_addrs": ["10.0.0.1"]}
self.mgr.on_host_ep_update(HostEndpointId("hostname", "ep2"),
ep2,
async=True)
self.mgr.on_host_ep_update(HostEndpointId("hostname", "ep3"),
{"expected_ipv4_addrs": ["10.0.0.2"]},
async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
# Only one interface resolved by its explicit name.
m_on_ep_upd.assert_called_once_with(
ResolvedHostEndpointId("hostname", "ep1", "eth0"),
ep1
)
# Send in a new IP, should resolve.
self.mgr._on_iface_ips_update("eth2", ["10.0.0.1"], async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
            # Exactly one endpoint should be resolved, this time by IP match.
m_on_ep_upd.assert_called_once_with(
ResolvedHostEndpointId("hostname", "ep2", "eth2"),
{"expected_ipv4_addrs": ["10.0.0.1"], "name": "eth2"}
)
# Send in a duplicate IP on another interface, should resolve.
self.mgr._on_iface_ips_update("eth3", ["10.0.0.1"], async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
            # The duplicate IP should resolve the same endpoint on eth3.
m_on_ep_upd.assert_called_once_with(
ResolvedHostEndpointId("hostname", "ep2", "eth3"),
{"expected_ipv4_addrs": ["10.0.0.1"], "name": "eth3"}
)
# Delete first IP, should result in deletion.
self.mgr._on_iface_ips_update("eth2", None, async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
            # Removing the IP should trigger a deletion for the eth2 binding.
m_on_ep_upd.assert_called_once_with(
ResolvedHostEndpointId("hostname", "ep2", "eth2"),
None
)
def test_resolve_host_eps_multiple_ips(self):
ep1 = {"expected_ipv4_addrs": ["10.0.0.1", "10.0.0.2"]}
self.mgr.on_host_ep_update(HostEndpointId("hostname", "ep1"),
ep1,
async=True)
self.mgr._on_iface_ips_update("eth1", ["10.0.0.1", "10.0.0.2"],
async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
# Two IPs, but should resolve only once.
m_on_ep_upd.assert_called_once_with(
ResolvedHostEndpointId("hostname", "ep1", "eth1"),
{"expected_ipv4_addrs": ["10.0.0.1", "10.0.0.2"], "name": "eth1"}
)
def test_other_host_ep_ignored(self):
ep1 = {"expected_ipv4_addrs": ["10.0.0.1"]}
self.mgr.on_host_ep_update(HostEndpointId("otherhost", "ep1"),
ep1,
async=True)
self.mgr._on_iface_ips_update("eth1", ["10.0.0.1"],
async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
self.assertFalse(m_on_ep_upd.called)
def test_resolve_host_eps_multiple_conflicting_matches(self):
# Check that, if multiple endpoints match an interface, the first
# one wins.
ep1 = {"expected_ipv4_addrs": ["10.0.0.1"]}
ep2 = {"expected_ipv4_addrs": ["10.0.0.2"]}
# Loop over different IDs, the lower numbered one should be picked
# consistently.
for ii in xrange(9):
id_1 = "ep%s" % ii
ep_id_1 = HostEndpointId("hostname", id_1)
self.mgr.on_host_ep_update(HostEndpointId("hostname", id_1),
ep1,
async=True)
id_2 = "ep%s" % (ii + 1)
self.mgr.on_host_ep_update(HostEndpointId("hostname", id_2),
ep2,
async=True)
self.mgr._on_iface_ips_update("eth1", ["10.0.0.1", "10.0.0.2"],
async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
# Should resolve only once.
m_on_ep_upd.assert_called_once_with(
ResolvedHostEndpointId("hostname", id_1, "eth1"),
{"expected_ipv4_addrs": ["10.0.0.1"], "name": "eth1"}
)
# Removing first ep should resolve with other.
self.mgr.on_host_ep_update(ep_id_1,
None,
async=True)
with mock.patch.object(self.mgr, "on_endpoint_update") as m_on_ep_upd:
self.step_actor(self.mgr)
self.assertEqual(
m_on_ep_upd.mock_calls,
[
mock.call(
ResolvedHostEndpointId("hostname", id_1, "eth1"),
None
),
mock.call(
ResolvedHostEndpointId("hostname", id_2, "eth1"),
{"expected_ipv4_addrs": ["10.0.0.2"], "name": "eth1"}
),
]
)
def test_poll_interfaces(self):
known_interfaces = {}
self.mgr.config.IFACE_PREFIX = ["tap"]
with mock.patch("calico.felix.devices.list_ips_by_iface",
autospec=True) as m_list_ips, \
mock.patch.object(self.mgr, "_on_iface_ips_update",
autospec=True) as m_on_ip_upd:
# Check no interfaces.
m_list_ips.return_value = {}
known_interfaces = self.mgr._poll_interfaces(known_interfaces)
self.assertEqual(known_interfaces, {})
# Mainline, eth0 passed through but tap gets skipped.
m_list_ips.return_value = {
"eth0": [IPAddress("10.0.0.1")],
"tapABCD": [IPAddress("10.0.0.2")],
}
known_interfaces = self.mgr._poll_interfaces(known_interfaces)
self.assertEqual(known_interfaces,
{"eth0": [IPAddress("10.0.0.1")]})
m_on_ip_upd.assert_called_once_with("eth0",
[IPAddress("10.0.0.1")],
async=True)
m_on_ip_upd.reset_mock()
# Deletion, should see interface removed.
m_list_ips.return_value = {}
known_interfaces = self.mgr._poll_interfaces(known_interfaces)
self.assertEqual(known_interfaces, {})
m_on_ip_upd.assert_called_once_with("eth0",
None,
async=True)
@mock.patch("gevent.sleep", autospec=True)
def test_interface_poll_loop(self, m_sleep):
self.mgr.config.HOST_IF_POLL_INTERVAL_SECS = 1
with mock.patch.object(self.mgr, "_poll_interfaces",
autospec=True) as m_poll:
m_poll.side_effect = iter([{"a": [IPAddress("10.0.0.1")]},
{"b": [IPAddress("10.0.0.2")]},
FinishLoop()])
self.assertRaises(FinishLoop, self.mgr._interface_poll_loop)
self.assertEqual(
m_poll.mock_calls,
[
mock.call({}),
mock.call({"a": [IPAddress("10.0.0.1")]}),
mock.call({"b": [IPAddress("10.0.0.2")]}),
]
)
self.assertEqual(m_sleep.mock_calls, [mock.call(1)] * 2)
@mock.patch("gevent.sleep", autospec=True)
def test_interface_poll_loop_disabled(self, m_sleep):
self.mgr.config.HOST_IF_POLL_INTERVAL_SECS = -1
with mock.patch.object(self.mgr, "_poll_interfaces",
autospec=True) as m_poll:
m_poll.side_effect = iter([{"a": [IPAddress("10.0.0.1")]},
AssertionError()])
self.mgr._interface_poll_loop()
self.assertEqual(
m_poll.mock_calls,
[
mock.call({}),
]
)
self.assertEqual(m_sleep.mock_calls, [])
@mock.patch("sys.exit", autospec=True)
def test_on_worker_died(self, m_exit):
m_glet = mock.Mock()
self.mgr._on_worker_died(m_glet)
m_exit.assert_called_once_with(1)
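# Sentinel exception raised from a mocked _poll_interfaces side effect so that
# the poll-loop tests above can break out of the otherwise-infinite
# _interface_poll_loop.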
class FinishLoop(Exception):
pass
class TestWorkloadEndpoint(BaseTestCase):
def setUp(self):
super(TestWorkloadEndpoint, self).setUp()
self.config = load_config("felix_default.cfg", global_dict={
"EndpointReportingEnabled": "False"})
self.m_ipt_gen = Mock(spec=FelixIptablesGenerator)
self.m_ipt_gen.endpoint_updates.return_value = {}, {}
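        # Workload endpoint tests should never touch the host-endpoint rule
        # generator; make any such call fail loudly.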
self.m_ipt_gen.host_endpoint_updates.side_effect = AssertionError()
self.m_iptables_updater = Mock(spec=IptablesUpdater)
self.m_dispatch_chains = Mock(spec=WorkloadDispatchChains)
self.m_host_dispatch_chains = Mock(spec=HostEndpointDispatchChains)
self.m_rules_mgr = Mock(spec=RulesManager)
self.m_manager = Mock(spec=EndpointManager)
self.m_fip_manager = Mock(spec=FloatingIPManager)
self.m_status_rep = Mock(spec=EtcdStatusReporter)
def create_endpoint(self, combined_id, ip_type):
local_endpoint = endpoint.WorkloadEndpoint(self.config,
combined_id,
ip_type,
self.m_iptables_updater,
self.m_dispatch_chains,
self.m_rules_mgr,
self.m_fip_manager,
self.m_status_rep)
local_endpoint._manager = self.m_manager
return local_endpoint
def test_on_endpoint_update_v4(self):
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
# Call with no data; should be ignored (no configuration to remove).
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
ips = ["1.2.3.4/32"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
self.assertFalse(m_rem_conntrack.called)
# Send through an update with no changes - should be a no-op.
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertFalse(m_conf.called)
self.assertFalse(m_set_routes.called)
self.assertFalse(m_rem_conntrack.called)
# Change the MAC address and try again, leading to reset of ARP
data = data.copy()
data['mac'] = stub_utils.get_mac()
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv4') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
# Change the IP address, causing an iptables and route refresh.
data = data.copy()
data["ipv4_nets"] = ["1.2.3.5"]
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as _m_conf,\
mock.patch('calico.felix.endpoint.WorkloadEndpoint._update_chains') as _m_up_c,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.5"]),
iface,
data['mac'],
reset_arp=True)
self.assertFalse(local_ep._update_chains.called)
m_rem_conntrack.assert_called_once_with(set(["1.2.3.4"]), 4)
# Change the nat mappings, causing an iptables and route refresh.
data = data.copy()
data['ipv4_nat'] = [
{
'int_ip': '1.2.3.4',
'ext_ip': '5.6.7.8'
}
]
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as _m_conf,\
mock.patch('calico.felix.endpoint.WorkloadEndpoint._update_chains') as _m_up_c,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.5", "5.6.7.8"]),
iface,
data['mac'],
reset_arp=True)
local_ep._update_chains.assert_called_once_with()
self.assertFalse(m_rem_conntrack.called)
# Send empty data, which deletes the endpoint.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type, set(),
data["name"], None)
# Should clean up conntrack entries for all IPs.
m_rem_conntrack.assert_called_once_with(
set(['1.2.3.5', '5.6.7.8']), 4
)
def test_on_endpoint_update_v4_no_mac(self):
"""Test endpoint without MAC makes the right calls to set_routes"""
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
ips = ["1.2.3.4/32"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, None)
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
None,
reset_arp=False)
self.assertFalse(m_rem_conntrack.called)
# Add a MAC address and try again, leading to reset of ARP
data = data.copy()
data['mac'] = stub_utils.get_mac()
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv4') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
def test_on_endpoint_update_v4_no_ips(self):
"""Test that lack of IPs results in correct defaulting"""
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'name': iface,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, None)
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(),
iface,
None,
reset_arp=False)
self.assertFalse(m_rem_conntrack.called)
def test_on_endpoint_update_delete_fail(self):
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
ips = ["1.2.3.4/32"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack,\
mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv4') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(["1.2.3.4"]),
iface,
data['mac'],
reset_arp=True)
self.assertFalse(m_rem_conntrack.called)
# Send empty data, which deletes the endpoint. Raise an exception
# from set_routes to check that it's handled.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.interface_exists', return_value=True),\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
m_set_routes.side_effect = FailedSystemCall("", [], 1, "", "")
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type, set(),
data["name"], None)
# Should clean up conntrack entries for all IPs.
m_rem_conntrack.assert_called_once_with(
set(['1.2.3.4']), 4
)
def test_on_endpoint_update_v6(self):
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV6
local_ep = self.create_endpoint(combined_id, ip_type)
# Call with no data; should be ignored (no configuration to remove).
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
nets = ["2001::abcd/128"]
gway = "2020:ab::9876"
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv6_nets': nets,
'ipv6_gateway': gway,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv6') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up, \
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface, gway)
m_set_routes.assert_called_once_with(ip_type,
set(["2001::abcd"]),
iface,
data['mac'],
reset_arp=False)
self.assertFalse(m_rem_conntrack.called)
# Send through an update with no changes but a force update. Should
# force a re-write to iptables.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
local_ep.on_endpoint_update(data, force_reprogram=True,
async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertTrue(m_conf.called)
self.assertTrue(m_set_routes.called)
        # Send through an update that only changes the MAC - on IPv4 that
        # would reset ARP, but this is IPv6 so it won't.
data = data.copy()
data['mac'] = stub_utils.get_mac()
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
m_conf.assert_called_once_with(iface, gway)
m_set_routes.assert_called_once_with(ip_type,
set(["2001::abcd"]),
iface,
data['mac'],
reset_arp=False)
# Change the nat mappings, causing an iptables and route refresh.
data = data.copy()
nets.append('2001::abce/128')
data['ipv6_nat'] = [
{
'int_ip': '2001::abcd',
'ext_ip': '2001::abce'
}
]
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv6') as m_conf,\
mock.patch('calico.felix.endpoint.WorkloadEndpoint._update_chains') as _m_up_c:
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(
ip_type,
set(["2001::abcd", "2001::abce"]),
iface,
data['mac'],
reset_arp=False
)
local_ep._update_chains.assert_called_once_with()
# Send empty data, which deletes the endpoint.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
local_ep.on_endpoint_update(None, async=True)
local_ep.on_unreferenced(async=True)
self.step_actor(local_ep)
m_set_routes.assert_called_once_with(ip_type, set(),
data["name"], None)
local_ep._finish_msg_batch([], []) # Should be ignored
self.m_manager.on_object_cleanup_complete.assert_called_once_with(
local_ep._id,
local_ep,
async=True,
)
m_rem_conntrack.assert_called_once_with(set(['2001::abcd',
'2001::abce']), 6)
def test_on_endpoint_update_v6_no_ips(self):
"""Check that lack of v6 addresses is correctly defaulted"""
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV6
local_ep = self.create_endpoint(combined_id, ip_type)
# Call with no data; should be ignored (no configuration to remove).
local_ep.on_endpoint_update(None, async=True)
self.step_actor(local_ep)
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'name': iface,
'profile_ids': ["prof1"]
}
# Report an initial update (endpoint creation) and check configured
with mock.patch('calico.felix.devices.set_routes') as m_set_routes,\
mock.patch('calico.felix.devices.configure_interface_ipv6') as m_conf,\
mock.patch('calico.felix.devices.interface_exists') as m_iface_exists,\
mock.patch('calico.felix.devices.interface_up') as m_iface_up, \
mock.patch('calico.felix.devices.remove_conntrack_flows') as m_rem_conntrack:
m_iface_exists.return_value = True
m_iface_up.return_value = True
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, None)
m_conf.assert_called_once_with(iface, None)
m_set_routes.assert_called_once_with(ip_type,
set(),
iface,
None,
reset_arp=False)
self.assertFalse(m_rem_conntrack.called)
def test_on_interface_update_v4(self):
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
ips = ["1.2.3.4"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv4_nets': ips,
'profile_ids': ["prof1"]
}
# We can only get on_interface_update calls after the first
# on_endpoint_update, so trigger that.
with nested(
mock.patch('calico.felix.devices.set_routes'),
mock.patch('calico.felix.devices.configure_interface_ipv4'),
mock.patch('calico.felix.devices.interface_up'),
) as [m_set_routes, m_conf, m_iface_up]:
m_iface_up.return_value = False
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertFalse(m_conf.called)
self.assertFalse(m_set_routes.called)
self.assertFalse(local_ep._device_in_sync)
# Now pretend to get an interface update - does all the same work.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv4') as m_conf:
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
m_conf.assert_called_once_with(iface)
m_set_routes.assert_called_once_with(ip_type,
set(ips),
iface,
data['mac'],
reset_arp=True)
self.assertTrue(local_ep._device_in_sync)
@mock.patch("calico.felix.endpoint.devices", autospec=True)
def test_tiered_policy_mainline(self, m_devices):
self.config.plugins["iptables_generator"] = self.m_ipt_gen
ep = self.create_endpoint(ENDPOINT_ID, futils.IPV4)
mac = stub_utils.get_mac()
ep.on_endpoint_update(
{
'state': "active",
'endpoint': "endpoint_id",
'mac': mac,
'name': "tap1234",
'ipv4_nets': ["10.0.0.1"],
'profile_ids': ["prof1"]
},
async=True)
self.step_actor(ep)
self.assertEqual(
self.m_ipt_gen.endpoint_updates.mock_calls,
[
mock.call(4, 'd', '1234', mac, ['prof1'], {}),
]
)
self.m_ipt_gen.endpoint_updates.reset_mock()
tiers = OrderedDict()
t1_1 = TieredPolicyId("t1", "t1_1")
t1_2 = TieredPolicyId("t1", "t1_2")
tiers["t1"] = [t1_1, t1_2]
t2_1 = TieredPolicyId("t2", "t2_1")
tiers["t2"] = [t2_1]
ep.on_tiered_policy_update(tiers, async=True)
self.step_actor(ep)
self.assertEqual(
self.m_ipt_gen.endpoint_updates.mock_calls,
[
mock.call(4, 'd', '1234', mac, ['prof1'],
OrderedDict([('t1', [TieredPolicyId('t1','t1_1'),
TieredPolicyId('t1','t1_2')]),
('t2', [TieredPolicyId('t2','t2_1')])]))
])
def test_on_interface_update_v6(self):
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV6
local_ep = self.create_endpoint(combined_id, ip_type)
ips = ["1234::5678"]
iface = "tapabcdef"
data = {
'state': "active",
'endpoint': "endpoint_id",
'mac': stub_utils.get_mac(),
'name': iface,
'ipv6_nets': ips,
'profile_ids': ["prof1"]
}
# We can only get on_interface_update calls after the first
# on_endpoint_update, so trigger that.
with nested(
mock.patch('calico.felix.devices.set_routes'),
mock.patch('calico.felix.devices.configure_interface_ipv6'),
mock.patch('calico.felix.devices.interface_up'),
) as [m_set_routes, m_conf, m_iface_up]:
m_iface_up.return_value = False
local_ep.on_endpoint_update(data, async=True)
self.step_actor(local_ep)
self.assertEqual(local_ep._mac, data['mac'])
self.assertFalse(m_conf.called)
self.assertFalse(m_set_routes.called)
self.assertFalse(local_ep._device_in_sync)
# Now pretend to get an interface update - does all the same work.
with mock.patch('calico.felix.devices.set_routes') as m_set_routes:
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
m_conf.assert_called_once_with(iface, None)
m_set_routes.assert_called_once_with(ip_type,
set(ips),
iface,
data['mac'],
reset_arp=False)
self.assertTrue(local_ep._device_in_sync)
# Now cover the error cases...
with mock.patch('calico.felix.devices.'
'configure_interface_ipv6') as m_conf:
with mock.patch('calico.felix.devices.'
'interface_exists') as ifce_exists:
with mock.patch('calico.felix.devices.'
'interface_up') as ifce_up:
# Cycle through all the possibilities for the state.
ifce_exists.side_effect = [True, False, True]
ifce_up.side_effect = [True, False]
m_conf.side_effect = FailedSystemCall("", [], 1, "", "")
local_ep.on_interface_update(False, async=True)
self.step_actor(local_ep)
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
local_ep.on_interface_update(True, async=True)
self.step_actor(local_ep)
self.assertFalse(local_ep._device_in_sync)
def test_profile_id_update_triggers_iptables(self):
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
ips = ["10.0.0.1"]
iface = "tapabcdef"
mac = stub_utils.get_mac()
data = {'endpoint': "endpoint_id", 'mac': mac,
'name': iface, 'ipv4_nets': ips, 'profile_ids': [],
'state': "active"}
local_ep._pending_endpoint = data.copy()
# First update with endpoint not yet set, should trigger full sync.
with mock.patch("calico.felix.devices.interface_up",
return_value=True):
local_ep._apply_endpoint_update()
self.assertEqual(local_ep.endpoint, data)
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._iptables_in_sync = True
local_ep._device_in_sync = True
# No-op update
local_ep._pending_endpoint = data.copy()
local_ep._apply_endpoint_update()
self.assertTrue(local_ep._iptables_in_sync)
self.assertTrue(local_ep._device_in_sync)
# Set the state.
local_ep._pending_endpoint = data.copy()
local_ep._pending_endpoint["state"] = "inactive"
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._device_in_sync = True
local_ep._iptables_in_sync = True
# Set the state back again...
local_ep._pending_endpoint = data.copy()
local_ep._pending_endpoint["state"] = "active"
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._device_in_sync = True
local_ep._iptables_in_sync = True
# Profiles update. Should update iptables.
data = {'endpoint': "endpoint_id", 'mac': mac,
'name': iface, 'ipv4_nets': ips, 'profile_ids': ["prof2"],
"state": "active"}
local_ep._pending_endpoint = data.copy()
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync) # Check...
local_ep._iptables_in_sync = True # ...then reset
self.assertTrue(local_ep._device_in_sync)
# IP update. Should update routing but not iptables.
data = {'endpoint': "endpoint_id", 'mac': mac,
'name': iface, 'ipv4_nets': ["10.0.0.2"],
'profile_ids': ["prof2"],
"state": "active"}
local_ep._pending_endpoint = data.copy()
local_ep._apply_endpoint_update()
self.assertTrue(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
local_ep._device_in_sync = True
# Delete, should update everything.
local_ep._pending_endpoint = None
local_ep._apply_endpoint_update()
self.assertFalse(local_ep._iptables_in_sync)
self.assertFalse(local_ep._device_in_sync)
def test_maybe_update_status_missing_deps(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_missing_endpoint(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep._device_is_up = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_iptables_failure(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._device_is_up = True
local_ep._iptables_in_sync = False
local_ep._device_in_sync = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'error'}, async=True
)
def test_maybe_update_status_device_failure(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._iptables_in_sync = True
local_ep._device_is_up = True
local_ep._device_in_sync = False
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'error'}, async=True
)
def test_maybe_update_status_iptables_up(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._device_is_up = True
local_ep._iptables_in_sync = True
local_ep._device_in_sync = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'up'}, async=True
)
def test_maybe_update_status_admin_down(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "inactive"}
local_ep._device_is_up = True
local_ep._iptables_in_sync = True
local_ep._device_in_sync = True
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_oper_down(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep.endpoint = {"state": "active"}
local_ep._device_is_up = False
local_ep._iptables_in_sync = True
local_ep._device_in_sync = False
local_ep._maybe_update_status()
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, {'status': 'down'}, async=True
)
def test_maybe_update_status_iptables_unreferenced(self):
self.config.REPORT_ENDPOINT_STATUS = True
combined_id = WloadEndpointId("host_id", "orchestrator_id",
"workload_id", "endpoint_id")
ip_type = futils.IPV4
local_ep = self.create_endpoint(combined_id, ip_type)
local_ep.on_unreferenced(async=True)
self.step_actor(local_ep)
self.m_status_rep.on_endpoint_status_changed.assert_called_once_with(
combined_id, futils.IPV4, None, async=True
)
class TestHostEndpoint(BaseTestCase):
def setUp(self):
super(TestHostEndpoint, self).setUp()
self.config = mock.Mock()
self.config.IFACE_PREFIX = ["tap"]
self.m_ipt_gen = Mock(spec=FelixIptablesGenerator)
self.config.plugins = {"iptables_generator": self.m_ipt_gen}
self.updates = ({"chain": ["rule"]}, {"chain": set(["deps"])})
self.m_ipt_gen.host_endpoint_updates.return_value = self.updates
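        # Host endpoint tests should never touch the workload-endpoint rule
        # generator; make any such call fail loudly.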
self.m_ipt_gen.endpoint_updates.side_effect = AssertionError()
self.chain_names = {"foo", "bar"}
self.m_ipt_gen.endpoint_chain_names.return_value = self.chain_names
self.m_iptables_updater = Mock(spec=IptablesUpdater)
self.m_dispatch_chains = Mock(spec=WorkloadDispatchChains)
self.m_host_dispatch_chains = Mock(spec=HostEndpointDispatchChains)
self.m_rules_mgr = Mock(spec=RulesManager)
self.m_manager = Mock(spec=EndpointManager)
self.m_fip_manager = Mock(spec=FloatingIPManager)
self.m_status_rep = Mock(spec=EtcdStatusReporter)
def create_endpoint(self, resolved_id=None, ip_type=futils.IPV4):
if resolved_id is None:
resolved_id = ResolvedHostEndpointId("host_id",
"endpoint_id",
"eth0")
local_endpoint = endpoint.HostEndpoint(self.config,
resolved_id,
ip_type,
self.m_iptables_updater,
self.m_dispatch_chains,
self.m_rules_mgr,
self.m_fip_manager,
self.m_status_rep)
local_endpoint._manager = self.m_manager
return local_endpoint
def test_ipv4_mainline(self):
iface = "eth0"
host_ep = self.create_endpoint()
# Call with no data; should be ignored (no configuration to remove).
host_ep.on_endpoint_update(None, async=True)
self.step_actor(host_ep)
# Report an initial update (endpoint creation) and check that
# there are no calls to the workload endpoint configuration functions.
ips = ["1.2.3.4"]
data = {
'endpoint': "endpoint_id",
'name': iface,
'expected_ipv4_addrs': ips,
'profile_ids': ["prof1"],
}
with mock.patch('calico.felix.endpoint.devices',
autospec=True) as m_devices:
m_devices.interface_exists.return_value = True
m_devices.interface_up.return_value = True
host_ep.on_endpoint_update(data, async=True)
self.step_actor(host_ep)
# Second update should be a no-op
host_ep.on_endpoint_update(data, async=True)
self.step_actor(host_ep)
# Check that the workload config functions aren't called.
self.assertEqual(host_ep._mac, None)
self.assertFalse(m_devices.configure_interface_ipv4.called)
self.assertFalse(m_devices.set_routes.called)
self.assertFalse(m_devices.remove_conntrack_flows.called)
# Should be added to the dispatch chain.
self.m_dispatch_chains.on_endpoint_added.assert_called_once_with(
iface, async=True)
# Check that the iptables generator is called with the direction
# arguments. (Host endpoint chain directions are flipped.)
self.m_ipt_gen.host_endpoint_updates.assert_called_once_with(
ip_version=4, # IP version
endpoint_id="endpoint_id",
suffix="eth0",
profile_ids=["prof1"],
pol_ids_by_tier={},
)
# Check that the updates are actually committed.
self.m_iptables_updater.rewrite_chains.assert_called_once_with(
*self.updates, async=False
)
# Check the general state is "up".
self.assertTrue(host_ep._device_is_up)
self.assertTrue(host_ep._device_in_sync)
self.assertTrue(host_ep._admin_up)
self.assertEqual(host_ep.oper_status(),
('up', 'In sync and device is up'))
self.m_iptables_updater.reset_mock()
# Now tear down the interface.
with mock.patch('calico.felix.endpoint.devices',
autospec=True) as m_devices:
host_ep.on_endpoint_update(None, async=True)
self.step_actor(host_ep)
# Check that the updates are actually committed.
self.m_iptables_updater.delete_chains.assert_called_once_with(
self.chain_names,
async=False
)
# Should be no workload set-up calls.
self.assertFalse(m_devices.configure_interface_ipv4.called)
self.assertFalse(m_devices.set_routes.called)
self.assertFalse(m_devices.remove_conntrack_flows.called)
# General status should be down.
self.assertEqual(host_ep.oper_status(),
('down', 'No endpoint data'))
def test_ipv6_mainline(self):
iface = "eth0"
host_ep = self.create_endpoint(ip_type=futils.IPV6)
# Call with no data; should be ignored (no configuration to remove).
host_ep.on_endpoint_update(None, async=True)
self.step_actor(host_ep)
# Report an initial update (endpoint creation) and check that
# there are no calls to the workload endpoint configuration functions.
ips = ["2001::1"]
data = {
'endpoint': "endpoint_id",
'name': iface,
'expected_ipv6_addrs': ips,
'profile_ids': ["prof1"],
}
with mock.patch('calico.felix.endpoint.devices',
autospec=True) as m_devices:
m_devices.interface_exists.return_value = True
m_devices.interface_up.return_value = True
host_ep.on_endpoint_update(data, async=True)
self.step_actor(host_ep)
# Second update should be a no-op
host_ep.on_endpoint_update(data, async=True)
self.step_actor(host_ep)
# Check that the workload config functions aren't called.
self.assertEqual(host_ep._mac, None)
self.assertFalse(m_devices.configure_interface_ipv4.called)
self.assertFalse(m_devices.configure_interface_ipv6.called)
self.assertFalse(m_devices.set_routes.called)
self.assertFalse(m_devices.remove_conntrack_flows.called)
# Should be added to the dispatch chain.
self.m_dispatch_chains.on_endpoint_added.assert_called_once_with(
iface, async=True)
# Check that the iptables generator is called with the direction
# arguments. (Host endpoint chain directions are flipped.)
self.m_ipt_gen.host_endpoint_updates.assert_called_once_with(
ip_version=6, # IP version
endpoint_id="endpoint_id",
suffix="eth0",
profile_ids=["prof1"],
pol_ids_by_tier={},
)
# Check that the updates are actually committed.
self.m_iptables_updater.rewrite_chains.assert_called_once_with(
*self.updates, async=False
)
# Check the general state is "up".
self.assertTrue(host_ep._device_is_up)
self.assertTrue(host_ep._device_in_sync)
self.assertTrue(host_ep._admin_up)
self.assertEqual(host_ep.oper_status(),
('up', 'In sync and device is up'))
self.m_iptables_updater.reset_mock()
# Now tear down the interface.
with mock.patch('calico.felix.endpoint.devices',
autospec=True) as m_devices:
host_ep.on_endpoint_update(None, async=True)
self.step_actor(host_ep)
# Check that the updates are actually committed.
self.m_iptables_updater.delete_chains.assert_called_once_with(
self.chain_names,
async=False
)
# Should be no workload set-up calls.
self.assertFalse(m_devices.configure_interface_ipv4.called)
self.assertFalse(m_devices.set_routes.called)
self.assertFalse(m_devices.remove_conntrack_flows.called)
# General status should be down.
self.assertEqual(host_ep.oper_status(),
('down', 'No endpoint data'))
def test_on_profiles_ready_noop(self):
"""Cover the no-op _on_profiles_ready method."""
host_ep = self.create_endpoint()
host_ep._on_profiles_ready()
| 46.378189
| 96
| 0.575638
| 8,498
| 72,721
| 4.618263
| 0.06225
| 0.02943
| 0.027162
| 0.040259
| 0.827702
| 0.804286
| 0.777226
| 0.757275
| 0.721679
| 0.700428
| 0
| 0.014114
| 0.326756
| 72,721
| 1,567
| 97
| 46.407786
| 0.787496
| 0.087733
| 0
| 0.681423
| 0
| 0
| 0.109817
| 0.045411
| 0
| 0
| 0
| 0
| 0.146245
| 0
| null | null | 0.000791
| 0.016601
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| e40b23c232aef9ab18727b17036490e8faa476e2
| 12,592
| py
| Python
| cogs/servmanager.py
| thekevinlab/saturn-discord-bot
| 416fa70ebfa10f7e0dd86315f139bb02d7fdfabc
| ["BSD-3-Clause"]
| 1
| 2021-02-05T15:42:20.000Z
| 2021-02-05T15:42:20.000Z
| cogs/servmanager.py
| thekevinlab/saturn-discord-bot
| 416fa70ebfa10f7e0dd86315f139bb02d7fdfabc
| ["BSD-3-Clause"]
| 6
| 2021-04-29T17:22:00.000Z
| 2021-05-13T13:44:50.000Z
| cogs/servmanager.py
| thekevinlab/saturn-discord-bot
| 416fa70ebfa10f7e0dd86315f139bb02d7fdfabc
| ["BSD-3-Clause"]
| null | null | null |
import logging
import typing
from assets import *
from discord.ext import commands
import discord
# noinspection SpellCheckingInspection
class Management(commands.Cog, name='Server Management'):
"""
    The Server Management module. Provides shortcuts for common administrative
    tasks such as adding or removing roles and creating or deleting channels,
    automating the jobs that are usually slow or tedious to do by hand, like
    mass-adding roles.
"""
def __init__(self, bot):
self.bot = bot
self.logger = logging.getLogger(__name__)
@commands.command(
name='addrole',
aliases=['addr', 'ar', 'arole'],
description="Adds a role to you or a specified member.")
@commands.cooldown(1, 5, commands.BucketType.member)
@commands.guild_only()
@commands.has_guild_permissions(manage_roles=True)
@commands.bot_has_guild_permissions(manage_roles=True)
async def add_roles(self, ctx, role: discord.Role, member: typing.Optional[discord.Member],
reason: typing.Optional[str] = 'no reason provided'):
member = member or ctx.author
if ctx.guild.me.top_role > member.top_role and (role.position < ctx.guild.me.top_role.position):
if ctx.author.top_role > member.top_role and member != ctx.author:
await member.add_roles(role, reason=reason)
em = SaturnEmbed(
description=f"{CHECK} Added {role.mention} to {member.mention}",
colour=GREEN)
await ctx.send(embed=em)
else:
em = SaturnEmbed(
description=f"{CROSS} You are not high enough in the role"
f" hierarchy to perform this action.",
colour=RED)
await ctx.send(embed=em)
return
else:
em = SaturnEmbed(
description=f"{CROSS} I am not high enough in the member"
f" hierarchy to perform this action.",
colour=RED)
await ctx.send(embed=em)
return
@commands.command(
name='massaddrole',
aliases=['maddr', 'mar', 'marole'],
description="Adds a role to you or a specified member.")
@commands.cooldown(1, 5, commands.BucketType.member)
@commands.guild_only()
@commands.has_guild_permissions(administrator=True)
@commands.bot_has_guild_permissions(manage_roles=True)
async def mass_add_roles(self, ctx, role: discord.Role, has_role: discord.Role,
reason: typing.Optional[str] = 'no reason provided'):
conf = await ConfirmationMenu(f'mass add {role.mention}').prompt(ctx)
if conf:
em = SaturnEmbed(
description=f"{INFO} This might take a while, please wait...",
colour=BLUE)
msg = await ctx.send(embed=em)
async with ctx.channel.typing():
added_roles = []
for member in ctx.guild.members:
if has_role in member.roles:
await member.add_roles(role, reason=reason, atomic=True)
added_roles.append(member)
else:
continue
else:
try:
await msg.delete()
except (discord.NotFound, discord.Forbidden):
pass
em = SaturnEmbed(
description=f"{CHECK} Added {role.mention} to `{len(added_roles)}` members.",
colour=GREEN)
await ctx.send(embed=em)
@commands.command(
name='massremoverole',
aliases=['mrmvr', 'mremover', 'mrrole'],
description="Removes a role from you or a specified member.")
@commands.cooldown(1, 5, commands.BucketType.member)
@commands.guild_only()
@commands.has_guild_permissions(administrator=True)
@commands.bot_has_guild_permissions(manage_roles=True)
async def mass_remove_roles(self, ctx, role: discord.Role, has_role: discord.Role, reason: typing.Optional[str]):
em = SaturnEmbed(
description=f"{INFO} This might take a while, please wait...",
colour=BLUE)
msg = await ctx.send(embed=em)
removed_roles = []
for member in ctx.guild.members:
if has_role in member.roles:
await member.remove_roles(role, reason=reason, atomic=True)
removed_roles.append(member)
else:
continue
else:
try:
await msg.delete()
except (discord.NotFound, discord.Forbidden):
pass
em = SaturnEmbed(
description=f"{CHECK} Removed {role.mention} from `{len(removed_roles)}` members.",
colour=GREEN)
await ctx.send(embed=em)
@commands.command(
name='removerole',
aliases=['rmvr', 'remover', 'rrole'],
description="Removes a role from you or a specified member.")
@commands.cooldown(1, 5, commands.BucketType.member)
@commands.guild_only()
@commands.has_guild_permissions(manage_roles=True)
@commands.bot_has_guild_permissions(manage_roles=True)
async def remove_roles(self, ctx, role: discord.Role,
member: typing.Optional[discord.Member], reason: typing.Optional[str]):
member = member or ctx.author
if ctx.guild.me.top_role > member.top_role and (member != ctx.author) \
and (role.position < ctx.guild.me.top_role.position):
if ctx.author.top_role > member.top_role and member != ctx.author:
await member.remove_roles(role, reason=reason)
em = SaturnEmbed(
description=f"{CHECK} Added {role.mention} to {member.mention}",
colour=GREEN)
await ctx.send(embed=em)
else:
em = SaturnEmbed(
description=f"{CROSS} You are not high enough in the role"
f" hierarchy to perform this action.",
colour=RED)
await ctx.send(embed=em)
return
else:
em = SaturnEmbed(
description=f"{CROSS} I am not high enough in the member"
f" hierarchy to perform this action.",
colour=RED)
await ctx.send(embed=em)
@commands.group(
name='create',
aliases=['make', 'new'],
description='The delete group of commands.',
invoke_without_command=True)
@commands.guild_only()
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
async def create(self, ctx):
await ctx.invoke(self.bot.get_command('help'), entity='create')
@create.command(
name='category',
aliases=['cgry', 'ctgry'],
description='Creates a category.')
@commands.guild_only()
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
async def create_category(self, ctx, *, name):
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True)}
category = await ctx.guild.create_category(name=name, overwrites=overwrites)
em = SaturnEmbed(
description=f"{CHECK} Created category `{category.name}`",
colour=GREEN)
await ctx.send(embed=em)
@create.command(
name='channel',
aliases=['chnl'],
description='Creates a channel.')
@commands.guild_only()
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
async def create_channel(self, ctx, *, name):
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True)}
channel = await ctx.guild.create_text_channel(name=name, overwrites=overwrites)
em = SaturnEmbed(
description=f"{CHECK} Created channel {channel.mention}",
colour=GREEN)
await ctx.send(embed=em)
@create.command(
name='role',
aliases=['r', 'rle', 'ro'],
        description='Creates a role. Colour is applied via a Hex Code (#FF0000)')
@commands.guild_only()
@commands.has_guild_permissions(manage_roles=True)
@commands.bot_has_guild_permissions(manage_roles=True)
async def create_role(self, ctx, name, colour: typing.Optional[commands.ColourConverter], *,
reason: str = 'no reason provided'):
new_role = await ctx.guild.create_role(
name=name, colour=colour if colour else discord.Color.default(), reason=reason)
em = SaturnEmbed(
description=f"{CHECK} Created role {new_role.mention}",
colour=GREEN)
await ctx.send(embed=em)
@commands.group(
name='delete',
aliases=['del'],
description='The delete group of commands.',
invoke_without_command=True)
@commands.guild_only()
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
async def delete(self, ctx):
await ctx.invoke(self.bot.get_command('help'), entity='delete')
@delete.command(
name='category',
aliases=['cgry'],
description='Deletes a category.')
@commands.guild_only()
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
async def del_category(self, ctx, category: discord.CategoryChannel, *, reason: typing.Optional[str]):
conf = await ConfirmationMenu(f'delete `{category.name}`').prompt(ctx)
if conf:
try:
await category.delete(reason=reason)
em = SaturnEmbed(
description=f"{CHECK} Deleted category `{category.name}`",
colour=GREEN)
await ctx.send(embed=em)
except discord.HTTPException:
em = SaturnEmbed(
description=f"{CROSS} I cannot delete that category.",
colour=RED)
await ctx.send(embed=em)
@delete.command(
name='channel',
aliases=['chnl'],
description='Deletes a channel.')
@commands.guild_only()
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
async def del_channel(self, ctx, channel: typing.Optional[discord.TextChannel], *, reason: typing.Optional[str]):
channel = channel or ctx.channel
conf = await ConfirmationMenu(f'delete `{channel.name}`').prompt(ctx)
if conf:
try:
await channel.delete(reason=reason)
em = SaturnEmbed(
description=f"{CHECK} Deleted channel `{channel.name}`",
colour=GREEN)
await ctx.send(embed=em)
except discord.HTTPException:
em = SaturnEmbed(
description=f"{CROSS} I cannot delete that channel.",
colour=RED)
await ctx.send(embed=em)
@delete.command(
name='role',
aliases=['r', 'rle', 'ro'],
description='Deletes a role.')
@commands.guild_only()
@commands.has_guild_permissions(manage_roles=True)
@commands.bot_has_guild_permissions(manage_roles=True)
async def del_role(self, ctx, role: discord.Role, *, reason: typing.Optional[str]):
conf = await ConfirmationMenu(f'delete `{role.name}`').prompt(ctx)
if conf:
try:
await role.delete(reason=reason)
em = SaturnEmbed(
description=f"{CHECK} Deleted role `{role.name}`",
colour=GREEN)
await ctx.send(embed=em)
except discord.HTTPException:
em = SaturnEmbed(
description=f"{CROSS} I cannot delete that role.",
colour=RED)
await ctx.send(embed=em)
def setup(bot):
bot.add_cog(Management(bot))
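# Loading sketch (an assumption, not part of this file): with discord.py 1.x
# the bot's entry point would typically pull this cog in via
#   bot.load_extension("cogs.servmanager")
# which in turn calls the setup() function above.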
| 41.285246
| 117
| 0.593472
| 1,384
| 12,592
| 5.286127
| 0.138728
| 0.026244
| 0.062329
| 0.075178
| 0.803171
| 0.77556
| 0.763942
| 0.723073
| 0.704757
| 0.677693
| 0
| 0.001251
| 0.301938
| 12,592
| 305
| 118
| 41.285246
| 0.831058
| 0.020251
| 0
| 0.701493
| 0
| 0
| 0.139877
| 0.001786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007463
| false
| 0.007463
| 0.014925
| 0
| 0.037313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 7c2d28979e9664ec285f09790b654e3bfd3d9e8a
| 1,305
| py
| Python
| ClassFiles/Python201/IntermediateIterable/Enumeration.py
| minefarmer/CompletePython
| 6de46e7ee29d9e4eaada60352c193f552afd6f15
| ["Unlicense"]
| null | null | null
| ClassFiles/Python201/IntermediateIterable/Enumeration.py
| minefarmer/CompletePython
| 6de46e7ee29d9e4eaada60352c193f552afd6f15
| ["Unlicense"]
| null | null | null
| ClassFiles/Python201/IntermediateIterable/Enumeration.py
| minefarmer/CompletePython
| 6de46e7ee29d9e4eaada60352c193f552afd6f15
| ["Unlicense"]
| null | null | null |
# animals = ["Gully", "Rhubarb", "Zephyr", "Henry"]
# for animal in enumerate(animals): # creates a list of Tuples
# print(animal) # (0, 'Gully')
# # (1, 'Rhubarb')
# # (2, 'Zephyr')
# # (3, 'Henry')
# animals = ["Gully", "Rhubarb", "Zephyr", "Henry"]
# for index, animal in enumerate(animals):
# print(animal) # Gully
# # Rhubarb
# # Zephyr
# # Henry
# animals = ["Gully", "Rhubarb", "Zephyr", "Henry"]
# for index, animal in enumerate(animals):
# print(index, animal) # 0 Gully
# # 1 Rhubarb
# # 2 Zephyr
# # 3 Henry
# animals = ["Gully", "Rhubarb", "Zephyr", "Henry"]
# for index, animal in enumerate(animals):
# if index % 2 == 0:
# continue
# print(index, animal) # 1 Rhubarb
# # 3 Henry
animals = ["Gully", "Rhubarb", "Zephyr", "Henry"]
for index, animal in enumerate(animals):
# if index % 2 == 0:
# continue
print(f"{index}.\t {animal}") # 0. Gully
# 1. Rhubarb
# 2. Zephyr
# 3. Henry
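# A further (commented-out) sketch, not part of the original lesson: enumerate
# also accepts a start index, which gives 1-based numbering directly.
# for position, animal in enumerate(animals, start=1):
#     print(f"{position}. {animal}")    # 1. Gully ... 4. Henry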
| 30.348837
| 64
| 0.42069
| 116
| 1,305
| 4.732759
| 0.206897
| 0.131148
| 0.196721
| 0.251366
| 0.810565
| 0.810565
| 0.750455
| 0.750455
| 0.750455
| 0.690346
| 0
| 0.02449
| 0.436782
| 1,305
| 42
| 65
| 31.071429
| 0.722449
| 0.747893
| 0
| 0
| 0
| 0
| 0.14841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 7cc16b04ec9aafaa69c2b8f529f9515a17cf8cda
| 46,549
| py
| Python
| sdk/python/pulumi_aws/appsync/graph_ql_api.py
| chivandikwa/pulumi-aws
| 19c08bf9dcb90544450ffa4eec7bf6751058fde2
| ["ECL-2.0", "Apache-2.0"]
| null | null | null
| sdk/python/pulumi_aws/appsync/graph_ql_api.py
| chivandikwa/pulumi-aws
| 19c08bf9dcb90544450ffa4eec7bf6751058fde2
| ["ECL-2.0", "Apache-2.0"]
| null | null | null
| sdk/python/pulumi_aws/appsync/graph_ql_api.py
| chivandikwa/pulumi-aws
| 19c08bf9dcb90544450ffa4eec7bf6751058fde2
| ["ECL-2.0", "Apache-2.0"]
| null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GraphQLApiArgs', 'GraphQLApi']
@pulumi.input_type
class GraphQLApiArgs:
def __init__(__self__, *,
authentication_type: pulumi.Input[str],
additional_authentication_providers: Optional[pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]]] = None,
lambda_authorizer_config: Optional[pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs']] = None,
log_config: Optional[pulumi.Input['GraphQLApiLogConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
openid_connect_config: Optional[pulumi.Input['GraphQLApiOpenidConnectConfigArgs']] = None,
schema: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_pool_config: Optional[pulumi.Input['GraphQLApiUserPoolConfigArgs']] = None,
xray_enabled: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a GraphQLApi resource.
:param pulumi.Input[str] authentication_type: The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
:param pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]] additional_authentication_providers: One or more additional authentication providers for the GraphqlApi. Defined below.
:param pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs'] lambda_authorizer_config: Nested argument containing Lambda authorizer configuration. Defined below.
:param pulumi.Input['GraphQLApiLogConfigArgs'] log_config: Nested argument containing logging configuration. Defined below.
:param pulumi.Input[str] name: A user-supplied name for the GraphqlApi.
:param pulumi.Input['GraphQLApiOpenidConnectConfigArgs'] openid_connect_config: Nested argument containing OpenID Connect configuration. Defined below.
:param pulumi.Input[str] schema: The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input['GraphQLApiUserPoolConfigArgs'] user_pool_config: The Amazon Cognito User Pool configuration. Defined below.
:param pulumi.Input[bool] xray_enabled: Whether tracing with X-ray is enabled. Defaults to false.
"""
pulumi.set(__self__, "authentication_type", authentication_type)
if additional_authentication_providers is not None:
pulumi.set(__self__, "additional_authentication_providers", additional_authentication_providers)
if lambda_authorizer_config is not None:
pulumi.set(__self__, "lambda_authorizer_config", lambda_authorizer_config)
if log_config is not None:
pulumi.set(__self__, "log_config", log_config)
if name is not None:
pulumi.set(__self__, "name", name)
if openid_connect_config is not None:
pulumi.set(__self__, "openid_connect_config", openid_connect_config)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if user_pool_config is not None:
pulumi.set(__self__, "user_pool_config", user_pool_config)
if xray_enabled is not None:
pulumi.set(__self__, "xray_enabled", xray_enabled)
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> pulumi.Input[str]:
"""
The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
"""
return pulumi.get(self, "authentication_type")
@authentication_type.setter
def authentication_type(self, value: pulumi.Input[str]):
pulumi.set(self, "authentication_type", value)
@property
@pulumi.getter(name="additionalAuthenticationProviders")
def additional_authentication_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]]]:
"""
One or more additional authentication providers for the GraphqlApi. Defined below.
"""
return pulumi.get(self, "additional_authentication_providers")
@additional_authentication_providers.setter
def additional_authentication_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]]]):
pulumi.set(self, "additional_authentication_providers", value)
@property
@pulumi.getter(name="lambdaAuthorizerConfig")
def lambda_authorizer_config(self) -> Optional[pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs']]:
"""
Nested argument containing Lambda authorizer configuration. Defined below.
"""
return pulumi.get(self, "lambda_authorizer_config")
@lambda_authorizer_config.setter
def lambda_authorizer_config(self, value: Optional[pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs']]):
pulumi.set(self, "lambda_authorizer_config", value)
@property
@pulumi.getter(name="logConfig")
def log_config(self) -> Optional[pulumi.Input['GraphQLApiLogConfigArgs']]:
"""
Nested argument containing logging configuration. Defined below.
"""
return pulumi.get(self, "log_config")
@log_config.setter
def log_config(self, value: Optional[pulumi.Input['GraphQLApiLogConfigArgs']]):
pulumi.set(self, "log_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A user-supplied name for the GraphqlApi.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="openidConnectConfig")
def openid_connect_config(self) -> Optional[pulumi.Input['GraphQLApiOpenidConnectConfigArgs']]:
"""
Nested argument containing OpenID Connect configuration. Defined below.
"""
return pulumi.get(self, "openid_connect_config")
@openid_connect_config.setter
def openid_connect_config(self, value: Optional[pulumi.Input['GraphQLApiOpenidConnectConfigArgs']]):
pulumi.set(self, "openid_connect_config", value)
@property
@pulumi.getter
def schema(self) -> Optional[pulumi.Input[str]]:
"""
The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="userPoolConfig")
def user_pool_config(self) -> Optional[pulumi.Input['GraphQLApiUserPoolConfigArgs']]:
"""
The Amazon Cognito User Pool configuration. Defined below.
"""
return pulumi.get(self, "user_pool_config")
@user_pool_config.setter
def user_pool_config(self, value: Optional[pulumi.Input['GraphQLApiUserPoolConfigArgs']]):
pulumi.set(self, "user_pool_config", value)
@property
@pulumi.getter(name="xrayEnabled")
def xray_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether tracing with X-ray is enabled. Defaults to false.
"""
return pulumi.get(self, "xray_enabled")
@xray_enabled.setter
def xray_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "xray_enabled", value)
@pulumi.input_type
class _GraphQLApiState:
def __init__(__self__, *,
additional_authentication_providers: Optional[pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]]] = None,
arn: Optional[pulumi.Input[str]] = None,
authentication_type: Optional[pulumi.Input[str]] = None,
lambda_authorizer_config: Optional[pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs']] = None,
log_config: Optional[pulumi.Input['GraphQLApiLogConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
openid_connect_config: Optional[pulumi.Input['GraphQLApiOpenidConnectConfigArgs']] = None,
schema: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
uris: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_pool_config: Optional[pulumi.Input['GraphQLApiUserPoolConfigArgs']] = None,
xray_enabled: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering GraphQLApi resources.
:param pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]] additional_authentication_providers: One or more additional authentication providers for the GraphqlApi. Defined below.
:param pulumi.Input[str] arn: The ARN
:param pulumi.Input[str] authentication_type: The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
:param pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs'] lambda_authorizer_config: Nested argument containing Lambda authorizer configuration. Defined below.
:param pulumi.Input['GraphQLApiLogConfigArgs'] log_config: Nested argument containing logging configuration. Defined below.
:param pulumi.Input[str] name: A user-supplied name for the GraphqlApi.
:param pulumi.Input['GraphQLApiOpenidConnectConfigArgs'] openid_connect_config: Nested argument containing OpenID Connect configuration. Defined below.
:param pulumi.Input[str] schema: The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] uris: Map of URIs associated with the API. E.g., `uris["GRAPHQL"] = https://ID.appsync-api.REGION.amazonaws.com/graphql`
:param pulumi.Input['GraphQLApiUserPoolConfigArgs'] user_pool_config: The Amazon Cognito User Pool configuration. Defined below.
:param pulumi.Input[bool] xray_enabled: Whether tracing with X-ray is enabled. Defaults to false.
"""
if additional_authentication_providers is not None:
pulumi.set(__self__, "additional_authentication_providers", additional_authentication_providers)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if lambda_authorizer_config is not None:
pulumi.set(__self__, "lambda_authorizer_config", lambda_authorizer_config)
if log_config is not None:
pulumi.set(__self__, "log_config", log_config)
if name is not None:
pulumi.set(__self__, "name", name)
if openid_connect_config is not None:
pulumi.set(__self__, "openid_connect_config", openid_connect_config)
if schema is not None:
pulumi.set(__self__, "schema", schema)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if uris is not None:
pulumi.set(__self__, "uris", uris)
if user_pool_config is not None:
pulumi.set(__self__, "user_pool_config", user_pool_config)
if xray_enabled is not None:
pulumi.set(__self__, "xray_enabled", xray_enabled)
@property
@pulumi.getter(name="additionalAuthenticationProviders")
def additional_authentication_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]]]:
"""
One or more additional authentication providers for the GraphqlApi. Defined below.
"""
return pulumi.get(self, "additional_authentication_providers")
@additional_authentication_providers.setter
def additional_authentication_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GraphQLApiAdditionalAuthenticationProviderArgs']]]]):
pulumi.set(self, "additional_authentication_providers", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[pulumi.Input[str]]:
"""
The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
"""
return pulumi.get(self, "authentication_type")
@authentication_type.setter
def authentication_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authentication_type", value)
@property
@pulumi.getter(name="lambdaAuthorizerConfig")
def lambda_authorizer_config(self) -> Optional[pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs']]:
"""
Nested argument containing Lambda authorizer configuration. Defined below.
"""
return pulumi.get(self, "lambda_authorizer_config")
@lambda_authorizer_config.setter
def lambda_authorizer_config(self, value: Optional[pulumi.Input['GraphQLApiLambdaAuthorizerConfigArgs']]):
pulumi.set(self, "lambda_authorizer_config", value)
@property
@pulumi.getter(name="logConfig")
def log_config(self) -> Optional[pulumi.Input['GraphQLApiLogConfigArgs']]:
"""
Nested argument containing logging configuration. Defined below.
"""
return pulumi.get(self, "log_config")
@log_config.setter
def log_config(self, value: Optional[pulumi.Input['GraphQLApiLogConfigArgs']]):
pulumi.set(self, "log_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A user-supplied name for the GraphqlApi.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="openidConnectConfig")
def openid_connect_config(self) -> Optional[pulumi.Input['GraphQLApiOpenidConnectConfigArgs']]:
"""
Nested argument containing OpenID Connect configuration. Defined below.
"""
return pulumi.get(self, "openid_connect_config")
@openid_connect_config.setter
def openid_connect_config(self, value: Optional[pulumi.Input['GraphQLApiOpenidConnectConfigArgs']]):
pulumi.set(self, "openid_connect_config", value)
@property
@pulumi.getter
def schema(self) -> Optional[pulumi.Input[str]]:
"""
The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter
def uris(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of URIs associated with the API. E.g., `uris["GRAPHQL"] = https://ID.appsync-api.REGION.amazonaws.com/graphql`
"""
return pulumi.get(self, "uris")
@uris.setter
def uris(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "uris", value)
@property
@pulumi.getter(name="userPoolConfig")
def user_pool_config(self) -> Optional[pulumi.Input['GraphQLApiUserPoolConfigArgs']]:
"""
The Amazon Cognito User Pool configuration. Defined below.
"""
return pulumi.get(self, "user_pool_config")
@user_pool_config.setter
def user_pool_config(self, value: Optional[pulumi.Input['GraphQLApiUserPoolConfigArgs']]):
pulumi.set(self, "user_pool_config", value)
@property
@pulumi.getter(name="xrayEnabled")
def xray_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether tracing with X-ray is enabled. Defaults to false.
"""
return pulumi.get(self, "xray_enabled")
@xray_enabled.setter
def xray_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "xray_enabled", value)
class GraphQLApi(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_authentication_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GraphQLApiAdditionalAuthenticationProviderArgs']]]]] = None,
authentication_type: Optional[pulumi.Input[str]] = None,
lambda_authorizer_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiLambdaAuthorizerConfigArgs']]] = None,
log_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiLogConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
openid_connect_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiOpenidConnectConfigArgs']]] = None,
schema: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_pool_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiUserPoolConfigArgs']]] = None,
xray_enabled: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides an AppSync GraphQL API.
## Example Usage
### API Key Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example", authentication_type="API_KEY")
```
### AWS IAM Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example", authentication_type="AWS_IAM")
```
### AWS Cognito User Pool Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="AMAZON_COGNITO_USER_POOLS",
user_pool_config=aws.appsync.GraphQLApiUserPoolConfigArgs(
aws_region=data["aws_region"]["current"]["name"],
default_action="DENY",
user_pool_id=aws_cognito_user_pool["example"]["id"],
))
```
### OpenID Connect Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="OPENID_CONNECT",
openid_connect_config=aws.appsync.GraphQLApiOpenidConnectConfigArgs(
issuer="https://example.com",
))
```
### AWS Lambda Authorizer Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="AWS_LAMBDA",
lambda_authorizer_config=aws.appsync.GraphQLApiLambdaAuthorizerConfigArgs(
authorizer_uri="arn:aws:lambda:us-east-1:123456789012:function:custom_lambda_authorizer",
))
appsync_lambda_authorizer = aws.lambda_.Permission("appsyncLambdaAuthorizer",
action="lambda:InvokeFunction",
function="custom_lambda_authorizer",
principal="appsync.amazonaws.com",
source_arn=example.arn)
```
### With Multiple Authentication Providers
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
additional_authentication_providers=[aws.appsync.GraphQLApiAdditionalAuthenticationProviderArgs(
authentication_type="AWS_IAM",
)],
authentication_type="API_KEY")
```
### With Schema
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="AWS_IAM",
schema=\"\"\"schema {
query: Query
}
type Query {
test: Int
}
\"\"\")
```
### Enabling Logging
```python
import pulumi
import pulumi_aws as aws
example_role = aws.iam.Role("exampleRole", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "appsync.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
\"\"\")
example_role_policy_attachment = aws.iam.RolePolicyAttachment("exampleRolePolicyAttachment",
policy_arn="arn:aws:iam::aws:policy/service-role/AWSAppSyncPushToCloudWatchLogs",
role=example_role.name)
# ... other configuration ...
example_graph_ql_api = aws.appsync.GraphQLApi("exampleGraphQLApi", log_config=aws.appsync.GraphQLApiLogConfigArgs(
cloudwatch_logs_role_arn=example_role.arn,
field_log_level="ERROR",
))
```
### Associate Web ACL (v2)
```python
import pulumi
import pulumi_aws as aws
example_graph_ql_api = aws.appsync.GraphQLApi("exampleGraphQLApi", authentication_type="API_KEY")
example_web_acl = aws.wafv2.WebAcl("exampleWebAcl",
description="Example of a managed rule.",
scope="REGIONAL",
default_action=aws.wafv2.WebAclDefaultActionArgs(
allow=aws.wafv2.WebAclDefaultActionAllowArgs(),
),
rules=[aws.wafv2.WebAclRuleArgs(
name="rule-1",
priority=1,
override_action=aws.wafv2.WebAclRuleOverrideActionArgs(
block=[{}],
),
statement=aws.wafv2.WebAclRuleStatementArgs(
managed_rule_group_statement=aws.wafv2.WebAclRuleStatementManagedRuleGroupStatementArgs(
name="AWSManagedRulesCommonRuleSet",
vendor_name="AWS",
),
),
visibility_config=aws.wafv2.WebAclRuleVisibilityConfigArgs(
cloudwatch_metrics_enabled=False,
metric_name="friendly-rule-metric-name",
sampled_requests_enabled=False,
),
)],
visibility_config=aws.wafv2.WebAclVisibilityConfigArgs(
cloudwatch_metrics_enabled=False,
metric_name="friendly-metric-name",
sampled_requests_enabled=False,
))
example_web_acl_association = aws.wafv2.WebAclAssociation("exampleWebAclAssociation",
resource_arn=example_graph_ql_api.arn,
web_acl_arn=example_web_acl.arn)
```
## Import
AppSync GraphQL API can be imported using the GraphQL API ID, e.g.,
```sh
$ pulumi import aws:appsync/graphQLApi:GraphQLApi example 0123456789
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GraphQLApiAdditionalAuthenticationProviderArgs']]]] additional_authentication_providers: One or more additional authentication providers for the GraphqlApi. Defined below.
:param pulumi.Input[str] authentication_type: The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
:param pulumi.Input[pulumi.InputType['GraphQLApiLambdaAuthorizerConfigArgs']] lambda_authorizer_config: Nested argument containing Lambda authorizer configuration. Defined below.
:param pulumi.Input[pulumi.InputType['GraphQLApiLogConfigArgs']] log_config: Nested argument containing logging configuration. Defined below.
:param pulumi.Input[str] name: A user-supplied name for the GraphqlApi.
:param pulumi.Input[pulumi.InputType['GraphQLApiOpenidConnectConfigArgs']] openid_connect_config: Nested argument containing OpenID Connect configuration. Defined below.
:param pulumi.Input[str] schema: The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[pulumi.InputType['GraphQLApiUserPoolConfigArgs']] user_pool_config: The Amazon Cognito User Pool configuration. Defined below.
:param pulumi.Input[bool] xray_enabled: Whether tracing with X-ray is enabled. Defaults to false.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GraphQLApiArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an AppSync GraphQL API.
## Example Usage
### API Key Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example", authentication_type="API_KEY")
```
### AWS IAM Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example", authentication_type="AWS_IAM")
```
### AWS Cognito User Pool Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="AMAZON_COGNITO_USER_POOLS",
user_pool_config=aws.appsync.GraphQLApiUserPoolConfigArgs(
aws_region=data["aws_region"]["current"]["name"],
default_action="DENY",
user_pool_id=aws_cognito_user_pool["example"]["id"],
))
```
### OpenID Connect Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="OPENID_CONNECT",
openid_connect_config=aws.appsync.GraphQLApiOpenidConnectConfigArgs(
issuer="https://example.com",
))
```
### AWS Lambda Authorizer Authentication
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="AWS_LAMBDA",
lambda_authorizer_config=aws.appsync.GraphQLApiLambdaAuthorizerConfigArgs(
authorizer_uri="arn:aws:lambda:us-east-1:123456789012:function:custom_lambda_authorizer",
))
appsync_lambda_authorizer = aws.lambda_.Permission("appsyncLambdaAuthorizer",
action="lambda:InvokeFunction",
function="custom_lambda_authorizer",
principal="appsync.amazonaws.com",
source_arn=example.arn)
```
### With Multiple Authentication Providers
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
additional_authentication_providers=[aws.appsync.GraphQLApiAdditionalAuthenticationProviderArgs(
authentication_type="AWS_IAM",
)],
authentication_type="API_KEY")
```
### With Schema
```python
import pulumi
import pulumi_aws as aws
example = aws.appsync.GraphQLApi("example",
authentication_type="AWS_IAM",
schema=\"\"\"schema {
query: Query
}
type Query {
test: Int
}
\"\"\")
```
### Enabling Logging
```python
import pulumi
import pulumi_aws as aws
example_role = aws.iam.Role("exampleRole", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "appsync.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
\"\"\")
example_role_policy_attachment = aws.iam.RolePolicyAttachment("exampleRolePolicyAttachment",
policy_arn="arn:aws:iam::aws:policy/service-role/AWSAppSyncPushToCloudWatchLogs",
role=example_role.name)
# ... other configuration ...
example_graph_ql_api = aws.appsync.GraphQLApi("exampleGraphQLApi", log_config=aws.appsync.GraphQLApiLogConfigArgs(
cloudwatch_logs_role_arn=example_role.arn,
field_log_level="ERROR",
))
```
### Associate Web ACL (v2)
```python
import pulumi
import pulumi_aws as aws
example_graph_ql_api = aws.appsync.GraphQLApi("exampleGraphQLApi", authentication_type="API_KEY")
example_web_acl = aws.wafv2.WebAcl("exampleWebAcl",
description="Example of a managed rule.",
scope="REGIONAL",
default_action=aws.wafv2.WebAclDefaultActionArgs(
allow=aws.wafv2.WebAclDefaultActionAllowArgs(),
),
rules=[aws.wafv2.WebAclRuleArgs(
name="rule-1",
priority=1,
override_action=aws.wafv2.WebAclRuleOverrideActionArgs(
block=[{}],
),
statement=aws.wafv2.WebAclRuleStatementArgs(
managed_rule_group_statement=aws.wafv2.WebAclRuleStatementManagedRuleGroupStatementArgs(
name="AWSManagedRulesCommonRuleSet",
vendor_name="AWS",
),
),
visibility_config=aws.wafv2.WebAclRuleVisibilityConfigArgs(
cloudwatch_metrics_enabled=False,
metric_name="friendly-rule-metric-name",
sampled_requests_enabled=False,
),
)],
visibility_config=aws.wafv2.WebAclVisibilityConfigArgs(
cloudwatch_metrics_enabled=False,
metric_name="friendly-metric-name",
sampled_requests_enabled=False,
))
example_web_acl_association = aws.wafv2.WebAclAssociation("exampleWebAclAssociation",
resource_arn=example_graph_ql_api.arn,
web_acl_arn=example_web_acl.arn)
```
## Import
AppSync GraphQL API can be imported using the GraphQL API ID, e.g.,
```sh
$ pulumi import aws:appsync/graphQLApi:GraphQLApi example 0123456789
```
:param str resource_name: The name of the resource.
:param GraphQLApiArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GraphQLApiArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
additional_authentication_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GraphQLApiAdditionalAuthenticationProviderArgs']]]]] = None,
authentication_type: Optional[pulumi.Input[str]] = None,
lambda_authorizer_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiLambdaAuthorizerConfigArgs']]] = None,
log_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiLogConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
openid_connect_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiOpenidConnectConfigArgs']]] = None,
schema: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_pool_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiUserPoolConfigArgs']]] = None,
xray_enabled: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GraphQLApiArgs.__new__(GraphQLApiArgs)
__props__.__dict__["additional_authentication_providers"] = additional_authentication_providers
if authentication_type is None and not opts.urn:
raise TypeError("Missing required property 'authentication_type'")
__props__.__dict__["authentication_type"] = authentication_type
__props__.__dict__["lambda_authorizer_config"] = lambda_authorizer_config
__props__.__dict__["log_config"] = log_config
__props__.__dict__["name"] = name
__props__.__dict__["openid_connect_config"] = openid_connect_config
__props__.__dict__["schema"] = schema
__props__.__dict__["tags"] = tags
__props__.__dict__["user_pool_config"] = user_pool_config
__props__.__dict__["xray_enabled"] = xray_enabled
__props__.__dict__["arn"] = None
__props__.__dict__["tags_all"] = None
__props__.__dict__["uris"] = None
super(GraphQLApi, __self__).__init__(
'aws:appsync/graphQLApi:GraphQLApi',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
additional_authentication_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GraphQLApiAdditionalAuthenticationProviderArgs']]]]] = None,
arn: Optional[pulumi.Input[str]] = None,
authentication_type: Optional[pulumi.Input[str]] = None,
lambda_authorizer_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiLambdaAuthorizerConfigArgs']]] = None,
log_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiLogConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
openid_connect_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiOpenidConnectConfigArgs']]] = None,
schema: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
uris: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_pool_config: Optional[pulumi.Input[pulumi.InputType['GraphQLApiUserPoolConfigArgs']]] = None,
xray_enabled: Optional[pulumi.Input[bool]] = None) -> 'GraphQLApi':
"""
Get an existing GraphQLApi resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GraphQLApiAdditionalAuthenticationProviderArgs']]]] additional_authentication_providers: One or more additional authentication providers for the GraphqlApi. Defined below.
:param pulumi.Input[str] arn: The ARN
:param pulumi.Input[str] authentication_type: The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
:param pulumi.Input[pulumi.InputType['GraphQLApiLambdaAuthorizerConfigArgs']] lambda_authorizer_config: Nested argument containing Lambda authorizer configuration. Defined below.
:param pulumi.Input[pulumi.InputType['GraphQLApiLogConfigArgs']] log_config: Nested argument containing logging configuration. Defined below.
:param pulumi.Input[str] name: A user-supplied name for the GraphqlApi.
:param pulumi.Input[pulumi.InputType['GraphQLApiOpenidConnectConfigArgs']] openid_connect_config: Nested argument containing OpenID Connect configuration. Defined below.
:param pulumi.Input[str] schema: The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] uris: Map of URIs associated with the API. E.g., `uris["GRAPHQL"] = https://ID.appsync-api.REGION.amazonaws.com/graphql`
:param pulumi.Input[pulumi.InputType['GraphQLApiUserPoolConfigArgs']] user_pool_config: The Amazon Cognito User Pool configuration. Defined below.
:param pulumi.Input[bool] xray_enabled: Whether tracing with X-ray is enabled. Defaults to false.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GraphQLApiState.__new__(_GraphQLApiState)
__props__.__dict__["additional_authentication_providers"] = additional_authentication_providers
__props__.__dict__["arn"] = arn
__props__.__dict__["authentication_type"] = authentication_type
__props__.__dict__["lambda_authorizer_config"] = lambda_authorizer_config
__props__.__dict__["log_config"] = log_config
__props__.__dict__["name"] = name
__props__.__dict__["openid_connect_config"] = openid_connect_config
__props__.__dict__["schema"] = schema
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["uris"] = uris
__props__.__dict__["user_pool_config"] = user_pool_config
__props__.__dict__["xray_enabled"] = xray_enabled
return GraphQLApi(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="additionalAuthenticationProviders")
def additional_authentication_providers(self) -> pulumi.Output[Optional[Sequence['outputs.GraphQLApiAdditionalAuthenticationProvider']]]:
"""
One or more additional authentication providers for the GraphqlApi. Defined below.
"""
return pulumi.get(self, "additional_authentication_providers")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> pulumi.Output[str]:
"""
The authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`
"""
return pulumi.get(self, "authentication_type")
@property
@pulumi.getter(name="lambdaAuthorizerConfig")
def lambda_authorizer_config(self) -> pulumi.Output[Optional['outputs.GraphQLApiLambdaAuthorizerConfig']]:
"""
Nested argument containing Lambda authorizer configuration. Defined below.
"""
return pulumi.get(self, "lambda_authorizer_config")
@property
@pulumi.getter(name="logConfig")
def log_config(self) -> pulumi.Output[Optional['outputs.GraphQLApiLogConfig']]:
"""
Nested argument containing logging configuration. Defined below.
"""
return pulumi.get(self, "log_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A user-supplied name for the GraphqlApi.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="openidConnectConfig")
def openid_connect_config(self) -> pulumi.Output[Optional['outputs.GraphQLApiOpenidConnectConfig']]:
"""
Nested argument containing OpenID Connect configuration. Defined below.
"""
return pulumi.get(self, "openid_connect_config")
@property
@pulumi.getter
def schema(self) -> pulumi.Output[Optional[str]]:
"""
The schema definition, in GraphQL schema language format. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "schema")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@property
@pulumi.getter
def uris(self) -> pulumi.Output[Mapping[str, str]]:
"""
Map of URIs associated with the API. E.g., `uris["GRAPHQL"] = https://ID.appsync-api.REGION.amazonaws.com/graphql`
"""
return pulumi.get(self, "uris")
@property
@pulumi.getter(name="userPoolConfig")
def user_pool_config(self) -> pulumi.Output[Optional['outputs.GraphQLApiUserPoolConfig']]:
"""
The Amazon Cognito User Pool configuration. Defined below.
"""
return pulumi.get(self, "user_pool_config")
@property
@pulumi.getter(name="xrayEnabled")
def xray_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether tracing with X-ray is enabled. Defaults to false.
"""
return pulumi.get(self, "xray_enabled")
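# --- Added usage sketch (illustrative only; the resource names and the ID below are
# --- assumptions, not part of the generated module). It combines the constructor and
# --- the GraphQLApi.get() lookup documented above; kept commented out so the module
# --- still imports without side effects.
# import pulumi
# import pulumi_aws as aws
#
# # Create a new API with the required authentication_type and optional X-ray tracing.
# api = aws.appsync.GraphQLApi("example", authentication_type="API_KEY", xray_enabled=True)
#
# # Look up an existing API by its provider ID, mirroring the get() staticmethod above.
# existing = aws.appsync.GraphQLApi.get("existing-example", id="0123456789")
#
# # The `uris` output exposes the GraphQL endpoint, e.g. uris["GRAPHQL"].
# pulumi.export("graphql_uri", api.uris["GRAPHQL"])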
| 47.019192
| 258
| 0.663731
| 4,789
| 46,549
| 6.226561
| 0.06494
| 0.07009
| 0.063081
| 0.022938
| 0.927764
| 0.921225
| 0.907307
| 0.903048
| 0.890942
| 0.879976
| 0
| 0.002502
| 0.23573
| 46,549
| 989
| 259
| 47.066734
| 0.835676
| 0.439172
| 0
| 0.783042
| 1
| 0
| 0.169127
| 0.11681
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164589
| false
| 0.002494
| 0.017456
| 0
| 0.281796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7ccd076cb370c8596406e8d20894c76ac7c0bbe3
| 22,886
|
py
|
Python
|
UI/Gpio.py
|
attify/attify-badge
|
2a1448172409cc719b7ff3ccfd8cf51519fc1ad3
|
[
"MIT"
] | 64
|
2017-02-22T09:40:03.000Z
|
2022-02-20T02:53:42.000Z
|
UI/Gpio.py
|
attify/attify-badge
|
2a1448172409cc719b7ff3ccfd8cf51519fc1ad3
|
[
"MIT"
] | 7
|
2018-06-04T10:48:41.000Z
|
2022-03-31T05:25:01.000Z
|
UI/Gpio.py
|
attify/attify-badge
|
2a1448172409cc719b7ff3ccfd8cf51519fc1ad3
|
[
"MIT"
] | 19
|
2017-02-22T18:14:25.000Z
|
2021-12-04T05:38:18.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Gpio-input.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(321, 454)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(236, 235, 235))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 108, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(145, 144, 144))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(236, 235, 235))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(236, 235, 235))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 108, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(145, 144, 144))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(236, 235, 235))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 108, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(236, 235, 235))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 108, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(145, 144, 144))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 108, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 108, 108))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(217, 216, 216))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
Form.setPalette(palette)
self.frame = QtGui.QFrame(Form)
self.frame.setGeometry(QtCore.QRect(10, 20, 301, 421))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 212, 212))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 85, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 113, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 212, 212))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 212, 212))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 85, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 113, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 212, 212))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 85, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 212, 212))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 85, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 113, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 85, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 85, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.frame.setPalette(palette)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Sunken)
self.frame.setObjectName(_fromUtf8("frame"))
self.label_1 = QtGui.QLabel(self.frame)
self.label_1.setGeometry(QtCore.QRect(30, 30, 68, 17))
self.label_1.setObjectName(_fromUtf8("label_1"))
self.label_2 = QtGui.QLabel(self.frame)
self.label_2.setGeometry(QtCore.QRect(30, 70, 68, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(self.frame)
self.label_3.setGeometry(QtCore.QRect(30, 110, 68, 17))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(self.frame)
self.label_4.setGeometry(QtCore.QRect(30, 150, 68, 17))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(self.frame)
self.label_5.setGeometry(QtCore.QRect(30, 190, 68, 17))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.label_6 = QtGui.QLabel(self.frame)
self.label_6.setGeometry(QtCore.QRect(30, 230, 68, 17))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.label_7 = QtGui.QLabel(self.frame)
self.label_7.setGeometry(QtCore.QRect(30, 270, 68, 17))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_8 = QtGui.QLabel(self.frame)
self.label_8.setGeometry(QtCore.QRect(30, 310, 68, 17))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.D0_Status = QtGui.QLabel(self.frame)
self.D0_Status.setGeometry(QtCore.QRect(170, 30, 121, 17))
self.D0_Status.setObjectName(_fromUtf8("D0_Status"))
self.D1_Status = QtGui.QLabel(self.frame)
self.D1_Status.setGeometry(QtCore.QRect(170, 70, 121, 17))
self.D1_Status.setObjectName(_fromUtf8("D1_Status"))
self.D2_Status = QtGui.QLabel(self.frame)
self.D2_Status.setGeometry(QtCore.QRect(170, 110, 121, 17))
self.D2_Status.setObjectName(_fromUtf8("D2_Status"))
self.D3_Status = QtGui.QLabel(self.frame)
self.D3_Status.setGeometry(QtCore.QRect(170, 150, 121, 17))
self.D3_Status.setObjectName(_fromUtf8("D3_Status"))
self.D4_Status = QtGui.QLabel(self.frame)
self.D4_Status.setGeometry(QtCore.QRect(170, 190, 121, 17))
self.D4_Status.setObjectName(_fromUtf8("D4_Status"))
self.D5_Status = QtGui.QLabel(self.frame)
self.D5_Status.setGeometry(QtCore.QRect(170, 230, 121, 17))
self.D5_Status.setObjectName(_fromUtf8("D5_Status"))
self.D6_Status = QtGui.QLabel(self.frame)
self.D6_Status.setGeometry(QtCore.QRect(170, 270, 121, 17))
self.D6_Status.setObjectName(_fromUtf8("D6_Status"))
self.D7_Status = QtGui.QLabel(self.frame)
self.D7_Status.setGeometry(QtCore.QRect(170, 310, 121, 17))
self.D7_Status.setObjectName(_fromUtf8("D7_Status"))
self.line = QtGui.QFrame(self.frame)
self.line.setGeometry(QtCore.QRect(134, 30, 31, 301))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.StartMonitor = QtGui.QPushButton(self.frame)
self.StartMonitor.setGeometry(QtCore.QRect(150, 370, 71, 27))
self.StartMonitor.setObjectName(_fromUtf8("StartMonitor"))
self.pushButton_Exit = QtGui.QPushButton(self.frame)
self.pushButton_Exit.setGeometry(QtCore.QRect(220, 370, 71, 27))
self.pushButton_Exit.setObjectName(_fromUtf8("pushButton_Exit"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "GPIO Input Monitor", None))
self.label_1.setText(_translate("Form", " Pin D0", None))
self.label_2.setText(_translate("Form", " Pin D1", None))
self.label_3.setText(_translate("Form", " Pin D2", None))
self.label_4.setText(_translate("Form", " Pin D3", None))
self.label_5.setText(_translate("Form", " Pin D4", None))
self.label_6.setText(_translate("Form", " Pin D5", None))
self.label_7.setText(_translate("Form", " Pin D6", None))
self.label_8.setText(_translate("Form", " Pin D7", None))
self.D0_Status.setText(_translate("Form", "State : Inactive", None))
self.D1_Status.setText(_translate("Form", "State : Inactive", None))
self.D2_Status.setText(_translate("Form", "State : Inactive", None))
self.D3_Status.setText(_translate("Form", "State : Inactive", None))
self.D4_Status.setText(_translate("Form", "State : Inactive", None))
self.D5_Status.setText(_translate("Form", "State : Inactive", None))
self.D6_Status.setText(_translate("Form", "State : Inactive", None))
self.D7_Status.setText(_translate("Form", "State : Inactive", None))
self.StartMonitor.setText(_translate("Form", "Start", None))
self.pushButton_Exit.setText(_translate("Form", "Stop", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 56.930348
| 86
| 0.689242
| 2,784
| 22,886
| 5.614943
| 0.059626
| 0.151356
| 0.092119
| 0.120906
| 0.827917
| 0.803864
| 0.773669
| 0.773669
| 0.749616
| 0.749616
| 0
| 0.05183
| 0.182251
| 22,886
| 401
| 87
| 57.072319
| 0.783436
| 0.00804
| 0
| 0.722078
| 1
| 0
| 0.021107
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0
| 0.005195
| 0.007792
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6b3856bc49d3efcc0145e3ff21557270310bf2f3
| 13,651
|
py
|
Python
|
kale/embed/video_se_res3d.py
|
19valentin99/pykale
|
4bfc7d49b255d00dbcba39dd9aaf787a8ae0b4ad
|
[
"MIT"
] | null | null | null |
kale/embed/video_se_res3d.py
|
19valentin99/pykale
|
4bfc7d49b255d00dbcba39dd9aaf787a8ae0b4ad
|
[
"MIT"
] | null | null | null |
kale/embed/video_se_res3d.py
|
19valentin99/pykale
|
4bfc7d49b255d00dbcba39dd9aaf787a8ae0b4ad
|
[
"MIT"
] | null | null | null |
# =============================================================================
# Author: Xianyuan Liu, xianyuan.liu@sheffield.ac.uk or xianyuan.liu@outlook.com
# =============================================================================
"""Add SELayers to MC3_18, R3D_18, R2plus1D_18"""
from torch.hub import load_state_dict_from_url
from kale.embed.video_res3d import (
BasicBlock,
BasicFLowStem,
BasicStem,
Conv2Plus1D,
Conv3DNoTemporal,
Conv3DSimple,
R2Plus1dFlowStem,
R2Plus1dStem,
VideoResNet,
)
from kale.embed.video_selayer import get_selayer, SELayerC, SELayerT
model_urls = {
"r3d_18": "https://download.pytorch.org/models/r3d_18-b3b3357e.pth",
"mc3_18": "https://download.pytorch.org/models/mc3_18-a90a0ba3.pth",
"r2plus1d_18": "https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth",
}
def _se_video_resnet_rgb(arch, attention, pretrained=False, progress=True, **kwargs):
"""Add the several SELayers to MC3_18, R3D_18, R2plus1D_18 for RGB input.
Args:
arch (string): the name of the base architecture. (Options: ["r3d_18", "mc3_18", "r2plus1d_18"])
attention (string): the name of the SELayer.
(Options: ["SELayerC", "SELayerT", "SELayerCoC", "SELayerMC", "SELayerMAC", "SELayerCT", and "SELayerTC"])
pretrained (bool): whether to load pretrained parameters. (Default: False)
progress (bool, optional): whether or not to display a progress bar to stderr. (Default: True)
Returns:
model (VideoResNet): 3D convolution-based model with SELayers.
"""
model = VideoResNet(**kwargs)
temporal_length = 16
# Add channel-wise SELayer
if attention in ["SELayerC", "SELayerCoC", "SELayerMC", "SELayerMAC"]:
se_layer = get_selayer(attention)
model.layer1._modules["0"].add_module(attention, se_layer(64))
model.layer1._modules["1"].add_module(attention, se_layer(64))
model.layer2._modules["0"].add_module(attention, se_layer(128))
model.layer2._modules["1"].add_module(attention, se_layer(128))
model.layer3._modules["0"].add_module(attention, se_layer(256))
model.layer3._modules["1"].add_module(attention, se_layer(256))
model.layer4._modules["0"].add_module(attention, se_layer(512))
model.layer4._modules["1"].add_module(attention, se_layer(512))
# Add temporal-wise SELayer
elif attention == "SELayerT":
se_layer = get_selayer(attention)
model.layer1._modules["0"].add_module(attention, se_layer(temporal_length))
model.layer1._modules["1"].add_module(attention, se_layer(temporal_length))
model.layer2._modules["0"].add_module(attention, se_layer(temporal_length // 2))
model.layer2._modules["1"].add_module(attention, se_layer(temporal_length // 2))
model.layer3._modules["0"].add_module(attention, se_layer(temporal_length // 4))
model.layer3._modules["1"].add_module(attention, se_layer(temporal_length // 4))
# Add channel-temporal-wise SELayer
elif attention == "SELayerCT":
model.layer1._modules["0"].add_module(attention + "c", SELayerC(64))
model.layer1._modules["1"].add_module(attention + "c", SELayerC(64))
model.layer2._modules["0"].add_module(attention + "c", SELayerC(128))
model.layer2._modules["1"].add_module(attention + "c", SELayerC(128))
model.layer3._modules["0"].add_module(attention + "c", SELayerC(256))
model.layer3._modules["1"].add_module(attention + "c", SELayerC(256))
model.layer4._modules["0"].add_module(attention + "c", SELayerC(512))
model.layer4._modules["1"].add_module(attention + "c", SELayerC(512))
model.layer1._modules["0"].add_module(attention + "t", SELayerT(temporal_length))
model.layer1._modules["1"].add_module(attention + "t", SELayerT(temporal_length))
model.layer2._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer2._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer3._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 4))
model.layer3._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 4))
# Add temporal-channel-wise SELayer
elif attention == "SELayerTC":
model.layer1._modules["0"].add_module(attention + "t", SELayerT(temporal_length))
model.layer1._modules["1"].add_module(attention + "t", SELayerT(temporal_length))
model.layer2._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer2._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer3._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 4))
model.layer3._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 4))
model.layer1._modules["0"].add_module(attention + "c", SELayerC(64))
model.layer1._modules["1"].add_module(attention + "c", SELayerC(64))
model.layer2._modules["0"].add_module(attention + "c", SELayerC(128))
model.layer2._modules["1"].add_module(attention + "c", SELayerC(128))
model.layer3._modules["0"].add_module(attention + "c", SELayerC(256))
model.layer3._modules["1"].add_module(attention + "c", SELayerC(256))
model.layer4._modules["0"].add_module(attention + "c", SELayerC(512))
model.layer4._modules["1"].add_module(attention + "c", SELayerC(512))
else:
raise ValueError("Wrong MODEL.ATTENTION. Current:{}".format(attention))
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
def _se_video_resnet_flow(arch, attention, pretrained=False, progress=True, **kwargs):
"""Add the several SELayers to MC3_18, R3D_18, R2plus1D_18 for optical flow input."""
model = VideoResNet(**kwargs)
temporal_length = 16
# Add channel-wise SELayer
if attention in ["SELayerC", "SELayerCoC", "SELayerMC", "SELayerMAC"]:
se_layer = get_selayer(attention)
model.layer1._modules["0"].add_module(attention, se_layer(64))
model.layer1._modules["1"].add_module(attention, se_layer(64))
model.layer2._modules["0"].add_module(attention, se_layer(128))
model.layer2._modules["1"].add_module(attention, se_layer(128))
model.layer3._modules["0"].add_module(attention, se_layer(256))
model.layer3._modules["1"].add_module(attention, se_layer(256))
model.layer4._modules["0"].add_module(attention, se_layer(512))
model.layer4._modules["1"].add_module(attention, se_layer(512))
# Add temporal-wise SELayer
elif attention == "SELayerT":
se_layer = get_selayer(attention)
model.layer1._modules["0"].add_module(attention, se_layer(temporal_length // 2))
model.layer1._modules["1"].add_module(attention, se_layer(temporal_length // 2))
model.layer2._modules["0"].add_module(attention, se_layer(temporal_length // 4))
model.layer2._modules["1"].add_module(attention, se_layer(temporal_length // 4))
# Add channel-temporal-wise SELayer
elif attention == "SELayerCT":
model.layer1._modules["0"].add_module(attention + "c", SELayerC(64))
model.layer1._modules["1"].add_module(attention + "c", SELayerC(64))
model.layer2._modules["0"].add_module(attention + "c", SELayerC(128))
model.layer2._modules["1"].add_module(attention + "c", SELayerC(128))
model.layer3._modules["0"].add_module(attention + "c", SELayerC(256))
model.layer3._modules["1"].add_module(attention + "c", SELayerC(256))
model.layer4._modules["0"].add_module(attention + "c", SELayerC(512))
model.layer4._modules["1"].add_module(attention + "c", SELayerC(512))
model.layer1._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer1._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer2._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 4))
model.layer2._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 4))
# Add temporal-channel-wise SELayer
elif attention == "SELayerTC":
model.layer1._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer1._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 2))
model.layer2._modules["0"].add_module(attention + "t", SELayerT(temporal_length // 4))
model.layer2._modules["1"].add_module(attention + "t", SELayerT(temporal_length // 4))
model.layer1._modules["0"].add_module(attention + "c", SELayerC(64))
model.layer1._modules["1"].add_module(attention + "c", SELayerC(64))
model.layer2._modules["0"].add_module(attention + "c", SELayerC(128))
model.layer2._modules["1"].add_module(attention + "c", SELayerC(128))
model.layer3._modules["0"].add_module(attention + "c", SELayerC(256))
model.layer3._modules["1"].add_module(attention + "c", SELayerC(256))
model.layer4._modules["0"].add_module(attention + "c", SELayerC(512))
model.layer4._modules["1"].add_module(attention + "c", SELayerC(512))
else:
raise ValueError("Wrong MODEL.ATTENTION. Current:{}".format(attention))
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
state_dict.pop("stem.0.weight")
model.load_state_dict(state_dict, strict=False)
return model
def se_r3d_18_rgb(attention, pretrained=False, progress=True, **kwargs):
return _se_video_resnet_rgb(
"r3d_18",
attention,
pretrained,
progress,
block=BasicBlock,
conv_makers=[Conv3DSimple] * 4,
layers=[2, 2, 2, 2],
stem=BasicStem,
**kwargs,
)
def se_r3d_18_flow(attention, pretrained=False, progress=True, **kwargs):
return _se_video_resnet_flow(
"r3d_18",
attention,
pretrained,
progress,
block=BasicBlock,
conv_makers=[Conv3DSimple] * 4,
layers=[2, 2, 2, 2],
stem=BasicFLowStem,
**kwargs,
)
def se_mc3_18_rgb(attention, pretrained=False, progress=True, **kwargs):
return _se_video_resnet_rgb(
"mc3_18",
attention,
pretrained,
progress,
block=BasicBlock,
conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3,
layers=[2, 2, 2, 2],
stem=BasicStem,
**kwargs,
)
def se_mc3_18_flow(attention, pretrained=False, progress=True, **kwargs):
return _se_video_resnet_flow(
"mc3_18",
attention,
pretrained,
progress,
block=BasicBlock,
conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3,
layers=[2, 2, 2, 2],
stem=BasicFLowStem,
**kwargs,
)
def se_r2plus1d_18_rgb(attention, pretrained=False, progress=True, **kwargs):
return _se_video_resnet_rgb(
"r2plus1d_18",
attention,
pretrained,
progress,
block=BasicBlock,
conv_makers=[Conv2Plus1D] * 4,
layers=[2, 2, 2, 2],
stem=R2Plus1dStem,
**kwargs,
)
def se_r2plus1d_18_flow(attention, pretrained=False, progress=True, **kwargs):
return _se_video_resnet_flow(
"r2plus1d_18",
attention,
pretrained,
progress,
block=BasicBlock,
conv_makers=[Conv2Plus1D] * 4,
layers=[2, 2, 2, 2],
stem=R2Plus1dFlowStem,
**kwargs,
)
def se_r3d(attention, rgb=False, flow=False, pretrained=False, progress=True):
"""Get R3D_18 models with SELayers for different inputs.
Args:
attention (string): the name of the SELayer.
rgb (bool): whether an RGB model is needed. (Default: False)
flow (bool): whether an optical flow model is needed. (Default: False)
pretrained (bool): whether to load pretrained parameters. (Default: False)
progress (bool, optional): whether or not to display a progress bar to stderr. (Default: True)
Returns:
models (dictionary): A dictionary containing the models for RGB and optical flow.
"""
r3d_rgb = r3d_flow = None
if rgb:
r3d_rgb = se_r3d_18_rgb(attention=attention, pretrained=pretrained, progress=progress)
if flow:
r3d_flow = se_r3d_18_flow(attention=attention, pretrained=pretrained, progress=progress)
models = {"rgb": r3d_rgb, "flow": r3d_flow}
return models
def se_mc3(attention, rgb=False, flow=False, pretrained=False, progress=True):
"""Get MC3_18 models with SELayers for different inputs."""
mc3_rgb = mc3_flow = None
if rgb:
mc3_rgb = se_mc3_18_rgb(attention=attention, pretrained=pretrained, progress=progress)
if flow:
mc3_flow = se_mc3_18_flow(attention=attention, pretrained=pretrained, progress=progress)
models = {"rgb": mc3_rgb, "flow": mc3_flow}
return models
def se_r2plus1d(attention, rgb=False, flow=False, pretrained=False, progress=True):
"""Get R2+1D_18 models with SELayers for different inputs."""
r2plus1d_rgb = r2plus1d_flow = None
if rgb:
r2plus1d_rgb = se_r2plus1d_18_rgb(attention=attention, pretrained=pretrained, progress=progress)
if flow:
r2plus1d_flow = se_r2plus1d_18_flow(attention=attention, pretrained=pretrained, progress=progress)
models = {"rgb": r2plus1d_rgb, "flow": r2plus1d_flow}
return models
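# Illustrative usage (a sketch added for clarity, not part of the original
# module); the chosen attention name is one of the options documented above:
if __name__ == "__main__":
    models = se_r3d(attention="SELayerC", rgb=True, flow=False, pretrained=False)
    print(type(models["rgb"]).__name__)  # a VideoResNet with SELayerC modules attached
    print(models["flow"])  # None, because no optical-flow model was requested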
| 44.611111
| 118
| 0.662223
| 1,684
| 13,651
| 5.147862
| 0.089074
| 0.080978
| 0.161956
| 0.076479
| 0.885223
| 0.863191
| 0.852463
| 0.831468
| 0.828008
| 0.828008
| 0
| 0.049009
| 0.186873
| 13,651
| 305
| 119
| 44.757377
| 0.731982
| 0.133836
| 0
| 0.730942
| 0
| 0
| 0.050816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049327
| false
| 0
| 0.013453
| 0.026906
| 0.112108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
863685e215b7e235a0bfad1a90884617b0f9bbb6
| 12,762
|
py
|
Python
|
machine/qemu/sources/u-boot/test/py/tests/test_efi_secboot/test_signed.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | 1
|
2021-11-21T19:56:29.000Z
|
2021-11-21T19:56:29.000Z
|
machine/qemu/sources/u-boot/test/py/tests/test_efi_secboot/test_signed.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | null | null | null |
machine/qemu/sources/u-boot/test/py/tests/test_efi_secboot/test_signed.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2019, Linaro Limited
# Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
#
# U-Boot UEFI: Signed Image Authentication Test
"""
This test verifies image authentication for signed images.
"""
import pytest
@pytest.mark.boardspec('sandbox')
@pytest.mark.buildconfigspec('efi_secure_boot')
@pytest.mark.buildconfigspec('cmd_efidebug')
@pytest.mark.buildconfigspec('cmd_fat')
@pytest.mark.buildconfigspec('cmd_nvedit_efi')
@pytest.mark.slow
class TestEfiSignedImage(object):
def test_efi_signed_image_auth1(self, u_boot_console, efi_boot_env):
"""
Test Case 1 - Secure boot is not in force
"""
u_boot_console.restart_uboot()
disk_img = efi_boot_env
with u_boot_console.log.section('Test Case 1a'):
# Test Case 1a, run signed image if no PK
output = u_boot_console.run_command_list([
'host bind 0 %s' % disk_img,
'efidebug boot add 1 HELLO1 host 0:1 /helloworld.efi.signed ""',
'efidebug boot next 1',
'bootefi bootmgr'])
assert 'Hello, world!' in ''.join(output)
with u_boot_console.log.section('Test Case 1b'):
# Test Case 1b, run unsigned image if no PK
output = u_boot_console.run_command_list([
'efidebug boot add 2 HELLO2 host 0:1 /helloworld.efi ""',
'efidebug boot next 2',
'bootefi bootmgr'])
assert 'Hello, world!' in ''.join(output)
def test_efi_signed_image_auth2(self, u_boot_console, efi_boot_env):
"""
Test Case 2 - Secure boot is in force,
authenticated by db (TEST_db certificate in db)
"""
u_boot_console.restart_uboot()
disk_img = efi_boot_env
with u_boot_console.log.section('Test Case 2a'):
# Test Case 2a, db is not yet installed
output = u_boot_console.run_command_list([
'host bind 0 %s' % disk_img,
'fatload host 0:1 4000000 KEK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK',
'fatload host 0:1 4000000 PK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK'])
assert 'Failed to set EFI variable' not in ''.join(output)
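            # (Comment added for clarity, not in the original test:) setenv -e
            # writes a UEFI variable; -nv marks it non-volatile, -bs and -rt
            # grant boot-service and runtime access, -at requires a time-based
            # authenticated payload, and -i addr:$filesize takes the value from
            # the memory region just loaded by fatload.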
output = u_boot_console.run_command_list([
'efidebug boot add 1 HELLO1 host 0:1 /helloworld.efi.signed ""',
'efidebug boot next 1',
'efidebug test bootmgr'])
assert('\'HELLO1\' failed' in ''.join(output))
assert('efi_start_image() returned: 26' in ''.join(output))
output = u_boot_console.run_command_list([
'efidebug boot add 2 HELLO2 host 0:1 /helloworld.efi ""',
'efidebug boot next 2',
'efidebug test bootmgr'])
assert '\'HELLO2\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
with u_boot_console.log.section('Test Case 2b'):
# Test Case 2b, authenticated by db
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize db'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 2',
'efidebug test bootmgr'])
assert '\'HELLO2\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'bootefi bootmgr'])
assert 'Hello, world!' in ''.join(output)
def test_efi_signed_image_auth3(self, u_boot_console, efi_boot_env):
"""
Test Case 3 - rejected by dbx (TEST_db certificate in dbx)
"""
u_boot_console.restart_uboot()
disk_img = efi_boot_env
with u_boot_console.log.section('Test Case 3a'):
# Test Case 3a, rejected by dbx
output = u_boot_console.run_command_list([
'host bind 0 %s' % disk_img,
'fatload host 0:1 4000000 db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx',
'fatload host 0:1 4000000 KEK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK',
'fatload host 0:1 4000000 PK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot add 1 HELLO host 0:1 /helloworld.efi.signed ""',
'efidebug boot next 1',
'efidebug test bootmgr'])
assert '\'HELLO\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
with u_boot_console.log.section('Test Case 3b'):
# Test Case 3b, rejected by dbx even if db allows
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize db'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'efidebug test bootmgr'])
assert '\'HELLO\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
def test_efi_signed_image_auth4(self, u_boot_console, efi_boot_env):
"""
Test Case 4 - revoked by dbx (digest of TEST_db certificate in dbx)
"""
u_boot_console.restart_uboot()
disk_img = efi_boot_env
with u_boot_console.log.section('Test Case 4'):
# Test Case 4, rejected by dbx
output = u_boot_console.run_command_list([
'host bind 0 %s' % disk_img,
'fatload host 0:1 4000000 dbx_hash.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx',
'fatload host 0:1 4000000 db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize db',
'fatload host 0:1 4000000 KEK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK',
'fatload host 0:1 4000000 PK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot add 1 HELLO host 0:1 /helloworld.efi.signed ""',
'efidebug boot next 1',
'efidebug test bootmgr'])
assert '\'HELLO\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
def test_efi_signed_image_auth5(self, u_boot_console, efi_boot_env):
"""
Test Case 5 - multiple signatures
one signed with TEST_db, and
one signed with TEST_db1
"""
u_boot_console.restart_uboot()
disk_img = efi_boot_env
with u_boot_console.log.section('Test Case 5a'):
# Test Case 5a, authenticated even if only one of the signatures
# is verified
output = u_boot_console.run_command_list([
'host bind 0 %s' % disk_img,
'fatload host 0:1 4000000 db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize db',
'fatload host 0:1 4000000 KEK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK',
'fatload host 0:1 4000000 PK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot add 1 HELLO host 0:1 /helloworld.efi.signed_2sigs ""',
'efidebug boot next 1',
'efidebug test bootmgr'])
assert 'Hello, world!' in ''.join(output)
with u_boot_console.log.section('Test Case 5b'):
# Test Case 5b, authenticated if both signatures are verified
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 db1.auth',
'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'efidebug test bootmgr'])
assert 'Hello, world!' in ''.join(output)
with u_boot_console.log.section('Test Case 5c'):
# Test Case 5c, not rejected if one of the signatures (digest of
# certificate) is revoked
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 dbx_hash.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'efidebug test bootmgr'])
assert 'Hello, world!' in ''.join(output)
with u_boot_console.log.section('Test Case 5d'):
# Test Case 5d, rejected if both signatures are revoked
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 dbx_hash1.auth',
'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize dbx'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'efidebug test bootmgr'])
assert '\'HELLO\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
def test_efi_signed_image_auth6(self, u_boot_console, efi_boot_env):
"""
Test Case 6 - using digest of signed image in database
"""
u_boot_console.restart_uboot()
disk_img = efi_boot_env
with u_boot_console.log.section('Test Case 6a'):
# Test Case 6a, verified by image's digest in db
output = u_boot_console.run_command_list([
'host bind 0 %s' % disk_img,
'fatload host 0:1 4000000 db_hello_signed.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize db',
'fatload host 0:1 4000000 KEK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK',
'fatload host 0:1 4000000 PK.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot add 1 HELLO host 0:1 /helloworld.efi.signed ""',
'efidebug boot next 1',
'bootefi bootmgr'])
assert 'Hello, world!' in ''.join(output)
with u_boot_console.log.section('Test Case 6b'):
# Test Case 6b, rejected by TEST_db certificate in dbx
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 dbx_db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'efidebug test bootmgr'])
assert '\'HELLO\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
with u_boot_console.log.section('Test Case 6c'):
# Test Case 6c, rejected by image's digest in dbx
output = u_boot_console.run_command_list([
'fatload host 0:1 4000000 db.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize db',
'fatload host 0:1 4000000 dbx_hello_signed.auth',
'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx'])
assert 'Failed to set EFI variable' not in ''.join(output)
output = u_boot_console.run_command_list([
'efidebug boot next 1',
'efidebug test bootmgr'])
assert '\'HELLO\' failed' in ''.join(output)
assert 'efi_start_image() returned: 26' in ''.join(output)
| 49.084615
| 85
| 0.56872
| 1,660
| 12,762
| 4.210241
| 0.090964
| 0.039348
| 0.092717
| 0.072113
| 0.843898
| 0.834168
| 0.833739
| 0.833739
| 0.830877
| 0.801688
| 0
| 0.056794
| 0.325341
| 12,762
| 259
| 86
| 49.274131
| 0.754936
| 0.106331
| 0
| 0.807107
| 0
| 0
| 0.365136
| 0.014177
| 0
| 0
| 0
| 0
| 0.187817
| 1
| 0.030457
| false
| 0
| 0.005076
| 0
| 0.040609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8645021e159c520c4506daa7f0358ac68d313405
| 199
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageNameDoesntMatchFileName/a.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageNameDoesntMatchFileName/a.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageNameDoesntMatchFileName/a.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from google.protobuf import service
from <error descr="Unresolved reference 'foo'">foo</error>.bar import <error descr="Unresolved reference 'baz'">baz</error>
print(service.Service)
print(baz.Baz)
| 33.166667
| 123
| 0.773869
| 28
| 199
| 5.5
| 0.464286
| 0.12987
| 0.25974
| 0.376623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085427
| 199
| 5
| 124
| 39.8
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.261307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
8645cf916a3a7c936c056dce9ca699f0a99d84ad
| 19
|
py
|
Python
|
Alterstep/AS_BIoT_CircuitPyton/BOKRA_4RO/bokra_4ro.py
|
alsor62/Adafruit_CircuitPython_Bundle
|
c40f8ec11215cebc23cf36d4eb4432086c8a764d
|
[
"MIT"
] | null | null | null |
Alterstep/AS_BIoT_CircuitPyton/BOKRA_4RO/bokra_4ro.py
|
alsor62/Adafruit_CircuitPython_Bundle
|
c40f8ec11215cebc23cf36d4eb4432086c8a764d
|
[
"MIT"
] | null | null | null |
Alterstep/AS_BIoT_CircuitPyton/BOKRA_4RO/bokra_4ro.py
|
alsor62/Adafruit_CircuitPython_Bundle
|
c40f8ec11215cebc23cf36d4eb4432086c8a764d
|
[
"MIT"
] | null | null | null |
import mcp23008
| 3.8
| 15
| 0.736842
| 2
| 19
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.357143
| 0.263158
| 19
| 4
| 16
| 4.75
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
8672293c0262633037750b8e4e078b79dc3eb880
| 10,642
|
py
|
Python
|
ivy_tests/test_core/test_reductions.py
|
djl11/ivy
|
209f74b5a1a82ca69ad712788ae0469c3f8614d9
|
[
"Apache-2.0"
] | null | null | null |
ivy_tests/test_core/test_reductions.py
|
djl11/ivy
|
209f74b5a1a82ca69ad712788ae0469c3f8614d9
|
[
"Apache-2.0"
] | null | null | null |
ivy_tests/test_core/test_reductions.py
|
djl11/ivy
|
209f74b5a1a82ca69ad712788ae0469c3f8614d9
|
[
"Apache-2.0"
] | null | null | null |
"""
Collection of tests for unified reduction functions
"""
# global
import pytest
import numpy as np
# local
import ivy
import ivy.backends.numpy
import ivy_tests.helpers as helpers
# reduce_sum
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_sum(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_sum(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_sum, x), ivy.backends.numpy.reduce_sum(ivy.to_numpy(x)))
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_sum)
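# Worked example of the cardinality check above (added for clarity, not part of
# the original tests): for x of shape (1, 3) with axis=-1 and kd=True, the axis
# normalises to 1 and expected_shape becomes [1, 1]; with kd=False that entry is
# popped, leaving [1]. The final `[1] if expected_shape == []` fallback covers
# full reduction of the 1-D input, where popping empties the list.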
# reduce_prod
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_prod(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_prod(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_prod, x), ivy.backends.numpy.reduce_prod(ivy.to_numpy(x)))
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_prod)
# reduce_mean
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_mean(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_mean(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_mean, x), ivy.backends.numpy.reduce_mean(ivy.to_numpy(x)))
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_mean)
# reduce_var
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_var(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_var(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_var, x), ivy.backends.numpy.reduce_var(ivy.to_numpy(x)))
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_var)
# reduce_std
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_std(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_std(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_std, x), ivy.backends.numpy.reduce_var(ivy.to_numpy(x)) ** 0.5)
# compilation test
if call is helpers.torch_call:
# PyTorch cannot yet compile ivy.core-only functions without a direct backend implementation
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_std)
# reduce_min
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_min(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_min(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_min, x), ivy.backends.numpy.reduce_min(ivy.to_numpy(x)))
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_min)
# reduce_max
@pytest.mark.parametrize(
"x", [[1., 2., 3.], [[1., 2., 3.]]])
@pytest.mark.parametrize(
"axis", [None, 0, -1, (0,), (-1,)])
@pytest.mark.parametrize(
"kd", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reduce_max(x, axis, kd, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.reduce_max(x, axis, kd)
# type test
assert ivy.is_array(ret)
# cardinality test
if axis is None:
expected_shape = [1]*len(x.shape) if kd else []
else:
axis_ = [axis] if isinstance(axis, int) else axis
axis_ = [item % len(x.shape) for item in axis_]
expected_shape = list(x.shape)
if kd:
expected_shape = [1 if i % len(x.shape) in axis_ else item for i, item in enumerate(expected_shape)]
else:
[expected_shape.pop(item) for item in axis_]
expected_shape = [1] if expected_shape == [] else expected_shape
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.reduce_max, x), ivy.backends.numpy.reduce_max(ivy.to_numpy(x)))
# compilation test
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.reduce_max)
# einsum
@pytest.mark.parametrize(
"eq_n_op_n_shp", [("ii", (np.arange(25).reshape(5, 5),), (1,)),
("ii->i", (np.arange(25).reshape(5, 5),), (5,)),
("ij,j", (np.arange(25).reshape(5, 5), np.arange(5)), (5,))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einsum(eq_n_op_n_shp, dtype, tensor_fn, dev, call):
# smoke test
eq, operands, true_shape = eq_n_op_n_shp
operands = [tensor_fn(op, dtype, dev) for op in operands]
ret = ivy.einsum(eq, *operands)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == true_shape
# value test
assert np.allclose(call(ivy.einsum, eq, *operands),
ivy.backends.numpy.einsum(eq, *[ivy.to_numpy(op) for op in operands]))
# compilation test
if call is helpers.torch_call:
# torch.jit functions can't take a variable number of arguments
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.einsum)
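# (Worked note added for clarity, not part of the original tests:) the three
# parametrised equations above are the trace "ii" (a scalar, reported here with
# shape (1,)), the diagonal "ii->i" (shape (5,)), and the matrix-vector product
# "ij,j" (shape (5,)), which is what the true_shape entries encode.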
| 35.006579
| 112
| 0.627608
| 1,544
| 10,642
| 4.177461
| 0.069301
| 0.126977
| 0.123721
| 0.021705
| 0.902636
| 0.873488
| 0.864651
| 0.860155
| 0.842326
| 0.835814
| 0
| 0.015183
| 0.226367
| 10,642
| 303
| 113
| 35.122112
| 0.76825
| 0.078087
| 0
| 0.808696
| 0
| 0
| 0.024713
| 0
| 0
| 0
| 0
| 0
| 0.13913
| 1
| 0.034783
| false
| 0
| 0.021739
| 0
| 0.065217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86b1b73b108437075eca6748f0dbc842664aa051
| 2,709
|
py
|
Python
|
QBG/AutoFormula/operations/two_num.py
|
GYMS-PKU/Daily-Frequency-Quant
|
808eda9930efecff04ecf98abf617404cadd0003
|
[
"MIT"
] | 3
|
2021-11-21T04:35:04.000Z
|
2022-03-04T09:19:53.000Z
|
QBG/AutoFormula/operations/two_num.py
|
GYMS-PKU/Daily-Frequency-Quant
|
808eda9930efecff04ecf98abf617404cadd0003
|
[
"MIT"
] | null | null | null |
QBG/AutoFormula/operations/two_num.py
|
GYMS-PKU/Daily-Frequency-Quant
|
808eda9930efecff04ecf98abf617404cadd0003
|
[
"MIT"
] | 5
|
2021-10-03T00:00:22.000Z
|
2022-03-07T09:02:00.000Z
|
# Copyright (c) 2021 Dai HBG
"""
This module defines operators of the 2_num_num type.
"""
import numpy as np
def tsregres(a, b, num):  # time-series regression residual looking back num days
if len(a.shape) == 2:
s = np.zeros(a.shape)
tmp_a = np.zeros((num, a.shape[0], a.shape[1]))
tmp_a[0] = a.copy()
tmp_b = np.zeros((num, b.shape[0], b.shape[1]))
tmp_b[0] = b.copy()
for i in range(1, num):
tmp_a[i, i:, :] = a[:-i]  # row i holds the data delayed by i days
tmp_b[i, i:, :] = b[:-i]  # row i holds the data delayed by i days
tmp_a -= np.nanmean(tmp_a, axis=0)
tmp_b -= np.nanmean(tmp_b, axis=0)
beta = np.nansum(tmp_a * tmp_b, axis=0) / np.nansum(tmp_a ** 2, axis=0)
s[num - 1:] = (tmp_b[0] - beta * tmp_a[0])[num-1:]
return s
elif len(a.shape) == 3:
s = np.zeros(a.shape)
tmp_a = np.zeros((num, a.shape[0], a.shape[1], a.shape[2]))
tmp_a[0] = a.copy()
tmp_b = np.zeros((num, b.shape[0], b.shape[1], b.shape[2]))
tmp_b[0] = b.copy()
for i in range(1, num):
tmp_a[i, i:, :, :] = a[:-i]  # row i holds the data delayed by i days
tmp_b[i, i:, :, :] = b[:-i]  # row i holds the data delayed by i days
tmp_a -= np.nanmean(tmp_a, axis=0)
tmp_b -= np.nanmean(tmp_b, axis=0)
beta = np.nansum(tmp_a * tmp_b, axis=0) / np.nansum(tmp_a ** 2, axis=0)
s[num - 1:] = (tmp_b[0] - beta * tmp_a[0])[num-1:]
return s
def tscorr(a, b, num):  # daily-frequency time-series correlation
if len(a.shape) == 2:
s = np.zeros(a.shape)
tmp_a = np.zeros((num, a.shape[0], a.shape[1]))
tmp_a[0] = a.copy()
tmp_b = np.zeros((num, b.shape[0], b.shape[1]))
tmp_b[0] = b.copy()
for i in range(1, num):
tmp_a[i, i:, :] = a[:-i]  # row i holds the data delayed by i days
tmp_b[i, i:, :] = b[:-i]  # row i holds the data delayed by i days
tmp_a -= np.nanmean(tmp_a, axis=0)
tmp_b -= np.nanmean(tmp_b, axis=0)
s[num - 1:] = (np.nanmean(tmp_a * tmp_b,
axis=0) / (np.nanstd(tmp_a, axis=0) * np.nanstd(tmp_b, axis=0)))[num - 1:]
return s
elif len(a.shape) == 3:
s = np.zeros(a.shape)
tmp_a = np.zeros((num, a.shape[0], a.shape[1], a.shape[2]))
tmp_a[0] = a.copy()
tmp_b = np.zeros((num, b.shape[0], b.shape[1], b.shape[2]))
tmp_b[0] = b.copy()
for i in range(1, num):
tmp_a[i, i:, :, :] = a[:-i]  # row i holds the data delayed by i days
tmp_b[i, i:, :, :] = b[:-i]  # row i holds the data delayed by i days
tmp_a -= np.nanmean(tmp_a, axis=0)
tmp_b -= np.nanmean(tmp_b, axis=0)
s[num - 1:] = (np.nanmean(tmp_a * tmp_b,
axis=0) / (np.nanstd(tmp_a, axis=0) * np.nanstd(tmp_b, axis=0)))[num - 1:]
return s
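# Illustrative usage (a sketch added for clarity, not part of the original
# module); the array shapes and the 10-day window are assumptions:
if __name__ == "__main__":
    a = np.random.randn(100, 50)  # 100 days x 50 assets
    b = np.random.randn(100, 50)
    res = tsregres(a, b, 10)   # rolling 10-day regression residual of b on a
    corr = tscorr(a, b, 10)    # rolling 10-day time-series correlation
    print(res.shape, corr.shape)  # both keep the (100, 50) input shape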
| 37.625
| 108
| 0.483942
| 470
| 2,709
| 2.661702
| 0.091489
| 0.095923
| 0.095923
| 0.071942
| 0.919265
| 0.919265
| 0.919265
| 0.919265
| 0.919265
| 0.919265
| 0
| 0.039374
| 0.315615
| 2,709
| 71
| 109
| 38.15493
| 0.635383
| 0.075305
| 0
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.016949
| 0
| 0.118644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86d343cec36a84e2e355610b15bfd467ec1af699
| 8,182
|
py
|
Python
|
MOHSIN.py
|
MohSinTheLegend/Dark-Pak
|
2ecd0492fa71ec56bcfb730897e48c08223edc8d
|
[
"Apache-2.0"
] | 2
|
2021-04-01T10:20:32.000Z
|
2021-12-22T01:20:23.000Z
|
MOHSIN.py
|
MohSinTheLegend/Dark-Pak
|
2ecd0492fa71ec56bcfb730897e48c08223edc8d
|
[
"Apache-2.0"
] | null | null | null |
MOHSIN.py
|
MohSinTheLegend/Dark-Pak
|
2ecd0492fa71ec56bcfb730897e48c08223edc8d
|
[
"Apache-2.0"
] | null | null | null |
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJztPE1zG0d2DYCfkChRki1an27ZK5mSJXyDH5K5WpAERUYUwBqSogUVizXENMkhMTPQzMAiFTFRxa5dO6dVSvZuvHZ5K4etTfILUnvb3HJIcsgttRftIZWcfHDtMUm/NzMABhh8cRWXIxkEBz398fq91++9ft3Tb/LE/nTz/x/xf+M/AoRI/OsjBUJy5bSP5HxO2k9yficdILmAk+4iuS4n3U1y3U66h+R6nHQvyfU66T6S63PS/STX76SDJBd00odI7hBhhGwfJpKffOgjvt0/IWyASAG4mV59j+SOENZFto8SiXfeTT7k2A0SdoxIvM9euGU+snacsEFya5Ynt4+T7RPkQ0J8D/6OrOwNOh29RnKvkRX1POlir5OdINFP+/jH7rkPe1Z95P1Kg5Mkd5I3GCs3YFUN+rEB85PtISIFrRur5FAZ1HeIy0h8NV2QPkwKbxDlFMmdIj64HyCF00Q5Q3JnrPsjUFc5S3JnOXHngM1IJmfxeSIdtW5Oku2zwOzcm4S9SbYpYResAn7zFoHit8n2D6CGNGgxyueTYmTTB9XXLhIpXpV+nXzEIV0i0klMvEOkIUwME+kNTFwm0ilMXCHSaZJ7l2wSIl6Fa+4apkNE4viHicSxjjhidQ4xks5z+ZDeJLkokSjJxYh0geTiRHqL5BJEepvkkmT3x4QlIb3jJ/pXATZCpB9A29woYfwbx/y/xPyLmD9GGP8mML8UkC5BtSEuhZZYvGP1OwygH3wRkC6DgFooXcGiB6cD0rsemSvSVaRynEjXMHGdSCFM3CBSGBPvESmCiQnCfkikKKLw+wC7jpKv4kgvDie4wsv/wz8Z4xBP6gq9pm/QkLlryoPv8KJb3wwS+c8/+zkxungx5Js+nhCNk/xalIsxKquGKRYKVGH5LVGVHzHjtdoinT0oMcOUoaUxxC9LW0yl5l6RXafFPXNLU2N0XTOMYahgBrGCzkRpQdMKVt5RfpnSVJXlTVlT07qu6VZBL79M6tpDg+kmYFgyN8bMPp5QxN01U1aYAeCWefG11CZTTUPgt9ki08XweGgsQodTqqRrsnSDYia9I6tyOB4LRUKxWDIRHkuG6PINKkuX6YLOSdDCsVA0FkrE4vQu0w2OS5jfRkewlxL0ImIv5zly02LhA3knHA2NhHg/87Ja2r0BwOweaSKUCMVu0Mz7ySSdLMkFKXw7u5RMRkYu0/szk6lMeGYykbrBU3fD0QiHATiNhEbGeNbk3XAiOR5JREci/G76TvhPJaYasrk3EQ9Frj6UJXNrIhoZi1zdYvLmljkRHY9F9nnN+amwbK7NLfGk4AIxJYQXNMNkd7R1ucB4xp2ZsGiUDOhr2kktZMJ5TQltiHm2rmk7oR3RFFURELgbTi0uL67lIpHUNL9fvBtOhgBsdiEcBeip8O7YyHVRV5i4Ll/7YFS8sZp3ph0YxikQDJAoLpO3Zk0u7NyE+FFQuZlZHPbzoozRi4IhqjtGaDgAgw/ZHGtIG3sGjj/blc1hAFu5GFBN2jR7+M8OK5RE/TLkdmHn3b48IAA3fQ4iixYi+z6Yb/b9ZPc64ShNr8bIfoA89sPE89hHTESRzzgm3nLF5FPPG3yyOTX0OECGRnnDByfICsfqMc5ai8OAQAYlVtwypUfbeRNQQH26IEMSFWS428ncRZR1UZU0BZtBUlZNJLjAxQxq5pkp7gx3OW3W8foQrxJe5XpOQG0xL+5cg7wg8qGH/w34IhY3AG6vw40/K3NjNwWETq+OA1N4apvYpAFbumDMOL94zknOIBNZA4zg5Sc5V/a7yIMBsvIYZ+hRfme36LFn7H1k5DbP7IYiPm8PoW2E4YeRyQiAqAGkX7hooCycuX/RuBFV5Ded4TxzP6IgjRciBtARRJYjm2RVYrsWE1mxwCXYkhpTFyAPGW2YklYysfZDXTYZDoRwHC4n4PK6w+NtYcBLwIRj/CcJ90dslg74jvqGfUHfcYutUC3gsBUuu++Rx8jcoenVCHC4LPmWQPns+wBqgom8sV2gB4fICvImgKqB1G5+/tfw+YebSDYSJpyGyxmHCxuFkrGFww+mEbOMAmNFVDCk7RFeWb3MQN1tsSCqNyGzDyk85hvkNKImu2ibsURGQlXZDJB9TmoESJ1eHUa16gICh265FN0afRAGIO+kTR7qjDX3UEqtRKicCFEDaBs4cz96YzyuLBSYaDC6IsomxSwuHGWVquLHObichwuIjkU8ap1syjvIAK2eAWhx5J3pis4c5cT3kQGCumsMoADGo0pGM+ndUkE1+q2cmAJ3lrJyBHXjrUE+WEEbQfr8s4/t76c/e/7ZU+dbybRIiUHNqnx3aaK6tALHVWfUE0JVhXgZGScn6Y2efetBQqVadc7TKhKsnGfPP/0pfD2osJu4+3paRYIbgv39so6Epx4k1KD36dNaEjhYV+VnjUnwxjBR010dE4CETz9vOgS1MJP16AEVjUh4WoEPXPry4CTUDtNTlyDxXsr8byhI9SQ8rSXfk4QK/C/rMYx58/BZWQw6EKQGEFqQ8JmbimoSXGhXfZ18lzpX89BpVSGhBpQbwmilQiNFqG2SbAgTcrxMUp2YNrIFjeoHqwfMJSvPqmj2HqpGqh50mbyaJnUG1C1qXhRB/aDLRDaFWacVXhRVYMbbgtmA9gb8TFbDrBe4BnLjJS5VA19rKd063XL2iFVwasO2ecmQt8GuqlyLZ4NpxGOW8EC1WYW40mzS8LDKbiBN+fkrlwZ7VHjmNhLeclxnpTxmBm+7VTf7/V/ysx5PD/PfMZ4t+dnA6rec+uJKMxvfVD6flQ1TUmlmmN2Tp7dprzKX3hXiTQ15nbGvnWGCjlW+5vWZFFKZac8Sx0g+f/I3L9OX4scibkSZTS2lhffpndTt9CK9k6YzqTvZZZ7Kzi7OZWhqfi4UuvoKMCKqTKfvZucX0kKQen1sfizNpul8+lY6M+3MyjOpqfRkNnvbu9mWaRaN6+Hww4cPKxtJeU0JW/BCwN/U/HzIGoXQ7VRoIbWQCs2k0/Mh3ldoIbvCMbIxXOG1FlMLC95dReKRkXg0Grty5YozE9+aW5pdnmxAkLZlyOrSFptnm0yVHBWZmbwlZJcXgg0R39S1UtEIJxOjY7GxyGh0PDKeHB0J39TZxoSxJersFRCWEWVuhi7PcmmYy9ym97LLNCWk6WRqmmay5dsVIZu5Re/QZYFOp6Zfaq5UPs0sLRratLepvRY0Yofd2wQtna8ar6rWsUp4Aqn1zrw9ZI91stfk5lqYuZwmz7VZx+6Ga5XVjBV1s7cHDnW+d/36qqk/Em21DPbAxAOHRkuAem40QaOlF9lkqf/Um11uIA22LSpeT7Sp1+PGoZbVCe/SBq5To2Xz7588ocFg9QzWTN5r99Na7m619H09pL7BzkYzx7S9HZ6W3m3DPZbmw1zrITfa5Gi6ddHYfW1nm8G1x+BZ2mJx7LEa8dhp8B7TRg2DNaarodh4GumqXbi6hsFGNq+yoO8U5/
rthxqxabTR0vkmRKvNrs63Isri0el8+TL7E46XBa7UVCpDs5n5e3Qqu3CP3rlHF5fuzXNna3mJ2sXvLNHJNCxdQqFXgCttLlT4wqK9FYpdX0jlUk4HnK9Ly5Np7/qwArLbVC+EWiw2vFdPMVzSpGBJ46xexsrLl5d6FEcU2plwv+yy7djYOkNniY63EYRlw8oRWDZ8+48Xx1qtUao2+Ly2Wb8DjxfHWq1vqvco29kp/vYfL441XRslXePY1ibyt/94MdFqXdVsFL4bjxfHPIepCkILEpp43t/W48WxxtvH9Q8T29qVbrqX4dKbn9US5zYJTRS9bli818xN1KhNCI03KdqF4OWYt/C7ayA0fpzb0L8uVwhWzb1NHu15jUgDM+yq5pFfR3Xd4qzxzlGr4fbeJ0o0sIXe+x12R3XDV+Gw92C5nvJ6Ga3a7iodNVp41Ytx3bg3sC5ea7un3vSWmzTd1ok1NgMeqz2PzLomXvlNFn1ljwed5UbPpl5uX7D6yVSTB1Pcc6Yzc8Irs+5bSQmZucytr7/6/O+9V1nTWVguLAn36FLWWlF08qldLj7/4tetVphu/DLZpfTzn/+yzWUjPdC6kdYuHL/+6q9+Eax79PVSS8KIMlXQVFndpHPTNLuQzlDOouXMNFeWbCZNV9Lp2y83FzoSa1qZ9ptYW8+1JR6TtsrxEPHivamsdZpYLzEjjMddEbCtAEuaVqAQe6GKCnP28H/7m9/+xklXt6HVjRZEw3io6RKttCq3gSO889rmJpOorFKjlM8zw9goFQp7VDSoXD7NuyEWDGa87vSQUFZ0jcuIA9rIwAlsafOaVmTqi3kuXd+ZQ3w52sHvnJF+A9DE88+Sfda7Nt5BuARHkOHYsdBTPqO+Z5hMQfIK2qasNgh1gKPOBdmAmJCvSTnWwT6q7UJjxgONW7MSXOwD2QErRqmrLqcbcrorSPeUkTYuOnyIKvejq3ZqcSklLNHh8gIlk125bB9Z/9ubxjmnSVK5H1u1TeHywjTnNx0eD0UuG+crzL0fcYDS9K5scqCTYn7nsnXuHS/vOlLA2aRFK4e9TYhyKsoFeWutCQOFq/znG7gftDnXzf8G7H/koq+aiymLixYL9wme6vdjwNU7wC7OJ58VdXUSi7qwaILYQ65i8N8inlLPYHyU4+dNzWazi+nrzvJKOORIjq0LM3KhQOdUOqXpOsubhT08vB61omPg+LkuPlyT1WLJFEDTUFdzjO1YsVNwV2TiTj35UPm/4f4wknqY/3X7gvxaL0C/+CMFyJXTU5fTW5fTV5fTX8mx4iyrpfJQI6mk1BLIqfkseBF0RTa37Pm1HEhhXHCaxEAqq/eG6cwkTU1NZZczSxjw5shuHGpxH2wuM5M1jjsFI8r9xCr3ROambnMzUTWC95OrZQzSgpAVKmLOsRxZrXYuHEfEOOrSBLrOZb+sSDU6IIRIdawDGhERg+gaSH6M/3Rz8TDe8pD8mn+/bVEAKAgvhtT8ztemLvxLlS78G+jCdsAJ/zBBWKzo3SGegCHHUd7LImgc5U8IRNJ+4oMoVQhOioC+P/ZBkOqHHFYPdDK9egpilbg93O6DyKTtfsjFAJNPyMrKgz+QLjOIkbv/SiDs9IgVfWaHrnJbgUHCfyDvq28hsoOI7H8R87BFwmEXRdIxIh0nr/H7AWIegV4/9NnhUeZRIp1wwqF8cPca+ZhU7l4nTuqkJdPHILKVg/oJIT/xkQ0/xLf+mGCU1CCEuAJYTm432T4G4VImhjljs9PQTDoDLDtLbEyO25gMSefIkJ13wsnjFSHwFTp/0+ocNZjavLCidW2KUaXeQjt1smynRh07Zd9ZZgrmwvsXVmmdjRIiII1ljUyrJtOpqO7xmXlHNkxRpVYUIs1rEqOZkrLOdCvACfus1MfyKDU1mhhHs2myAlM1PbQtPnoUesTn39BDUZclNOIuTOl1isGDkbg7rFVHjbWx5n1rJp3RSqqEoTzB+zjL0FWsGpFBUzAQylbFa+iZOPPSkmaKBco7pypScJ1W1D6mTAHqe1qJ5rc0zWC8sGyc4grGL6XoyhbgsGiKukmndN4z93BDoZDxdqXLJY2Xa0W6oGvgBGGMqkGnTL3w7qMKatzRw/izQ7aaQjSbEehB1eSas3fMbcBX1GNcLfyoFldALTDo/Z+6uW6BLlmG1k8+5qIDIgyRXFwLhrh6DXFtHrIFvhtU8DFa6H0M6+JSutND9N9ByJ+rba/VlktbF4TZQ+C83xHuXqjq1MQegpbSQGGfFTfWT7aDUMepgsH1Q93cOEAc4WFyCrpd8nG70Ea3HOeeZt32lLs95NXtL7uqGu73NuFWb1vcOuT34lZvW9zqbc0tO8hS/c+Am1t/4ffiVn23Xtzqbc0tp1s9wC3ufl8TLvW1xaVowItLfW1xqa81l/psdP/R7+bSrwN8omijWy8u9bXmktPtvJ/PO/v9TbjU3xaXVrq8uNTfFpf6W3Op30b3S5+bS//c5cWl+m69uNTfmktOt5dg8tsPNuFSsC0u7Xd7cSnYFpeCrblkF6sPiZtL33R7cam+Wy8uBVtzyemWkBW117HpOJnDhGet5g3xA2Y85QlnLbx+TSzK7tWwwswtTQqLJXMrhOunmyIuv9dMbYepE7H46GhyHE5Aj0dHksmLsWQsOToV2YgmIqK4zqSN9ZGkmI+NiqPxcSZFxVhsJL4evbSh6YpoTmwbmnrJkHbWPrBerTARvcQUUS5M4OR+qaDlxQKbYOra8uKlor2KnzBmoYy3mpA149ImU5kummyNr/wBxFqeYy4zg4My5M2J+EYymdwYH+N4RDfy0qgoRvKJxEZybCMZi7ENxTyM7nGFoorrEFeG0+q2tneZUgMmUUof89RRm2/hfEFTmQSOhIDetOWvQOvaHQVclzF4g8WaYmziCsuepYenFi5TXKA6nlA0Fq92Xoazs7N82cwrWa+bULBGIole/ejYCP8OH3bcf1z9KjuSrON2APd6oE90e9atd2XAxgcmgPGYKGiiVIkW5zQZVpy8tmNYK4cibyJhVr64jmsrvBF166UC8AYMbArDE8UsSTRFdJge2JDW7eZGuWKsnIqXU4lyKlkJTs/jdafuXQZcSNTrsGb5FS5Ne3wB3zG+Punivz2+d3xH+cL1FL+e40vYN/yDVekjTUubtz146dnBgE8+7/Ieo9x7hBzHkZsVDTrJGHjNSrHATCZRcP3w3SnoWGbVgqyycHZjA37BnwXWhA3wradQFmkqn+e+q1kFbJFLqsTr1kisMQ42QDMBzD2tpFMHqmhB4Etj7sJnYZtM3ADnOxoBpzsWoZK4Zxivc38StyDLD5CCz589afatftxa/20LSqPGXuAaA6qB0uiRcGtYnoA8qW4Nrh1YHUBsH9wBgbaE6Am3NdCWEL1afQ+0juU13zYHyv0N1hY3Utv2pcppEmwBtzn0VnYk2Bq0ZwdtNwm2VdXTvrTdJNiBuTvYt7M+DkBHZ30A3LqzM69gH+3UPlgfB5LdjonoRAEP0EcH9uMAHbRr+A4A3cO/acf8tdQ4D7jtg+4Ma
Puj2gHEgwFtBq5TiC1gdSbLbTrCfyyUdqfBg0HpoH09iM4aV7fvuOWziufSYbOG4D5yIH5UnX4R0L18rBeK/mdN+njygvjj0c0LBO3m0vdwv4f7/wWubS4+euFwy99XHK6Ls7bdcUKeRhTrFbFUYbQgmkx3ztLMrNu7qHeyi7NzGSd7TyuZpXVmlzlb3y/mONjZ8iPymHLf/oVHyHbXq8NwcESIwiVBPE8uxQQ8WoRHeZgowa6cgVu4siTMOBUNU5eLuMk7l7U2eWFjdr3Ae1LEDVncxeNMwim4wG6yAA/wBZ+zeauIRSENObfgLAjiEHeOisi8S8O09oZ531h/d3dXWHa2ZIvWCRJASYD35gpLlb1ZJOoU7MvehUpVx4jg/Eg/niwJ8Dt49+xZ3xnMDfqO8Jygv9vXy2sc53dHeF4Q0uph3yC2uoz1um92B4I+fNfz2hrsAK+tIRrfzgufK119/9bnDt76PHyzLOf4OlbY88YhlLiqwjtpBXh3LsrylmhsFeR1lHadoQqY+FZwWd0UJh0BLekFqASl1qMeuIPmm8yEpwcVddJFleeBzKoCPLZBoVaVdR1VrPJiWETHfmO5gZo0pxS5TlmaBR2V33RuHakCJVZKBVMuWpv3cEyjqGkFS8NOVEELsd08K8LhK0NAjQk4ZOgMHr2Y8DDOYKbENkQOkKl5Dan9EVQ7ZpWt8a6lAlvTtXXNtJ6hzMBBV6dtuZxtcPndwgprYM4Qz9mlpQXBKrEfNXCaQIpFSdrivOUiLgCJApy0wRcaW8biosMwOGtmWYt7jslAAj4oFVTNMhSQtI6drZDyq9vx7I9zGrY6zzmOaz9/0opoD3EQSk71PvtRENQT4KXVlkGDU4p4Xg0NjW0JoAG3BGiVyhfnYRFUeU/RpFKB/RAPqz3hl59yw3PCf5z/dfuP+QP4TmPndwDTX/A0/+PmZgCN11kwZf7+rv6+/mB/d39Pj6/85x+40h3tuWqle/ARkPXXz9v1c7M2yP+P9fC2wf7e/tf6//2w738B9jCSAQ=="))))
| 2,727.333333
| 8,154
| 0.965656
| 259
| 8,182
| 30.505792
| 0.980695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156743
| 0.000367
| 8,182
| 2
| 8,155
| 4,091
| 0.809268
| 0
| 0
| 0
| 0
| 0.5
| 0.989489
| 0.989489
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
810a69d41ab269103b60a3142958a2f8f7506c08
| 12,857
|
py
|
Python
|
wallee/api/user_account_role_service_api.py
|
bluedynamics/wallee-python-sdk
|
7f20df96d2c3dba3b1ca5236e8deca578819eea2
|
[
"Apache-2.0"
] | 2
|
2020-01-16T13:24:06.000Z
|
2020-11-21T17:40:17.000Z
|
wallee/api/user_account_role_service_api.py
|
bluedynamics/wallee-python-sdk
|
7f20df96d2c3dba3b1ca5236e8deca578819eea2
|
[
"Apache-2.0"
] | 4
|
2019-10-14T17:33:23.000Z
|
2021-10-01T14:49:11.000Z
|
wallee/api/user_account_role_service_api.py
|
bluedynamics/wallee-python-sdk
|
7f20df96d2c3dba3b1ca5236e8deca578819eea2
|
[
"Apache-2.0"
] | 2
|
2019-10-15T14:17:10.000Z
|
2021-09-17T13:07:09.000Z
|
# coding: utf-8
from __future__ import absolute_import
import six
from wallee.api_client import ApiClient
class UserAccountRoleServiceApi:
def __init__(self, configuration):
self.api_client = ApiClient(configuration=configuration)
def add_role(self, user_id, account_id, role_id, **kwargs):
"""Add Role
This operation grants the role to the given user within the given account.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_role(user_id, account_id, role_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int user_id: The id of the user to whom the role is assigned. (required)
:param int account_id: The account to which the role is mapped. (required)
:param int role_id: The role which is mapped to the user and account. (required)
:param bool applies_on_subaccount: Whether the role applies only on subaccount.
:return: UserAccountRole
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_role_with_http_info(user_id, account_id, role_id, **kwargs)
else:
(data) = self.add_role_with_http_info(user_id, account_id, role_id, **kwargs)
return data
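    # Illustrative synchronous usage (a sketch added for clarity, not part of
    # the original client); the configuration object and the literal ids are
    # assumptions:
    #   api = UserAccountRoleServiceApi(configuration)
    #   role = api.add_role(user_id=42, account_id=7, role_id=3)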
def add_role_with_http_info(self, user_id, account_id, role_id, **kwargs):
"""Add Role
This operation grants the role to the given user within the given account.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_role_with_http_info(user_id, account_id, role_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int user_id: The id of the user to whom the role is assigned. (required)
:param int account_id: The account to which the role is mapped. (required)
:param int role_id: The role which is mapped to the user and account. (required)
:param bool applies_on_subaccount: Whether the role applies only on subaccount.
:return: UserAccountRole
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id', 'account_id', 'role_id', 'applies_on_subaccount']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `add_role`")
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_role`")
# verify the required parameter 'role_id' is set
if ('role_id' not in params or
params['role_id'] is None):
raise ValueError("Missing the required parameter `role_id` when calling `add_role`")
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in params:
query_params.append(('userId', params['user_id']))
if 'account_id' in params:
query_params.append(('accountId', params['account_id']))
if 'role_id' in params:
query_params.append(('roleId', params['role_id']))
if 'applies_on_subaccount' in params:
query_params.append(('appliesOnSubaccount', params['applies_on_subaccount']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/user-account-role/addRole', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserAccountRole',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list(self, user_id, account_id, **kwargs):
"""List Roles
List all the roles that are assigned to the given user in the given account.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list(user_id, account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int user_id: The id of the user to whom the role is assigned. (required)
:param int account_id: The account to which the role is mapped. (required)
:return: list[UserAccountRole]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_with_http_info(user_id, account_id, **kwargs)
else:
(data) = self.list_with_http_info(user_id, account_id, **kwargs)
return data
def list_with_http_info(self, user_id, account_id, **kwargs):
"""List Roles
List all the roles that are assigned to the given user in the given account.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_with_http_info(user_id, account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int user_id: The id of the user to whom the role is assigned. (required)
:param int account_id: The account to which the role is mapped. (required)
:return: list[UserAccountRole]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id', 'account_id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `list`")
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `list`")
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in params:
query_params.append(('userId', params['user_id']))
if 'account_id' in params:
query_params.append(('accountId', params['account_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/user-account-role/list', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[UserAccountRole]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_role(self, id, **kwargs):
"""Remove Role
This operation removes the specified user account role.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_role(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The id of the user account role that should be removed. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_role_with_http_info(id, **kwargs)
else:
(data) = self.remove_role_with_http_info(id, **kwargs)
return data
def remove_role_with_http_info(self, id, **kwargs):
"""Remove Role
This operation removes the specified user account role.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_role_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The id of the user account role that should be removed. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `remove_role`")
collection_formats = {}
path_params = {}
query_params = []
if 'id' in params:
query_params.append(('id', params['id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/user-account-role/removeRole', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
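# --- Hedged usage sketch (not part of the generated client) ---
# Assuming this class is exposed as a `UserAccountRoleApi` wrapper around a configured
# `api_client` (both names are assumptions, not confirmed by the generated code), a caller
# might grant, list, and remove roles like this:
#
#   api = UserAccountRoleApi(api_client)                        # hypothetical instantiation
#   role = api.add_role(user_id=42, account_id=7, role_id=3,
#                       applies_on_subaccount=False)            # synchronous call
#   thread = api.list(user_id=42, account_id=7, async_req=True)
#   roles = thread.get()                                        # async call returns the request thread
#   api.remove_role(id=role.id)                                 # assumes the returned model exposes an `id` field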
| 38.725904
| 99
| 0.613518
| 1,564
| 12,857
| 4.804987
| 0.092072
| 0.023952
| 0.024218
| 0.027944
| 0.943846
| 0.924817
| 0.909647
| 0.909647
| 0.889953
| 0.871324
| 0
| 0.000444
| 0.299603
| 12,857
| 331
| 100
| 38.8429
| 0.834092
| 0.319048
| 0
| 0.726257
| 0
| 0
| 0.200764
| 0.05546
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039106
| false
| 0
| 0.01676
| 0
| 0.111732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8125081e40d1e9cec18fc39b03ee9207029a4f40
| 6,097
|
py
|
Python
|
runtime/components/Basic/test_smaller.py
|
ulise/hetida-designer
|
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
|
[
"MIT"
] | 41
|
2020-11-18T10:12:29.000Z
|
2022-03-28T21:46:41.000Z
|
runtime/components/Basic/test_smaller.py
|
ulise/hetida-designer
|
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
|
[
"MIT"
] | 4
|
2020-12-08T15:28:15.000Z
|
2022-02-01T11:40:17.000Z
|
runtime/components/Basic/test_smaller.py
|
ulise/hetida-designer
|
a6be8eb45abf950d5498e3ca756ea1d2e46b5c00
|
[
"MIT"
] | 14
|
2020-11-18T11:39:17.000Z
|
2022-03-21T15:05:11.000Z
|
import pandas as pd
from .smaller import main
def test_int_int():
assert main(left=5, right=6)["result"] == True
def test_series_int():
assert main(
left=pd.Series(
{
"2019-08-01T15:20:12": 1.2,
"2019-08-01T15:44:12": None,
"2019-08-03T16:20:15": 0.3,
"2019-08-05T12:00:34": 0.5,
}
),
right=1,
)["result"].equals(
pd.Series(
{
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": True,
"2019-08-05T12:00:34": True,
}
)
)
def test_df_int():
assert main(
left=pd.DataFrame(
{"a": [1.2, None, 0.3, 0.5], "b": [54.4, 4.3, 21.0, 7.5]},
index=[
"2019-08-01T15:20:12",
"2019-08-01T15:44:12",
"2019-08-03T16:20:15",
"2019-08-05T12:00:34",
],
),
right=0.8,
)["result"].equals(
pd.DataFrame(
{"a": [False, False, True, True], "b": [False, False, False, False]},
index=[
"2019-08-01T15:20:12",
"2019-08-01T15:44:12",
"2019-08-03T16:20:15",
"2019-08-05T12:00:34",
],
)
)
def test_series_series():
assert main(
left=pd.Series(
{
"2019-08-01T15:20:12": 1.2,
"2019-08-01T15:44:12": None,
"2019-08-03T16:20:15": 0.3,
"2019-08-05T12:00:34": 0.5,
}
),
right=pd.Series(
{
"2019-08-01T15:20:12": 1.0,
"2019-08-01T15:44:12": 27,
"2019-08-03T16:20:15": 3.6,
"2020-08-05T12:00:34": 17,
"2021-08-05T12:00:34": None,
}
),
)["result"].equals(
pd.Series(
{
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": True,
"2019-08-05T12:00:34": False,
"2020-08-05T12:00:34": False,
"2021-08-05T12:00:34": False,
}
)
)
def test_df_series():
assert main(
left=pd.DataFrame(
{"a": [1.2, None, 0.3, 0.5], "b": [54.4, 4.3, 21.0, 7.5]},
index=[
"2019-08-01T15:20:12",
"2019-08-01T15:44:12",
"2019-08-03T16:20:15",
"2019-08-05T12:00:34",
],
),
right=pd.Series(
{
"2019-08-01T15:20:12": 1.1,
"2019-08-01T15:44:12": 3.7,
"2019-08-03T16:20:15": None,
"2019-08-05T12:00:34": 5,
"2020-08-05T12:00:34": 10000,
}
),
)["result"].equals(
pd.DataFrame(
{
"2019-08-01T15:20:12": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
"2019-08-01T15:44:12": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
"2019-08-03T16:20:15": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
"2019-08-05T12:00:34": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
"2020-08-05T12:00:34": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
"a": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
"b": {
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
},
}
)
)
def test_empty_series_series():
assert main(
left=pd.Series(dtype=float),
right=pd.Series(
{
"2019-08-01T15:20:12": 1.0,
"2019-08-01T15:44:12": 27,
"2019-08-03T16:20:15": 3.6,
"2020-08-05T12:00:34": 17,
"2021-08-05T12:00:34": None,
}
),
)["result"].equals(
pd.Series(
{
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2020-08-05T12:00:34": False,
"2021-08-05T12:00:34": False,
}
)
)
def test_series_empty_series():
assert main(
left=pd.Series(
{
"2019-08-01T15:20:12": 1.2,
"2019-08-01T15:44:12": None,
"2019-08-03T16:20:15": 0.3,
"2019-08-05T12:00:34": 0.5,
}
),
right=pd.Series(dtype=float),
)["result"].equals(
pd.Series(
{
"2019-08-01T15:20:12": False,
"2019-08-01T15:44:12": False,
"2019-08-03T16:20:15": False,
"2019-08-05T12:00:34": False,
}
)
)
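# --- Hedged note (not part of the test suite) ---
# The Series/Series expectations above are consistent with an index-aligned elementwise
# "less than": pandas' Series.lt aligns on the union of both indexes, and comparisons
# involving NaN (including labels missing from one side) evaluate to False. A minimal
# standalone illustration of that behaviour (not the actual smaller.main implementation):
#
#   left = pd.Series({"a": 1.2, "b": None})
#   right = pd.Series({"a": 2.0, "c": 5.0})
#   left.lt(right)   # -> a: True, b: False, c: False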
| 29.597087
| 81
| 0.380843
| 714
| 6,097
| 3.229692
| 0.07563
| 0.210755
| 0.200347
| 0.133565
| 0.888985
| 0.857762
| 0.857762
| 0.830442
| 0.824805
| 0.814397
| 0
| 0.405082
| 0.45137
| 6,097
| 205
| 82
| 29.741463
| 0.284305
| 0
| 0
| 0.638743
| 0
| 0
| 0.291783
| 0
| 0
| 0
| 0
| 0
| 0.036649
| 1
| 0.036649
| true
| 0
| 0.010471
| 0
| 0.04712
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
812d334ea605f34617255c9b71ae5c7690c978a2
| 146
|
py
|
Python
|
easyrobust/easyrobust/models/__init__.py
|
thu-ml/realsafe
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 107
|
2020-06-15T09:55:11.000Z
|
2020-12-20T11:27:11.000Z
|
easyrobust/easyrobust/models/__init__.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 7
|
2020-06-14T03:00:18.000Z
|
2020-12-07T07:10:10.000Z
|
easyrobust/easyrobust/models/__init__.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 19
|
2020-06-14T08:35:33.000Z
|
2020-12-19T13:43:41.000Z
|
from .rvt import *
from .cnsn_resnet import *
from .wave_resnet import *
from .frelu_resnet import *
from .gp_resnet import *
from .DrViT import *
| 24.333333
| 27
| 0.760274
| 22
| 146
| 4.863636
| 0.409091
| 0.46729
| 0.598131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157534
| 146
| 6
| 28
| 24.333333
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
07c6b195e394070e2cb23474b8b394c5f4e59013
| 8,938
|
py
|
Python
|
script/tuck.py
|
HKUST-RML/Shallow_Depth_Insertion
|
c2559479285d69a514e81467c5582f6384fc5dc1
|
[
"MIT"
] | 3
|
2021-08-19T12:41:16.000Z
|
2021-09-09T09:51:50.000Z
|
script/tuck.py
|
HKUST-RML/Shallow_Depth_Insertion
|
c2559479285d69a514e81467c5582f6384fc5dc1
|
[
"MIT"
] | null | null | null |
script/tuck.py
|
HKUST-RML/Shallow_Depth_Insertion
|
c2559479285d69a514e81467c5582f6384fc5dc1
|
[
"MIT"
] | 1
|
2022-01-13T08:24:18.000Z
|
2022-01-13T08:24:18.000Z
|
#!/usr/bin/env python
import sys
import math
import rospy
import copy
import numpy as np
import tf
import moveit_commander
import helper
import motion_primitives
import tilt
import yaml
import actionlib
import dynamixel
from robotiq_2f_gripper_msgs.msg import CommandRobotiqGripperFeedback, CommandRobotiqGripperResult, CommandRobotiqGripperAction, CommandRobotiqGripperGoal
from robotiq_2f_gripper_control.robotiq_2f_gripper_driver import Robotiq2FingerGripperDriver as Robotiq
moveit_commander.roscpp_initialize(sys.argv)
robot = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
group = moveit_commander.MoveGroupCommander("manipulator")
def rotate_tuck(axis, angle, fingertip2contactB, velocity):
'''Rotate tuck primitive motion of robot.
Parameters:
axis (list): 3-D vector of rotation axis (right-hand rule)
angle (double): angle of tucking
fingertip2contactB (double): distance from fingertip to contact B in meters
velocity (double): robot velocity between 0 and 1
Returns:
'''
with open("/home/john/catkin_ws/src/shallow_depth_insertion/config/sdi_config.yaml", 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
pose_target = group.get_current_pose().pose
pos_initial = [pose_target.position.x, pose_target.position.y, pose_target.position.z]
ori_initial = [pose_target.orientation.x, pose_target.orientation.y, pose_target.orientation.z, pose_target.orientation.w]
T_we = tf.TransformListener().fromTranslationRotation(pos_initial, ori_initial)
action_name = rospy.get_param('~action_name', 'command_robotiq_action')
robotiq_client = actionlib.SimpleActionClient(action_name, CommandRobotiqGripperAction)
# TODO: use pjg module
#msg = rospy.wait_for_message('/CModelRobotInput', inputMsg.CModel_robot_input, timeout = None)
#gripper_position = msg.gPO
gripper_position = 255 #TEMP DEBUG
#Robotiq.get_current_gripper_status(Robotiq())
#Robotiq.goto(robotiq_client, pos=object_thickness, speed=config['gripper_speed'], force=config['gripper_force'], block=False)
# gripper kinematics
opening_at_zero = config['max_opening']-2*config['finger_thickness']
gripper_opening = -config['opening_per_count']*gripper_position + opening_at_zero
contact_B_e = [config['tcp2fingertip']-fingertip2contactB, -gripper_opening/2.0, 0, 1]
contact_B_w = np.matmul(T_we, contact_B_e)
tilt.tilt(contact_B_w[:3], axis, angle, velocity)
def active_rotate_tuck(axis, angle, fingertip2contactB, velocity, active_distance):
'''Active rotate tuck primitive motion of robot.
Parameters:
axis (list): 3-D vector of rotation axis (right-hand rule)
angle (double): angle of tucking
fingertip2contactB (double): distance from fingertip to contact B in meters
velocity (double): robot velocity between 0 and 1
active_distance (double): extra translation distance forwarded to tilt.active_tilt, in meters (description assumed)
Returns:
'''
with open("/home/john/catkin_ws/src/shallow_depth_insertion/config/sdi_config.yaml", 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
pose_target = group.get_current_pose().pose
pos_initial = [pose_target.position.x, pose_target.position.y, pose_target.position.z]
ori_initial = [pose_target.orientation.x, pose_target.orientation.y, pose_target.orientation.z, pose_target.orientation.w]
T_we = tf.TransformListener().fromTranslationRotation(pos_initial, ori_initial)
action_name = rospy.get_param('~action_name', 'command_robotiq_action')
robotiq_client = actionlib.SimpleActionClient(action_name, CommandRobotiqGripperAction)
# TODO: use pjg module
#msg = rospy.wait_for_message('/CModelRobotInput', inputMsg.CModel_robot_input, timeout = None)
#gripper_position = msg.gPO
gripper_position = 255 #TEMP DEBUG
#Robotiq.get_current_gripper_status(Robotiq())
#Robotiq.goto(robotiq_client, pos=object_thickness, speed=config['gripper_speed'], force=config['gripper_force'], block=False)
# gripper kinematics
opening_at_zero = config['max_opening']-2*config['finger_thickness']
gripper_opening = -config['opening_per_count']*gripper_position + opening_at_zero
contact_B_e = [config['tcp2fingertip']-fingertip2contactB, -gripper_opening/2.0, 0, 1]
contact_B_w = np.matmul(T_we, contact_B_e)
tilt.active_tilt(contact_B_w[:3], axis, angle, velocity, active_distance)
def push_tuck(axis, angle, fingertip2contactB, velocity, tuck):
'''Push tuck primitive motion of robot.
Parameters:
axis (list): 3-D vector of rotation axis (right-hand rule)
angle (double): angle of tucking
fingertip2contactB (double): distance from fingertip to contact B in meters
velocity (double): robot velocity between 0 and 1
tuck (double): target length commanded to the dynamixel tucking mechanism (description assumed)
Returns:
'''
with open("/home/john/catkin_ws/src/shallow_depth_insertion/config/sdi_config.yaml", 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
pose_target = group.get_current_pose().pose
pos_initial = [pose_target.position.x, pose_target.position.y, pose_target.position.z]
ori_initial = [pose_target.orientation.x, pose_target.orientation.y, pose_target.orientation.z, pose_target.orientation.w]
T_we = tf.TransformListener().fromTranslationRotation(pos_initial, ori_initial)
action_name = rospy.get_param('~action_name', 'command_robotiq_action')
robotiq_client = actionlib.SimpleActionClient(action_name, CommandRobotiqGripperAction)
# TODO: use pjg module
#msg = rospy.wait_for_message('/CModelRobotInput', inputMsg.CModel_robot_input, timeout = None)
#gripper_position = msg.gPO
gripper_position = 255 #TEMP DEBUG
#Robotiq.get_current_gripper_status(Robotiq())
#Robotiq.goto(robotiq_client, pos=object_thickness, speed=config['gripper_speed'], force=config['gripper_force'], block=False)
# gripper kinematics
opening_at_zero = config['max_opening']-2*config['finger_thickness']
gripper_opening = -config['opening_per_count']*gripper_position + opening_at_zero
contact_B_e = [config['tcp2fingertip']-fingertip2contactB, -0.035/2.0, 0, 1]
contact_B_w = np.matmul(T_we, contact_B_e)
dynamixel.set_length(tuck)
tilt.translate_tilt(contact_B_w[:3], axis, angle, velocity, 0.00)
def push_tuck2(axis, angle, fingertip2contactB, velocity, tuck):
'''Push tuck primitive motion of robot.
Parameters:
axis (list): 3-D vector of rotation axis (right-hand rule)
angle (double): angle of tucking
fingertip2contactB (double): distance from fingertip to contact B in meters
velocity (double): robot velocity between 0 and 1
tuck (double): target length commanded to the dynamixel tucking mechanism (description assumed)
Returns:
'''
with open("/home/john/catkin_ws/src/shallow_depth_insertion/config/sdi_config.yaml", 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
pose_target = group.get_current_pose().pose
pos_initial = [pose_target.position.x, pose_target.position.y, pose_target.position.z]
ori_initial = [pose_target.orientation.x, pose_target.orientation.y, pose_target.orientation.z, pose_target.orientation.w]
T_we = tf.TransformListener().fromTranslationRotation(pos_initial, ori_initial)
action_name = rospy.get_param('~action_name', 'command_robotiq_action')
robotiq_client = actionlib.SimpleActionClient(action_name, CommandRobotiqGripperAction)
# TODO: use pjg module
#msg = rospy.wait_for_message('/CModelRobotInput', inputMsg.CModel_robot_input, timeout = None)
#gripper_position = msg.gPO
gripper_position = 255 #TEMP DEBUG
#Robotiq.get_current_gripper_status(Robotiq())
#Robotiq.goto(robotiq_client, pos=object_thickness, speed=config['gripper_speed'], force=config['gripper_force'], block=False)
# gripper kinematics
opening_at_zero = config['max_opening']-2*config['finger_thickness']
gripper_opening = -config['opening_per_count']*gripper_position + opening_at_zero
contact_B_e = [config['tcp2fingertip']-fingertip2contactB, -gripper_opening/2.0, 0, 1]
contact_B_w = np.matmul(T_we, contact_B_e)
dynamixel.set_length(tuck)
tilt.translate_tilt(contact_B_w[:3], axis, angle, velocity, 0.003)
if __name__ == '__main__':
try:
rospy.init_node('tuck', anonymous=True)
group.set_max_velocity_scaling_factor(1.0)
motion_primitives.set_joint([0, -90, 90, 90, 90, 0])
p = group.get_current_pose().pose
tilt.tilt([p.position.x,p.position.y,p.position.z-0.275], [0,-1,0], 60, 0.5)
rotate_tuck([0,1,0], 50, 0.03, 0.1)
except rospy.ROSInterruptException: pass
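# --- Hedged note (not part of the original script) ---
# The contact point computation above is a standard homogeneous transform: contact_B_e is
# the contact point expressed in the end-effector frame (an offset along the fingers plus
# half the gripper opening), and multiplying by T_we maps it into the world frame. A minimal
# standalone sketch of the same idea with a toy pose:
#
#   import numpy as np
#   T_we = np.eye(4)
#   T_we[:3, 3] = [0.5, 0.0, 0.3]                 # toy end-effector position in the world frame
#   p_e = np.array([0.1, -0.02, 0.0, 1.0])        # point in the end-effector frame (homogeneous)
#   p_w = np.matmul(T_we, p_e)                    # -> [0.6, -0.02, 0.3, 1.0]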
| 43.6
| 154
| 0.728463
| 1,150
| 8,938
| 5.413913
| 0.16087
| 0.051397
| 0.053967
| 0.015259
| 0.860424
| 0.856087
| 0.841632
| 0.841632
| 0.831674
| 0.831674
| 0
| 0.015205
| 0.168494
| 8,938
| 204
| 155
| 43.813725
| 0.822524
| 0.288208
| 0
| 0.647059
| 0
| 0
| 0.109276
| 0.060223
| 0
| 0
| 0
| 0.019608
| 0
| 1
| 0.039216
| false
| 0.009804
| 0.147059
| 0
| 0.186275
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6af5f8626b0b34bda3ec6543c1dea3c5ae5f25f6
| 408,376
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_antivirus_profile.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_antivirus_profile.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_antivirus_profile.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_antivirus_profile
short_description: Configure AntiVirus profiles in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify antivirus feature and profile category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there is more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
antivirus_profile:
description:
- Configure AntiVirus profiles.
default: null
type: dict
suboptions:
analytics_accept_filetype:
description:
- Only submit files matching this DLP file-pattern to FortiSandbox. Source dlp.filepattern.id.
type: int
analytics_bl_filetype:
description:
- Only submit files matching this DLP file-pattern to FortiSandbox. Source dlp.filepattern.id.
type: int
analytics_db:
description:
- Enable/disable using the FortiSandbox signature database to supplement the AV signature databases.
type: str
choices:
- disable
- enable
analytics_ignore_filetype:
description:
- Do not submit files matching this DLP file-pattern to FortiSandbox. Source dlp.filepattern.id.
type: int
analytics_max_upload:
description:
- Maximum size of files that can be uploaded to FortiSandbox (1 - 395 MBytes).
type: int
analytics_wl_filetype:
description:
- Do not submit files matching this DLP file-pattern to FortiSandbox. Source dlp.filepattern.id.
type: int
av_block_log:
description:
- Enable/disable logging for AntiVirus file blocking.
type: str
choices:
- enable
- disable
av_virus_log:
description:
- Enable/disable AntiVirus logging.
type: str
choices:
- enable
- disable
cifs:
description:
- Configure CIFS AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable CIFS AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
comment:
description:
- Comment.
type: str
content_disarm:
description:
- AV Content Disarm and Reconstruction settings.
type: dict
suboptions:
cover_page:
description:
- Enable/disable inserting a cover page into the disarmed document.
type: str
choices:
- disable
- enable
detect_only:
description:
- Enable/disable only detect disarmable files, do not alter content.
type: str
choices:
- disable
- enable
error_action:
description:
- Action to be taken if CDR engine encounters an unrecoverable error.
type: str
choices:
- block
- log-only
- ignore
office_action:
description:
- Enable/disable stripping of PowerPoint action events in Microsoft Office documents.
type: str
choices:
- disable
- enable
office_dde:
description:
- Enable/disable stripping of Dynamic Data Exchange events in Microsoft Office documents.
type: str
choices:
- disable
- enable
office_embed:
description:
- Enable/disable stripping of embedded objects in Microsoft Office documents.
type: str
choices:
- disable
- enable
office_hylink:
description:
- Enable/disable stripping of hyperlinks in Microsoft Office documents.
type: str
choices:
- disable
- enable
office_linked:
description:
- Enable/disable stripping of linked objects in Microsoft Office documents.
type: str
choices:
- disable
- enable
office_macro:
description:
- Enable/disable stripping of macros in Microsoft Office documents.
type: str
choices:
- disable
- enable
original_file_destination:
description:
- Destination to send original file if active content is removed.
type: str
choices:
- fortisandbox
- quarantine
- discard
pdf_act_form:
description:
- Enable/disable stripping of actions that submit data to other targets in PDF documents.
type: str
choices:
- disable
- enable
pdf_act_gotor:
description:
- Enable/disable stripping of links to other PDFs in PDF documents.
type: str
choices:
- disable
- enable
pdf_act_java:
description:
- Enable/disable stripping of actions that execute JavaScript code in PDF documents.
type: str
choices:
- disable
- enable
pdf_act_launch:
description:
- Enable/disable stripping of links to external applications in PDF documents.
type: str
choices:
- disable
- enable
pdf_act_movie:
description:
- Enable/disable stripping of embedded movies in PDF documents.
type: str
choices:
- disable
- enable
pdf_act_sound:
description:
- Enable/disable stripping of embedded sound files in PDF documents.
type: str
choices:
- disable
- enable
pdf_embedfile:
description:
- Enable/disable stripping of embedded files in PDF documents.
type: str
choices:
- disable
- enable
pdf_hyperlink:
description:
- Enable/disable stripping of hyperlinks from PDF documents.
type: str
choices:
- disable
- enable
pdf_javacode:
description:
- Enable/disable stripping of JavaScript code in PDF documents.
type: str
choices:
- disable
- enable
ems_threat_feed:
description:
- Enable/disable use of EMS threat feed when performing AntiVirus scan.
type: str
choices:
- disable
- enable
extended_log:
description:
- Enable/disable extended logging for antivirus.
type: str
choices:
- enable
- disable
external_blocklist:
description:
- One or more external malware block lists.
type: list
suboptions:
name:
description:
- External blocklist. Source system.external-resource.name.
required: true
type: str
external_blocklist_archive_scan:
description:
- Enable/disable external-blocklist archive scanning.
type: str
choices:
- disable
- enable
external_blocklist_enable_all:
description:
- Enable/disable all external blocklists.
type: str
choices:
- disable
- enable
feature_set:
description:
- Flow/proxy feature set.
type: str
choices:
- flow
- proxy
fortiai_error_action:
description:
- Action to take if FortiAI encounters an error.
type: str
choices:
- log-only
- block
- ignore
ftgd_analytics:
description:
- Settings to control which files are uploaded to FortiSandbox.
type: str
choices:
- disable
- suspicious
- everything
ftp:
description:
- Configure FTP AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable FTP AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
http:
description:
- Configure HTTP AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
type: str
choices:
- disable
- enable
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable HTTP AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
imap:
description:
- Configure IMAP AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
type: str
choices:
- disable
- enable
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
type: str
choices:
- default
- virus
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable IMAP AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
inspection_mode:
description:
- Inspection mode.
type: str
choices:
- proxy
- flow-based
mapi:
description:
- Configure MAPI AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
type: str
choices:
- default
- virus
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable MAPI AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
mobile_malware_db:
description:
- Enable/disable using the mobile malware signature database.
type: str
choices:
- disable
- enable
nac_quar:
description:
- Configure AntiVirus quarantine settings.
type: dict
suboptions:
expiry:
description:
- Duration of quarantine.
type: str
infected:
description:
- Enable/Disable quarantining infected hosts to the banned user list.
type: str
choices:
- none
- quar-src-ip
log:
description:
- Enable/disable AntiVirus quarantine logging.
type: str
choices:
- enable
- disable
name:
description:
- Profile name.
required: true
type: str
nntp:
description:
- Configure NNTP AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable NNTP AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
outbreak_prevention:
description:
- Configure Virus Outbreak Prevention settings.
type: dict
suboptions:
external_blocklist:
description:
- Enable/disable external malware blocklist.
type: str
choices:
- disable
- enable
ftgd_service:
description:
- Enable/disable FortiGuard Virus outbreak prevention service.
type: str
choices:
- disable
- enable
outbreak_prevention_archive_scan:
description:
- Enable/disable outbreak-prevention archive scanning.
type: str
choices:
- disable
- enable
pop3:
description:
- Configure POP3 AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
type: str
choices:
- disable
- enable
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
type: str
choices:
- default
- virus
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable POP3 AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
replacemsg_group:
description:
- Replacement message group customized for this profile. Source system.replacemsg-group.name.
type: str
scan_mode:
description:
- Choose between full scan mode and quick scan mode.
type: str
choices:
- quick
- full
- default
- legacy
smb:
description:
- Configure SMB AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: str
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: str
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
options:
description:
- Enable/disable SMB AntiVirus scanning, monitoring, and quarantine.
type: str
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
smtp:
description:
- Configure SMTP AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
content_disarm:
description:
- Enable Content Disarm and Reconstruction for this protocol.
type: str
choices:
- disable
- enable
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
executables:
description:
- Treat Windows executable files as viruses for the purpose of blocking or monitoring.
type: str
choices:
- default
- virus
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable SMTP AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable FortiGuard Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
ssh:
description:
- Configure SFTP and SCP AntiVirus options.
type: dict
suboptions:
archive_block:
description:
- Select the archive types to block.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
archive_log:
description:
- Select the archive types to log.
type: list
choices:
- encrypted
- corrupted
- partiallycorrupted
- multipart
- nested
- mailbomb
- fileslimit
- timeout
- unhandled
av_scan:
description:
- Enable AntiVirus scan service.
type: str
choices:
- disable
- block
- monitor
emulator:
description:
- Enable/disable the virus emulator.
type: str
choices:
- enable
- disable
external_blocklist:
description:
- Enable external-blocklist.
type: str
choices:
- disable
- block
- monitor
fortiai:
description:
- Enable/disable scanning of files by FortiAI server.
type: str
choices:
- disable
- block
- monitor
options:
description:
- Enable/disable SFTP and SCP AntiVirus scanning, monitoring, and quarantine.
type: list
choices:
- scan
- avmonitor
- quarantine
outbreak_prevention:
description:
- Enable Virus Outbreak Prevention service.
type: str
choices:
- disabled
- files
- full-archive
- disable
- block
- monitor
quarantine:
description:
- Enable/disable quarantine for infected files.
type: str
choices:
- disable
- enable
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_antivirus_profile
fortios_antivirus_profile:
vdom: root
state: present
antivirus_profile:
analytics_bl_filetype: 0
analytics_db: disable
analytics_max_upload: 10
analytics_wl_filetype: 0
av_block_log: enable
av_virus_log: enable
extended_log: disable
feature_set: flow
ftgd_analytics: disable
mobile_malware_db: enable
name: terr-anti-profile
scan_mode: default
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_antivirus_profile_data(json):
option_list = ['analytics_accept_filetype', 'analytics_bl_filetype', 'analytics_db',
'analytics_ignore_filetype', 'analytics_max_upload', 'analytics_wl_filetype',
'av_block_log', 'av_virus_log', 'cifs',
'comment', 'content_disarm', 'ems_threat_feed',
'extended_log', 'external_blocklist', 'external_blocklist_archive_scan',
'external_blocklist_enable_all', 'feature_set', 'fortiai_error_action',
'ftgd_analytics', 'ftp', 'http',
'imap', 'inspection_mode', 'mapi',
'mobile_malware_db', 'nac_quar', 'name',
'nntp', 'outbreak_prevention', 'outbreak_prevention_archive_scan',
'pop3', 'replacemsg_group', 'scan_mode',
'smb', 'smtp', 'ssh']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
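# Illustration (not part of the generated module): filter_antivirus_profile_data keeps only
# the keys listed in option_list whose values are not None, e.g.
#   filter_antivirus_profile_data({'name': 'p1', 'comment': None, 'state': 'present'})
#   -> {'name': 'p1'}    ('comment' is None; 'state' is not an antivirus-profile option)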
def flatten_single_path(data, path, index):
if not data or index == len(path) or path[index] not in data or not data[path[index]]:
return
if index == len(path) - 1:
data[path[index]] = ' '.join(str(elem) for elem in data[path[index]])
elif isinstance(data[path[index]], list):
for value in data[path[index]]:
flatten_single_path(value, path, index + 1)
else:
flatten_single_path(data[path[index]], path, index + 1)
def flatten_multilists_attributes(data):
multilist_attrs = [
[u'smtp', u'archive_block'],
[u'smtp', u'archive_log'],
[u'smtp', u'options'],
[u'ftp', u'archive_block'],
[u'ftp', u'archive_log'],
[u'ftp', u'options'],
[u'mapi', u'archive_block'],
[u'mapi', u'archive_log'],
[u'mapi', u'options'],
[u'nntp', u'archive_block'],
[u'nntp', u'archive_log'],
[u'nntp', u'options'],
[u'http', u'archive_block'],
[u'http', u'archive_log'],
[u'http', u'options'],
[u'cifs', u'archive_block'],
[u'cifs', u'archive_log'],
[u'cifs', u'options'],
[u'ssh', u'archive_block'],
[u'ssh', u'archive_log'],
[u'ssh', u'options'],
[u'imap', u'archive_block'],
[u'imap', u'archive_log'],
[u'imap', u'options'],
[u'pop3', u'archive_block'],
[u'pop3', u'archive_log'],
[u'pop3', u'options'],
]
for attr in multilist_attrs:
flatten_single_path(data, attr, 0)
return data
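# Illustration (not part of the generated module): multi-value list options are joined into
# the space-separated form expected by the API, e.g.
#   flatten_multilists_attributes({'smtp': {'archive_block': ['encrypted', 'corrupted']}})
#   -> {'smtp': {'archive_block': 'encrypted corrupted'}}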
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
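# Illustration (not part of the generated module): only dictionary keys are rewritten,
# values are left untouched, e.g.
#   underscore_to_hyphen({'av_virus_log': 'enable', 'nac_quar': {'expiry': '5m'}})
#   -> {'av-virus-log': 'enable', 'nac-quar': {'expiry': '5m'}}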
def antivirus_profile(data, fos):
vdom = data['vdom']
state = data['state']
antivirus_profile_data = data['antivirus_profile']
antivirus_profile_data = flatten_multilists_attributes(antivirus_profile_data)
filtered_data = underscore_to_hyphen(filter_antivirus_profile_data(antivirus_profile_data))
if state == "present" or state is True:
return fos.set('antivirus',
'profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('antivirus',
'profile',
mkey=filtered_data['name'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' or \
'http_status' in resp and resp['http_status'] == 200 or \
'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
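# Illustration (not part of the generated module): a response counts as successful when
# FortiOS reports status 'success', an HTTP 200, or a 404 on a DELETE (object already gone), e.g.
#   is_successful_status({'http_method': 'DELETE', 'http_status': 404})   # -> True
#   is_successful_status({'status': 'error', 'http_status': 500})         # -> False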
def fortios_antivirus(data, fos):
fos.do_member_operation('antivirus_profile')
if data['antivirus_profile']:
resp = antivirus_profile(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('antivirus_profile'))
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
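# Illustration (not part of the generated module): fortios_antivirus returns the triple
# (is_error, changed, response) consumed by the Ansible result handling, e.g. a successful
# set() whose revision changed yields (False, True, resp), while a failed call yields
# (True, False, resp).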
versioned_schema = {
"type": "list",
"children": {
"comment": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"feature_set": {
"type": "string",
"options": [
{
"value": "flow",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True
}
},
{
"value": "proxy",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True
}
}
],
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True
}
},
"fortiai_error_action": {
"type": "string",
"options": [
{
"value": "log-only",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "ignore",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
},
"smtp": {
"type": "dict",
"children": {
"executables": {
"type": "string",
"options": [
{
"value": "default",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "virus",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"content_disarm": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
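# Note: each "revisions" map in this schema pairs a FortiOS release tag with a
# boolean indicating whether the attribute or option value exists in that
# release (True = present, False = absent in that release). A minimal sketch of
# how such a map could be consulted -- the helper and the "schema" name are
# hypothetical and not part of this generated module:
#
#     def is_supported(attr_schema, version):
#         # Treat only versions explicitly flagged True as supported.
#         return attr_schema.get("revisions", {}).get(version, False)
#
#     # e.g. is_supported(schema["analytics_db"], "v7.0.1") -> True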
"analytics_db": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"analytics_ignore_filetype": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"av_virus_log": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"external_blocklist_archive_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": False,
"v7.0.0": True
}
},
"replacemsg_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention_archive_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"analytics_bl_filetype": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"analytics_accept_filetype": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"ftp": {
"type": "dict",
"children": {
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"analytics_max_upload": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mapi": {
"type": "dict",
"children": {
"executables": {
"type": "string",
"options": [
{
"value": "default",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "virus",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"extended_log": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"content_disarm": {
"type": "dict",
"children": {
"office_action": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"pdf_act_launch": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"office_dde": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"office_hylink": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_embedfile": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"office_macro": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cover_page": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"office_linked": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_javacode": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_hyperlink": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"detect_only": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_act_gotor": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_act_form": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"original_file_destination": {
"type": "string",
"options": [
{
"value": "fortisandbox",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "discard",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"office_embed": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_act_sound": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_act_java": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pdf_act_movie": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"error_action": {
"type": "string",
"options": [
{
"value": "block",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True
}
},
{
"value": "log-only",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True
}
},
{
"value": "ignore",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True
}
}
],
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": False
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"nntp": {
"type": "dict",
"children": {
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"smb": {
"type": "dict",
"children": {
"archive_block": {
"type": "string",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
"archive_log": {
"type": "string",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "files",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
"options": {
"type": "string",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
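# Note: the revision flags above mark the "smb" inspection block as present
# only in the 6.0.x releases (v6.0.0, v6.0.5, v6.0.11) and absent afterwards.
# The same flags elsewhere in this schema show the legacy "options" and
# "outbreak_prevention" value sets marked False for the 7.0.x releases, while
# the "av_scan" attribute and its disable/block/monitor values appear only in
# 7.0.x.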
"analytics_wl_filetype": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"http": {
"type": "dict",
"children": {
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"content_disarm": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cifs": {
"type": "dict",
"children": {
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "corrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "multipart",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "nested",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "mailbomb",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "fileslimit",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "timeout",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "unhandled",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "corrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "multipart",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "nested",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "mailbomb",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "fileslimit",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "timeout",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "unhandled",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "files",
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "full-archive",
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"nac_quar": {
"type": "dict",
"children": {
"infected": {
"type": "string",
"options": [
{
"value": "none",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quar-src-ip",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"log": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"expiry": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ems_threat_feed": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"ssh": {
"type": "dict",
"children": {
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "corrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "multipart",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "nested",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "mailbomb",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "fileslimit",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "timeout",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "unhandled",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "corrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "multipart",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "nested",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "mailbomb",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "fileslimit",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "timeout",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "unhandled",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "files",
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "full-archive",
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"av_block_log": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"imap": {
"type": "dict",
"children": {
"executables": {
"type": "string",
"options": [
{
"value": "default",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "virus",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"content_disarm": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"pop3": {
"type": "dict",
"children": {
"executables": {
"type": "string",
"options": [
{
"value": "default",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "virus",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"av_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"quarantine": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"content_disarm": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"emulator": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_block": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"archive_log": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "encrypted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "corrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "partiallycorrupted",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "multipart",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "nested",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "mailbomb",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fileslimit",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "timeout",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "unhandled",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"outbreak_prevention": {
"type": "string",
"options": [
{
"value": "disabled",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "files",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "full-archive",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"options": {
"multiple_values": True,
"type": "list",
"options": [
{
"value": "scan",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "avmonitor",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "quarantine",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fortiai": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True
}
},
{
"value": "block",
"revisions": {
"v7.0.1": True
}
},
{
"value": "monitor",
"revisions": {
"v7.0.1": True
}
}
],
"revisions": {
"v7.0.1": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"external_blocklist": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"ftgd_analytics": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "suspicious",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "everything",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"scan_mode": {
"type": "string",
"options": [
{
"value": "quick",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
{
"value": "full",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
{
"value": "default",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "legacy",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mobile_malware_db": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"external_blocklist_enable_all": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"outbreak_prevention": {
"type": "dict",
"children": {
"external_blocklist": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ftgd_service": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.1": False,
"v7.0.0": False,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"inspection_mode": {
"type": "string",
"options": [
{
"value": "proxy",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "flow-based",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
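# A minimal illustrative sketch (not part of the generated module): each
# "revisions" map above records, per FortiOS release, whether the enclosing
# attribute or option value is supported. check_schema_versioning() performs
# a comparable lookup against the connected device's version; the helper name
# and signature below are hypothetical and shown only to make the schema's
# intent concrete.
def _example_is_supported(revisions, fos_version):
    # Releases missing from the map are treated as unsupported.
    return revisions.get(fos_version, False)

# For example, the "scan_mode" option value "quick" above is marked True for
# v6.0.11 but False for v7.0.0, so:
#   _example_is_supported({"v6.0.11": True, "v7.0.0": False}, "v7.0.0") -> False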
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'name'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"antivirus_profile": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["antivirus_profile"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["antivirus_profile"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "antivirus_profile")
is_error, has_changed, result = fortios_antivirus(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
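# Illustrative sketch (an assumption for readability, not code that runs here):
# the shape of the parameters main() consumes when Ansible invokes the module.
# Values come from the playbook task; "sample_profile" is a hypothetical name.
#
#   {
#       "vdom": "root",
#       "state": "present",
#       "antivirus_profile": {
#           "name": "sample_profile",
#           "av_block_log": "enable",
#           "scan_mode": "default",
#       },
#   }
#
# Keys inside "antivirus_profile" are validated against versioned_schema for
# the connected FortiOS release before the request is sent.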
ed455e7eb0000793f0e261604c944bf879719a0f | 74,960 | py | Python | nipyapi/nifi/apis/versions_api.py | Riduidel/nipyapi | 6f1c0cc12b712ce2b23b94d3df17fde4c2cc63c1 | ["Apache-2.0"] | 3 | 2019-10-11T02:58:04.000Z | 2022-02-26T06:48:24.000Z | nipyapi/nifi/apis/versions_api.py | Riduidel/nipyapi | 6f1c0cc12b712ce2b23b94d3df17fde4c2cc63c1 | ["Apache-2.0"] | 2 | 2021-03-09T19:35:35.000Z | 2021-05-10T16:46:23.000Z | nipyapi/nifi/apis/versions_api.py | Riduidel/nipyapi | 6f1c0cc12b712ce2b23b94d3df17fde4c2cc63c1 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.9.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class VersionsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_version_control_request(self, body, **kwargs):
"""
Create a version control request
Creates a request so that a Process Group can be placed under Version Control or have its Version Control configuration changed. Creating this request will prevent any other threads from simultaneously saving local changes to Version Control. It will not, however, actually save the local flow to the Flow Registry. A POST to /versions/process-groups/{id} should be used to initiate saving of the local flow to the Flow Registry. Note: This endpoint is subject to change as NiFi and its REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_version_control_request(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CreateActiveRequestEntity body: The versioned flow details. (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_version_control_request_with_http_info(body, **kwargs)
else:
(data) = self.create_version_control_request_with_http_info(body, **kwargs)
return data
def create_version_control_request_with_http_info(self, body, **kwargs):
"""
Create a version control request
Creates a request so that a Process Group can be placed under Version Control or have its Version Control configuration changed. Creating this request will prevent any other threads from simultaneously saving local changes to Version Control. It will not, however, actually save the local flow to the Flow Registry. A POST to /versions/process-groups/{id} should be used to initiate saving of the local flow to the Flow Registry. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_version_control_request_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CreateActiveRequestEntity body: The versioned flow details. (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_version_control_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_version_control_request`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/plain'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/active-requests', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
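# --- Illustrative usage sketch (hand-written, not generated code) ----------
# The docstrings above describe a locking protocol: a POST to
# /versions/active-requests claims the right to save a flow, the save itself
# goes through /versions/process-groups/{id}, and the claim is released with a
# DELETE. The helper below sketches that sequence; the request entities are
# assumed to be built by the caller, since their model fields are not defined
# in this module.

from nipyapi.nifi.apis.versions_api import VersionsApi


def save_under_active_request(process_group_id, active_request_entity, start_request_entity):
    """Claim an active version-control request, save the flow, then release the claim."""
    versions = VersionsApi()
    # 1. Lock: the returned string is the id of the active request.
    request_id = versions.create_version_control_request(body=active_request_entity)
    try:
        # 2. Save the local flow to the Flow Registry while the claim is held.
        return versions.save_to_flow_registry(process_group_id, body=start_request_entity)
    finally:
        # 3. Unlock so other clients may save again.
        versions.delete_version_control_request(request_id)
# ---------------------------------------------------------------------------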
def delete_revert_request(self, id, **kwargs):
"""
Deletes the Revert Request with the given ID
Deletes the Revert Request with the given ID. After a request is created via a POST to /versions/revert-requests/process-groups/{id}, it is expected that the client will properly clean up the request by DELETE'ing it, once the Revert process has completed. If the request is deleted before the request completes, then the Revert request will finish the step that it is currently performing and then will cancel any subsequent steps. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_revert_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Revert Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_revert_request_with_http_info(id, **kwargs)
else:
(data) = self.delete_revert_request_with_http_info(id, **kwargs)
return data
def delete_revert_request_with_http_info(self, id, **kwargs):
"""
Deletes the Revert Request with the given ID
Deletes the Revert Request with the given ID. After a request is created via a POST to /versions/revert-requests/process-groups/{id}, it is expected that the client will properly clean up the request by DELETE'ing it, once the Revert process has completed. If the request is deleted before the request completes, then the Revert request will finish the step that it is currently performing and then will cancel any subsequent steps. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_revert_request_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Revert Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_revert_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_revert_request`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/revert-requests/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionedFlowUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_update_request(self, id, **kwargs):
"""
Deletes the Update Request with the given ID
Deletes the Update Request with the given ID. After a request is created via a POST to /versions/update-requests/process-groups/{id}, it is expected that the client will properly clean up the request by DELETE'ing it, once the Update process has completed. If the request is deleted before the request completes, then the Update request will finish the step that it is currently performing and then will cancel any subsequent steps. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_update_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_update_request_with_http_info(id, **kwargs)
else:
(data) = self.delete_update_request_with_http_info(id, **kwargs)
return data
def delete_update_request_with_http_info(self, id, **kwargs):
"""
Deletes the Update Request with the given ID
Deletes the Update Request with the given ID. After a request is created via a POST to /versions/update-requests/process-groups/{id}, it is expected that the client will properly clean up the request by DELETE'ing it, once the Update process has completed. If the request is deleted before the request completes, then the Update request will finish the step that it is currently performing and then will cancel any subsequent steps. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_update_request_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_update_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_update_request`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/update-requests/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionedFlowUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_version_control_request(self, id, **kwargs):
"""
Deletes the version control request with the given ID
Deletes the Version Control Request with the given ID. This will allow other threads to save flows to the Flow Registry. See also the documentation for POSTing to /versions/active-requests for information regarding why this is done. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_version_control_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The request ID. (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_version_control_request_with_http_info(id, **kwargs)
else:
(data) = self.delete_version_control_request_with_http_info(id, **kwargs)
return data
def delete_version_control_request_with_http_info(self, id, **kwargs):
"""
Deletes the version control request with the given ID
Deletes the Version Control Request with the given ID. This will allow other threads to save flows to the Flow Registry. See also the documentation for POSTing to /versions/active-requests for information regarding why this is done. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_version_control_request_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The request ID. (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_version_control_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_version_control_request`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/active-requests/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_revert_request(self, id, **kwargs):
"""
Returns the Revert Request with the given ID
Returns the Revert Request with the given ID. Once a Revert Request has been created by performing a POST to /versions/revert-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revert_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Revert Request (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_revert_request_with_http_info(id, **kwargs)
else:
(data) = self.get_revert_request_with_http_info(id, **kwargs)
return data
def get_revert_request_with_http_info(self, id, **kwargs):
"""
Returns the Revert Request with the given ID
Returns the Revert Request with the given ID. Once a Revert Request has been created by performing a POST to /versions/revert-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revert_request_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Revert Request (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_revert_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_revert_request`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/revert-requests/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionedFlowUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_update_request(self, id, **kwargs):
"""
Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /versions/update-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_update_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Update Request (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_update_request_with_http_info(id, **kwargs)
else:
(data) = self.get_update_request_with_http_info(id, **kwargs)
return data
def get_update_request_with_http_info(self, id, **kwargs):
"""
Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /versions/update-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_update_request_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Update Request (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_update_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_update_request`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/update-requests/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionedFlowUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_version_information(self, id, **kwargs):
"""
Gets the Version Control information for a process group
Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_version_information(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_version_information_with_http_info(id, **kwargs)
else:
(data) = self.get_version_information_with_http_info(id, **kwargs)
return data
def get_version_information_with_http_info(self, id, **kwargs):
"""
Gets the Version Control information for a process group
Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_version_information_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_version_information" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_version_information`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/process-groups/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionControlInformationEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
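# --- Illustrative usage sketch (hand-written, not generated code) ----------
# get_version_information() is the read-only way to ask whether a process
# group currently tracks a versioned flow. The check below assumes the
# returned VersionControlInformationEntity exposes a
# `version_control_information` attribute that is None for untracked groups;
# that model is not defined in this module, so treat the attribute name as an
# assumption.

from nipyapi.nifi.apis.versions_api import VersionsApi


def is_under_version_control(process_group_id):
    """Return True if the process group reports version control information."""
    entity = VersionsApi().get_version_information(process_group_id)
    return getattr(entity, 'version_control_information', None) is not None
# ---------------------------------------------------------------------------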
def initiate_revert_flow_version(self, id, body, **kwargs):
"""
Initiate the Revert Request of a Process Group with the given ID
For a Process Group that is already under Version Control, this will initiate the action of reverting any local changes that have been made to the Process Group since it was last synchronized with the Flow Registry. This will result in the flow matching the Versioned Flow that exists in the Flow Registry. This can be a lengthy process, as it will stop any Processors and disable any Controller Services necessary to perform the action and then restart them. As a result, the endpoint will immediately return a VersionedFlowUpdateRequestEntity, and the process of updating the flow will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /versions/revert-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /versions/revert-requests/{requestId}. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.initiate_revert_flow_version(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param VersionControlInformationEntity body: The controller service configuration details. (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.initiate_revert_flow_version_with_http_info(id, body, **kwargs)
else:
(data) = self.initiate_revert_flow_version_with_http_info(id, body, **kwargs)
return data
def initiate_revert_flow_version_with_http_info(self, id, body, **kwargs):
"""
Initiate the Revert Request of a Process Group with the given ID
For a Process Group that is already under Version Control, this will initiate the action of reverting any local changes that have been made to the Process Group since it was last synchronized with the Flow Registry. This will result in the flow matching the Versioned Flow that exists in the Flow Registry. This can be a lengthy process, as it will stop any Processors and disable any Controller Services necessary to perform the action and then restart them. As a result, the endpoint will immediately return a VersionedFlowUpdateRequestEntity, and the process of updating the flow will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /versions/revert-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /versions/revert-requests/{requestId}. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.initiate_revert_flow_version_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param VersionControlInformationEntity body: The controller service configuration details. (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method initiate_revert_flow_version" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `initiate_revert_flow_version`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `initiate_revert_flow_version`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/revert-requests/process-groups/{id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionedFlowUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
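# --- Illustrative usage sketch (hand-written, not generated code) ----------
# The docstring above spells out the asynchronous contract for reverts:
# initiate_revert_flow_version() returns immediately, the caller polls
# get_revert_request() until the request reports completion, and the finished
# request must be cleaned up with delete_revert_request(). A minimal polling
# loop is sketched below; the `request.request_id` / `request.complete`
# attribute names on VersionedFlowUpdateRequestEntity are assumptions, since
# that model is not defined in this module.
import time

from nipyapi.nifi.apis.versions_api import VersionsApi


def revert_local_changes(process_group_id, version_control_entity, poll_seconds=2, timeout=120):
    """Kick off a revert request, wait for it to finish, then acknowledge it."""
    versions = VersionsApi()
    request = versions.initiate_revert_flow_version(process_group_id, version_control_entity)
    request_id = request.request.request_id  # assumed field layout (see note above)
    deadline = time.time() + timeout
    try:
        while time.time() < deadline:
            status = versions.get_revert_request(request_id)
            if getattr(status.request, 'complete', False):
                return status
            time.sleep(poll_seconds)
        raise RuntimeError("revert request %s did not complete in time" % request_id)
    finally:
        # The API expects clients to DELETE finished (or abandoned) requests.
        versions.delete_revert_request(request_id)
# ---------------------------------------------------------------------------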
def initiate_version_control_update(self, id, body, **kwargs):
"""
Initiate the Update Request of a Process Group with the given ID
For a Process Group that is already under Version Control, this will initiate the action of changing from a specific version of the flow in the Flow Registry to a different version of the flow. This can be a lengthy process, as it will stop any Processors and disable any Controller Services necessary to perform the action and then restart them. As a result, the endpoint will immediately return a VersionedFlowUpdateRequestEntity, and the process of updating the flow will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /versions/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /versions/update-requests/{requestId}. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.initiate_version_control_update(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param VersionControlInformationEntity body: The controller service configuration details. (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.initiate_version_control_update_with_http_info(id, body, **kwargs)
else:
(data) = self.initiate_version_control_update_with_http_info(id, body, **kwargs)
return data
def initiate_version_control_update_with_http_info(self, id, body, **kwargs):
"""
Initiate the Update Request of a Process Group with the given ID
For a Process Group that is already under Version Control, this will initiate the action of changing from a specific version of the flow in the Flow Registry to a different version of the flow. This can be a lengthy process, as it will stop any Processors and disable any Controller Services necessary to perform the action and then restart them. As a result, the endpoint will immediately return a VersionedFlowUpdateRequestEntity, and the process of updating the flow will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /versions/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /versions/update-requests/{requestId}. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.initiate_version_control_update_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param VersionControlInformationEntity body: The controller service configuration details. (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method initiate_version_control_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `initiate_version_control_update`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `initiate_version_control_update`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/update-requests/process-groups/{id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionedFlowUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
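# --- Illustrative usage sketch (hand-written, not generated code) ----------
# Moving a tracked process group to a different registry version follows the
# same asynchronous pattern as the revert flow above, only against the
# update-request endpoints: initiate_version_control_update() starts the
# background work, get_update_request() is polled for progress, and
# delete_update_request() acknowledges the result. A compressed sketch (the
# entity attribute names are assumptions, as in the revert example):
import time

from nipyapi.nifi.apis.versions_api import VersionsApi


def change_flow_version(process_group_id, version_control_entity, poll_seconds=2):
    versions = VersionsApi()
    request = versions.initiate_version_control_update(process_group_id, version_control_entity)
    request_id = request.request.request_id  # assumed field layout
    try:
        while not getattr(versions.get_update_request(request_id).request, 'complete', False):
            time.sleep(poll_seconds)
    finally:
        versions.delete_update_request(request_id)
# ---------------------------------------------------------------------------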
def save_to_flow_registry(self, id, body, **kwargs):
"""
Save the Process Group with the given ID
Begins version controlling the Process Group with the given ID or commits changes to the Versioned Flow, depending on if the provided VersionControlInformation includes a flowId. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.save_to_flow_registry(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param StartVersionControlRequestEntity body: The versioned flow details. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.save_to_flow_registry_with_http_info(id, body, **kwargs)
else:
(data) = self.save_to_flow_registry_with_http_info(id, body, **kwargs)
return data
def save_to_flow_registry_with_http_info(self, id, body, **kwargs):
"""
Save the Process Group with the given ID
Begins version controlling the Process Group with the given ID or commits changes to the Versioned Flow, depending on if the provided VersionControlInformation includes a flowId. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.save_to_flow_registry_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param StartVersionControlRequestEntity body: The versioned flow details. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_to_flow_registry" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `save_to_flow_registry`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `save_to_flow_registry`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/process-groups/{id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionControlInformationEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
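# --- Illustrative usage sketch (hand-written, not generated code) ----------
# As the docstring notes, this single POST either places a group under version
# control for the first time or commits a new version of an already tracked
# flow; which one happens depends on whether the submitted version control
# information already identifies a flow in the registry. Callers can therefore
# use one code path for "start tracking" and "commit changes". The
# StartVersionControlRequestEntity is assumed to be built by the caller, since
# its fields (registry, bucket, flow, revision) are not defined in this module.

from nipyapi.nifi.apis.versions_api import VersionsApi


def publish_flow(process_group_id, start_request_entity):
    """Start version control or commit changes, depending on the entity contents."""
    return VersionsApi().save_to_flow_registry(process_group_id, body=start_request_entity)
# ---------------------------------------------------------------------------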
def stop_version_control(self, id, **kwargs):
"""
Stops version controlling the Process Group with the given ID
Stops version controlling the Process Group with the given ID. The Process Group will no longer track to any Versioned Flow. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.stop_version_control(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param str version: The version is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.stop_version_control_with_http_info(id, **kwargs)
else:
(data) = self.stop_version_control_with_http_info(id, **kwargs)
return data
def stop_version_control_with_http_info(self, id, **kwargs):
"""
Stops version controlling the Process Group with the given ID
Stops version controlling the Process Group with the given ID. The Process Group will no longer track to any Versioned Flow. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.stop_version_control_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param str version: The version is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'version', 'client_id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method stop_version_control" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `stop_version_control`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'version' in params:
query_params.append(('version', params['version']))
if 'client_id' in params:
query_params.append(('clientId', params['client_id']))
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/process-groups/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionControlInformationEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
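# --- Illustrative usage sketch (hand-written, not generated code) ----------
# Stopping version control takes NiFi's usual optimistic-locking parameters as
# query arguments: a revision version and an optional client id. The helper
# below assumes the caller already knows the group's current revision version
# (for example from a prior GET of the process group); obtaining that value is
# outside the scope of this module.

from nipyapi.nifi.apis.versions_api import VersionsApi


def untrack_process_group(process_group_id, revision_version, client_id=None):
    """Detach a process group from the versioned flow it currently tracks."""
    kwargs = {'version': revision_version}
    if client_id is not None:
        kwargs['client_id'] = client_id
    return VersionsApi().stop_version_control(process_group_id, **kwargs)
# ---------------------------------------------------------------------------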
def update_flow_version(self, id, body, **kwargs):
"""
Update the version of a Process Group with the given ID
For a Process Group that is already under Version Control, this will update the version of the flow to a different version. This endpoint expects that the given snapshot will not modify any Processor that is currently running or any Controller Service that is enabled. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_flow_version(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param VersionedFlowSnapshotEntity body: The controller service configuration details. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_flow_version_with_http_info(id, body, **kwargs)
else:
(data) = self.update_flow_version_with_http_info(id, body, **kwargs)
return data
def update_flow_version_with_http_info(self, id, body, **kwargs):
"""
Update the version of a Process Group with the given ID
For a Process Group that is already under Version Control, this will update the version of the flow to a different version. This endpoint expects that the given snapshot will not modify any Processor that is currently running or any Controller Service that is enabled. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_flow_version_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param VersionedFlowSnapshotEntity body: The controller service configuration details. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_flow_version" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_flow_version`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_flow_version`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/process-groups/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionControlInformationEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_version_control_request(self, id, body, **kwargs):
"""
Updates the request with the given ID
Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_version_control_request(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The request ID. (required)
:param VersionControlComponentMappingEntity body: The version control component mapping. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_version_control_request_with_http_info(id, body, **kwargs)
else:
(data) = self.update_version_control_request_with_http_info(id, body, **kwargs)
return data
def update_version_control_request_with_http_info(self, id, body, **kwargs):
"""
Updates the request with the given ID
Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_version_control_request_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The request ID. (required)
:param VersionControlComponentMappingEntity body: The version control component mapping. (required)
:return: VersionControlInformationEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_version_control_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_version_control_request`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_version_control_request`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/versions/active-requests/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VersionControlInformationEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 50.682894
| 963
| 0.610085
| 8,305
| 74,960
| 5.333654
| 0.042023
| 0.046957
| 0.016435
| 0.021131
| 0.977289
| 0.9716
| 0.966498
| 0.961554
| 0.959206
| 0.951057
| 0
| 0.000117
| 0.318477
| 74,960
| 1,478
| 964
| 50.717185
| 0.866952
| 0.434498
| 0
| 0.816298
| 0
| 0
| 0.175463
| 0.07146
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037293
| false
| 0
| 0.009669
| 0
| 0.10221
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| ed745c59217b0a83a0edf1a081b10e4c27af4ea4
| 11,786
| py
| Python
| tests/unit/more/centos/database/test_mysql.py
| timgates42/provy
| ca3d5e96a2210daf3c1fd4b96e047efff152db14
| ["MIT"]
| 15
| 2015-01-28T15:49:28.000Z
| 2021-09-02T18:49:46.000Z
| tests/unit/more/centos/database/test_mysql.py
| timgates42/provy
| ca3d5e96a2210daf3c1fd4b96e047efff152db14
| ["MIT"]
| null
| null
| null
| tests/unit/more/centos/database/test_mysql.py
| timgates42/provy
| ca3d5e96a2210daf3c1fd4b96e047efff152db14
| ["MIT"]
| 3
| 2016-12-05T07:08:11.000Z
| 2021-12-26T04:31:05.000Z
|
from mock import call, patch
from nose.tools import istest

from .fixtures import (
    FOO_DB_WITH_JOHN_GRANTS,
    FOO_DB_WITHOUT_JOHN_GRANTS,
    FOO_DB_WITH_JOHN_GRANTS_AND_GRANT_OPTION,
    HOSTS_FOR_USER,
    DATABASES,
)
from provy.more.centos import YumRole, MySQLRole
from tests.unit.tools.helpers import ProvyTestCase


class MySQLRoleTest(ProvyTestCase):
    def setUp(self):
        super(MySQLRoleTest, self).setUp()
        self.role = MySQLRole(prov=None, context={})

    @istest
    def has_no_grant_if_not_granted(self):
        with self.execute_mock() as execute:
            execute.return_value = FOO_DB_WITHOUT_JOHN_GRANTS
            self.assertFalse(self.role.has_grant('ALL', 'foo', 'john', '%', False))
            execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)

    @istest
    def has_grant_if_granted(self):
        with self.execute_mock() as execute:
            execute.return_value = FOO_DB_WITH_JOHN_GRANTS
            self.assertTrue(self.role.has_grant('ALL', 'foo', 'john', '%', False))
            execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)

    @istest
    def has_grant_if_granted_with_grant_option(self):
        with self.execute_mock() as execute:
            execute.return_value = FOO_DB_WITH_JOHN_GRANTS_AND_GRANT_OPTION
            self.assertTrue(self.role.has_grant('ALL', 'foo', 'john', '%', True))
            execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)

    @istest
    def has_grant_if_granted_even_if_provided_full(self):
        with self.execute_mock() as execute:
            execute.return_value = FOO_DB_WITH_JOHN_GRANTS
            self.assertTrue(self.role.has_grant('ALL PRIVILEGES', 'foo', 'john', '%', False))
            execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)

    @istest
    def has_grant_if_granted_even_if_provided_as_lowercase_string(self):
        with self.execute_mock() as execute:
            execute.return_value = FOO_DB_WITH_JOHN_GRANTS
            self.assertTrue(self.role.has_grant('all', 'foo', 'john', '%', False))
            execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)

    @istest
    def can_get_user_grants(self):
        with self.execute_mock() as execute:
            execute.return_value = FOO_DB_WITHOUT_JOHN_GRANTS
            expected = ["GRANT USAGE ON *.* TO 'john'@'%' IDENTIFIED BY PASSWORD '*B9EE00DF55E7C816911C6DA56F1E3A37BDB31093'"]
            self.assertEqual(expected, self.role.get_user_grants('john', '%'))
            execute.assert_called_with('''mysql -u root -E -e "SHOW GRANTS FOR 'john'@'%';" mysql''', sudo=True, stdout=False)

    @istest
    def installs_necessary_packages_to_provision(self):
        with self.using_stub(YumRole) as mock_yum, self.execute_mock() as execute:
            mock_yum.ensure_package_installed.return_value = 'some result'
            self.role.provision()
            self.assertEqual(execute.mock_calls, [
                call("mysqladmin -u %s -p'temppass' password '%s'" % (self.role.mysql_root_user, self.role.mysql_root_pass),
                     stdout=False, sudo=True),
            ])
            self.assertEqual(mock_yum.ensure_package_installed.mock_calls, [
                call('mysql-server'),
                call('mysql-devel'),
                call('mysql-libs'),
            ])

    @istest
    def installs_necessary_packages_to_provision_again(self):
        with self.using_stub(YumRole) as mock_yum, self.execute_mock() as execute:
            mock_yum.ensure_package_installed.return_value = False
            self.role.provision()
            self.assertFalse(execute.called)
            self.assertEqual(mock_yum.ensure_package_installed.mock_calls, [
                call('mysql-server'),
                call('mysql-devel'),
                call('mysql-libs'),
            ])

    @istest
    def gets_user_hosts(self):
        with self.execute_mock() as execute:
            execute.return_value = HOSTS_FOR_USER
            hosts = self.role.get_user_hosts('root')
            self.assertEqual(hosts, [
                '127.0.0.1',
                '::1',
                'my-desktop',
                'localhost',
            ])
            execute.assert_called_with('''mysql -u root -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def gets_user_hosts_using_password(self):
with self.execute_mock() as execute:
execute.return_value = HOSTS_FOR_USER
self.role.mysql_root_pass = 'mypass'
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [
'127.0.0.1',
'::1',
'my-desktop',
'localhost',
])
execute.assert_called_with('''mysql -u root --password="mypass" -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def gets_empty_user_hosts(self):
with self.execute_mock() as execute:
execute.return_value = ''
hosts = self.role.get_user_hosts('root')
self.assertEqual(hosts, [])
execute.assert_called_with('''mysql -u root -E -e "select Host from mysql.user where LOWER(User)='root'" mysql''',
sudo=True, stdout=False)
@istest
def checks_that_a_user_exists(self):
with patch.object(self.role, 'get_user_hosts') as get_user_hosts:
get_user_hosts.return_value = ['localhost']
self.assertTrue(self.role.user_exists('johndoe', 'localhost'))
get_user_hosts.assert_called_with('johndoe')
@istest
def checks_that_a_user_doesnt_exist(self):
with patch.object(self.role, 'get_user_hosts') as get_user_hosts:
get_user_hosts.return_value = ['localhost']
self.assertFalse(self.role.user_exists('johndoe', 'somewhere-else'))
get_user_hosts.assert_called_with('johndoe')
@istest
def creates_a_user_if_it_doesnt_exist_yet(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = False
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertTrue(result)
execute.assert_called_with("""mysql -u root -e "CREATE USER 'johndoe'@'localhost' IDENTIFIED BY 'mypass';" mysql""", sudo=True, stdout=False)
@istest
def doesnt_create_user_if_it_already_exists(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = True
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertFalse(result)
self.assertFalse(execute.called)
@istest
def creates_a_user_with_mysql_password(self):
with patch.object(self.role, 'user_exists') as user_exists, self.execute_mock() as execute:
user_exists.return_value = False
self.role.mysql_root_pass = 'otherpass'
result = self.role.ensure_user('johndoe', 'mypass', 'localhost')
self.assertTrue(result)
execute.assert_called_with("""mysql -u root --password="otherpass" -e "CREATE USER 'johndoe'@'localhost' IDENTIFIED BY 'mypass';" mysql""",
sudo=True, stdout=False)
@istest
def checks_that_a_database_is_present(self):
with self.execute_mock() as execute:
execute.return_value = DATABASES
result = self.role.is_database_present('performance_schema')
self.assertTrue(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def checks_that_a_database_is_not_present(self):
with self.execute_mock() as execute:
execute.return_value = DATABASES
result = self.role.is_database_present('bad_bad_database')
self.assertFalse(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def checks_that_a_database_is_not_present_when_there_is_none(self):
with self.execute_mock() as execute:
execute.return_value = ''
result = self.role.is_database_present('performance_schema')
self.assertFalse(result)
execute.assert_called_with('mysql -u root -E -e "SHOW DATABASES" mysql', stdout=False, sudo=True)
@istest
def creates_a_database_if_it_doesnt_exist_yet(self):
with patch.object(self.role, 'is_database_present') as is_database_present, self.execute_mock() as execute:
is_database_present.return_value = False
result = self.role.ensure_database('my_data')
self.assertTrue(result)
execute.assert_called_with('mysql -u root -e "CREATE DATABASE my_data" mysql', sudo=True, stdout=False)
@istest
def doesnt_create_a_database_if_it_already_exists(self):
with patch.object(self.role, 'is_database_present') as is_database_present, self.execute_mock() as execute:
is_database_present.return_value = True
result = self.role.ensure_database('my_data')
self.assertFalse(result)
self.assertFalse(execute.called)
@istest
def grants_privilege_if_not_granted_yet(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=False)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.* TO 'john'@'%'" mysql''', stdout=False, sudo=True)
@istest
def grants_privilege_if_not_granted_yet_for_table(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo.bar', username='john', login_from='%', with_grant_option=False)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.bar TO 'john'@'%'" mysql''', stdout=False, sudo=True)
@istest
def grants_privilege_with_grant_option_if_not_granted_yet(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = False
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=True)
self.assertTrue(result)
execute.assert_called_with('''mysql -u root -e "GRANT ALL PRIVILEGES ON foo.* TO 'john'@'%' WITH GRANT OPTION" mysql''', stdout=False, sudo=True)
@istest
def doesnt_grant_privilege_if_already_granted(self):
with patch.object(self.role, 'has_grant') as has_grant, self.execute_mock() as execute:
has_grant.return_value = True
result = self.role.ensure_grant('ALL PRIVILEGES', on='foo', username='john', login_from='%', with_grant_option=True)
self.assertFalse(result)
self.assertFalse(execute.called)
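# --- Hedged usage sketch (not part of the test file above) ---
# The tests exercise provy's MySQLRole API: ensure_user, ensure_database and
# ensure_grant, with the argument shapes asserted above. A provisioning role
# built on those same calls might look roughly like this; the `Role` base-class
# import and the `self.using(...)` context manager follow provy's documented
# conventions and are assumptions, not taken from this file.
from provy.core import Role
from provy.more.centos import MySQLRole

class SampleServer(Role):
    def provision(self):
        # Apply the same user/database/grant calls the tests above cover.
        with self.using(MySQLRole) as mysql:
            mysql.ensure_user('johndoe', 'mypass', 'localhost')
            mysql.ensure_database('my_data')
            mysql.ensure_grant('ALL PRIVILEGES', on='my_data', username='johndoe',
                               login_from='localhost', with_grant_option=False)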
| 42.548736
| 157
| 0.641354
| 1,479
| 11,786
| 4.841109
| 0.098715
| 0.04581
| 0.048184
| 0.054609
| 0.867598
| 0.844972
| 0.835056
| 0.809497
| 0.787011
| 0.729469
| 0
| 0.004261
| 0.243255
| 11,786
| 276
| 158
| 42.702899
| 0.79852
| 0
| 0
| 0.657143
| 0
| 0.02381
| 0.168166
| 0.009079
| 0
| 0
| 0
| 0
| 0.238095
| 1
| 0.12381
| false
| 0.057143
| 0.02381
| 0
| 0.152381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ed7e29341e99eedfcc00beabbeb5f196e8115d38
| 38
|
py
|
Python
|
simpleui/__init__.py
|
a371057600/simpleui
|
cb0261c66254d211b3103e68075c607320af7d50
|
[
"MIT"
] | 1
|
2019-06-17T05:13:13.000Z
|
2019-06-17T05:13:13.000Z
|
simpleui/__init__.py
|
a371057600/simpleui
|
cb0261c66254d211b3103e68075c607320af7d50
|
[
"MIT"
] | null | null | null |
simpleui/__init__.py
|
a371057600/simpleui
|
cb0261c66254d211b3103e68075c607320af7d50
|
[
"MIT"
] | null | null | null |
def get_version():
return '2.1.3'
| 12.666667
| 18
| 0.605263
| 7
| 38
| 3.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.210526
| 38
| 2
| 19
| 19
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
9c1f6b3983fba166b4978449ec6b12ac933fc0d7
| 142
|
py
|
Python
|
addons/legrand/models/models.py
|
csokt/odoo8
|
8994f53bf4ee4ad778d76015b8457d4a1224c7a4
|
[
"MIT"
] | null | null | null |
addons/legrand/models/models.py
|
csokt/odoo8
|
8994f53bf4ee4ad778d76015b8457d4a1224c7a4
|
[
"MIT"
] | null | null | null |
addons/legrand/models/models.py
|
csokt/odoo8
|
8994f53bf4ee4ad778d76015b8457d4a1224c7a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# from odoo import tools, models, fields, api, exceptions
from openerp import tools, models, fields, api, exceptions
| 28.4
| 58
| 0.711268
| 19
| 142
| 5.315789
| 0.631579
| 0.217822
| 0.336634
| 0.455446
| 0.712871
| 0.712871
| 0
| 0
| 0
| 0
| 0
| 0.008403
| 0.161972
| 142
| 4
| 59
| 35.5
| 0.840336
| 0.542254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
9c2a266756fa4a0ab63e8aa2732786e83f364edf
| 206
|
py
|
Python
|
Day-6/Reeborg_World/square.py
|
MihirMore/100daysofcode-Python
|
947d91842639c04ee7d23cc82bf04053d3982a85
|
[
"MIT"
] | 4
|
2021-04-09T20:01:22.000Z
|
2022-03-18T20:49:58.000Z
|
Day-6/Reeborg_World/square.py
|
MihirMore/100daysofcode-Python
|
947d91842639c04ee7d23cc82bf04053d3982a85
|
[
"MIT"
] | null | null | null |
Day-6/Reeborg_World/square.py
|
MihirMore/100daysofcode-Python
|
947d91842639c04ee7d23cc82bf04053d3982a85
|
[
"MIT"
] | null | null | null |
#
# def turn_right():
# turn_left()
# turn_left()
# turn_left()
# turn_left()
# move()
# move()
# turn_right()
# move()
# move()
# turn_right()
# move()
# move()
# turn_right()
# move()
# move()
| 12.117647
| 19
| 0.538835
| 25
| 206
| 4.12
| 0.2
| 0.349515
| 0.349515
| 0.466019
| 0.883495
| 0.883495
| 0.572816
| 0.572816
| 0.572816
| 0.572816
| 0
| 0
| 0.218447
| 206
| 17
| 20
| 12.117647
| 0.639752
| 0.834951
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9c5f880d0a2a3713736e8469f1b732efc4da512b
| 6,475
|
py
|
Python
|
regexlib/2021-5-15/python_re_test_file/regexlib_3833.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | 1
|
2022-01-24T14:43:23.000Z
|
2022-01-24T14:43:23.000Z
|
regexlib/2021-5-15/python_re_test_file/regexlib_3833.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
regexlib/2021-5-15/python_re_test_file/regexlib_3833.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
# 3833
# ^(?<address1>(?>\d{1,6}(?>\ 1\/[234])?( (N(orth)?|S(outh)?)? ?(E(ast)?|W(est)?))?((?> \d+ ?(th|rd|st|nd))|(?> [A-Z](?>[a-z])+)+) (?>(?i)THROUGHWAY|TRAFFICWAY|CROSSROADS|EXPRESSWAY|BOULEVARD|CROSSROAD|EXTENSION|JUNCTIONS|MOUNTAINS|STRAVENUE|UNDERPASS|CAUSEWAY|CRESCENT|CROSSING|JUNCTION|MOTORWAY|MOUNTAIN|OVERPASS|PARKWAYS|TURNPIKE|VILLIAGE|VILLAGES|CENTERS|CIRCLES|COMMONS|CORNERS|ESTATES|EXPRESS|FORESTS|FREEWAY|GARDENS|GATEWAY|HARBORS|HIGHWAY|HOLLOWS|ISLANDS|JUNCTON|LANDING|MEADOWS|MOUNTIN|ORCHARD|PARKWAY|PASSAGE|PRAIRIE|RANCHES|SPRINGS|SQUARES|STATION|STRAVEN|STRVNUE|STREETS|TERRACE|TRAILER|TUNNELS|VALLEYS|VIADUCT|VILLAGE|ALLEE|ARCADE|AVENUE|BLUFFS|BOTTOM|BRANCH|BRIDGE|BROOKS|BYPASS|CANYON|CAUSWA|CENTER|CENTRE|CIRCLE|CLIFFS|COMMON|CORNER|COURSE|COURTS|CRSENT|CRSSNG|DIVIDE|DRIVES|ESTATE|EXTNSN|FIELDS|FOREST|FORGES|FREEWY|GARDEN|GATEWY|GATWAY|GREENS|GROVES|HARBOR|HIGHWY|HOLLOW|ISLAND|ISLNDS|JCTION|JUNCTN|KNOLLS|LIGHTS|MANORS|MEADOW|MEDOWS|MNTAIN|ORCHRD|PARKWY|PLAINS|POINTS|RADIAL|RADIEL|RAPIDS|RIDGES|SHOALS|SHOARS|SHORES|SKYWAY|SPRING|SPRNGS|SQUARE|STRAVN|STREAM|STREME|STREET|SUMITT|SUMMIT|TRACES|TRACKS|TRAILS|TUNNEL|TURNPK|UNIONS|VALLEY|VIADCT|VILLAG|ALLEE|ALLEY|ANNEX|AVENU|AVNUE|BAYOO|BAYOU|BEACH|BLUFF|BOTTM|BOULV|BRNCH|BRDGE|BROOK|BURGS|BYPAS|CANYN|CENTR|CNTER|CIRCL|CRCLE|CLIFF|COURT|COVES|CREEK|CRSNT|CREST|CURVE|DRIVE|FALLS|FERRY|FIELD|FLATS|FORDS|FORGE|FORKS|FRWAY|GARDN|GRDEN|GRDNS|GTWAY|GLENS|GREEN|GROVE|HARBR|HRBOR|HAVEN|HIWAY|HILLS|HOLWS|ISLND|ISLES|JCTNS|KNOLL|LAKES|LNDNG|LIGHT|LOCKS|LODGE|LOOPS|MANOR|MILLS|MISSN|MOUNT|MNTNS|PARKS|PKWAY|PKWYS|PATHS|PIKES|PINES|PLAIN|PLAZA|POINT|PORTS|RANCH|RNCHS|RAPID|RIDGE|RIVER|ROADS|ROUTE|SHOAL|SHOAR|SHORE|SPRNG|SPNGS|SPURS|STATN|STRAV|STRVN|SUMIT|TRACE|TRACK|TRAIL|TRLRS|TUNEL|TUNLS|TUNNL|TRNPK|UNION|VALLY|VIEWS|VILLG|VILLE|VISTA|WALKS|WELLS|ALLY|ANEX|ANNX|AVEN|BEND|BLUF|BLVD|BOUL|BURG|BYPA|BYPS|CAMP|CNYN|CAPE|CSWY|CENT|CNTR|CIRC|CRCL|CLFS|CLUB|CORS|CRSE|COVE|CRES|XING|DALE|DRIV|ESTS|EXPR|EXPW|EXPY|EXTN|EXTS|FALL|FRRY|FLDS|FLAT|FLTS|FORD|FRST|FORG|FORK|FRKS|FORT|FRWY|GRDN|GDNS|GTWY|GLEN|GROV|HARB|HIWY|HWAY|HILL|HLLW|HOLW|INLT|ISLE|JCTN|JCTS|KEYS|KNOL|KNLS|LAKE|LAND|LNDG|LANE|LOAF|LOCK|LCKS|LDGE|LODG|LOOP|MALL|MNRS|MDWS|MEWS|MILL|MSSN|MNTN|MTIN|NECK|ORCH|OVAL|PARK|PKWY|PASS|PATH|PIKE|PINE|PNES|PLNS|PLZA|PORT|PRTS|RADL|RAMP|RNCH|RPDS|REST|RDGE|RDGS|RIVR|ROAD|SHLS|SHRS|SPNG|SPGS|SPUR|SQRE|SQRS|STRA|STRM|STRT|TERR|TRCE|TRAK|TRKS|TRLS|TRLR|TUNL|VLLY|VLYS|VDCT|VIEW|VILL|VLGS|VIST|VSTA|WALK|WALL|WAYS|WELL|ALY|ANX|ARC|AVE|AVN|BCH|BND|BLF|BOT|BTM|BRG|BRK|BYP|CMP|CPE|CEN|CTR|CIR|CLF|CLB|COR|CTS|CRK|DAM|DIV|DVD|DRV|EST|EXP|EXT|FLS|FRY|FLD|FLT|FRD|FRG|FRK|FRT|FWY|GLN|GRN|GRV|HBR|HVN|HTS|HWY|HLS|ISS|JCT|KEY|KYS|KNL|LKS|LGT|LCK|LDG|MNR|MDW|MNT|MTN|NCK|OVL|PRK|PKY|PLN|PLZ|PTS|PRT|PRR|RAD|RPD|RST|RDG|RIV|RVR|RDS|ROW|RUE|RUN|SHL|SHR|SPG|SQR|SQU|STA|STN|STR|SMT|TER|TRK|TRL|VLY|VIA|VWS|VLG|VIS|VST|WAY|WLS|AV|BR|CP|CT|CV|DL|DM|DV|DR|FT|HT|HL|IS|KY|LK|LN|LF|MT|PL|PT|PR|RD|SQ|ST|UN|VW|VL|WY))( (N(orth)?|S(outh)?)? ?(E(ast)?|W(est)?)?)?)$
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:"1"+" East"*80000+"! _1_POA(i)"
import re
from time import perf_counter
regex = """^(?<address1>(?>\d{1,6}(?>\ 1\/[234])?( (N(orth)?|S(outh)?)? ?(E(ast)?|W(est)?))?((?> \d+ ?(th|rd|st|nd))|(?> [A-Z](?>[a-z])+)+) (?>(?i)THROUGHWAY|TRAFFICWAY|CROSSROADS|EXPRESSWAY|BOULEVARD|CROSSROAD|EXTENSION|JUNCTIONS|MOUNTAINS|STRAVENUE|UNDERPASS|CAUSEWAY|CRESCENT|CROSSING|JUNCTION|MOTORWAY|MOUNTAIN|OVERPASS|PARKWAYS|TURNPIKE|VILLIAGE|VILLAGES|CENTERS|CIRCLES|COMMONS|CORNERS|ESTATES|EXPRESS|FORESTS|FREEWAY|GARDENS|GATEWAY|HARBORS|HIGHWAY|HOLLOWS|ISLANDS|JUNCTON|LANDING|MEADOWS|MOUNTIN|ORCHARD|PARKWAY|PASSAGE|PRAIRIE|RANCHES|SPRINGS|SQUARES|STATION|STRAVEN|STRVNUE|STREETS|TERRACE|TRAILER|TUNNELS|VALLEYS|VIADUCT|VILLAGE|ALLEE|ARCADE|AVENUE|BLUFFS|BOTTOM|BRANCH|BRIDGE|BROOKS|BYPASS|CANYON|CAUSWA|CENTER|CENTRE|CIRCLE|CLIFFS|COMMON|CORNER|COURSE|COURTS|CRSENT|CRSSNG|DIVIDE|DRIVES|ESTATE|EXTNSN|FIELDS|FOREST|FORGES|FREEWY|GARDEN|GATEWY|GATWAY|GREENS|GROVES|HARBOR|HIGHWY|HOLLOW|ISLAND|ISLNDS|JCTION|JUNCTN|KNOLLS|LIGHTS|MANORS|MEADOW|MEDOWS|MNTAIN|ORCHRD|PARKWY|PLAINS|POINTS|RADIAL|RADIEL|RAPIDS|RIDGES|SHOALS|SHOARS|SHORES|SKYWAY|SPRING|SPRNGS|SQUARE|STRAVN|STREAM|STREME|STREET|SUMITT|SUMMIT|TRACES|TRACKS|TRAILS|TUNNEL|TURNPK|UNIONS|VALLEY|VIADCT|VILLAG|ALLEE|ALLEY|ANNEX|AVENU|AVNUE|BAYOO|BAYOU|BEACH|BLUFF|BOTTM|BOULV|BRNCH|BRDGE|BROOK|BURGS|BYPAS|CANYN|CENTR|CNTER|CIRCL|CRCLE|CLIFF|COURT|COVES|CREEK|CRSNT|CREST|CURVE|DRIVE|FALLS|FERRY|FIELD|FLATS|FORDS|FORGE|FORKS|FRWAY|GARDN|GRDEN|GRDNS|GTWAY|GLENS|GREEN|GROVE|HARBR|HRBOR|HAVEN|HIWAY|HILLS|HOLWS|ISLND|ISLES|JCTNS|KNOLL|LAKES|LNDNG|LIGHT|LOCKS|LODGE|LOOPS|MANOR|MILLS|MISSN|MOUNT|MNTNS|PARKS|PKWAY|PKWYS|PATHS|PIKES|PINES|PLAIN|PLAZA|POINT|PORTS|RANCH|RNCHS|RAPID|RIDGE|RIVER|ROADS|ROUTE|SHOAL|SHOAR|SHORE|SPRNG|SPNGS|SPURS|STATN|STRAV|STRVN|SUMIT|TRACE|TRACK|TRAIL|TRLRS|TUNEL|TUNLS|TUNNL|TRNPK|UNION|VALLY|VIEWS|VILLG|VILLE|VISTA|WALKS|WELLS|ALLY|ANEX|ANNX|AVEN|BEND|BLUF|BLVD|BOUL|BURG|BYPA|BYPS|CAMP|CNYN|CAPE|CSWY|CENT|CNTR|CIRC|CRCL|CLFS|CLUB|CORS|CRSE|COVE|CRES|XING|DALE|DRIV|ESTS|EXPR|EXPW|EXPY|EXTN|EXTS|FALL|FRRY|FLDS|FLAT|FLTS|FORD|FRST|FORG|FORK|FRKS|FORT|FRWY|GRDN|GDNS|GTWY|GLEN|GROV|HARB|HIWY|HWAY|HILL|HLLW|HOLW|INLT|ISLE|JCTN|JCTS|KEYS|KNOL|KNLS|LAKE|LAND|LNDG|LANE|LOAF|LOCK|LCKS|LDGE|LODG|LOOP|MALL|MNRS|MDWS|MEWS|MILL|MSSN|MNTN|MTIN|NECK|ORCH|OVAL|PARK|PKWY|PASS|PATH|PIKE|PINE|PNES|PLNS|PLZA|PORT|PRTS|RADL|RAMP|RNCH|RPDS|REST|RDGE|RDGS|RIVR|ROAD|SHLS|SHRS|SPNG|SPGS|SPUR|SQRE|SQRS|STRA|STRM|STRT|TERR|TRCE|TRAK|TRKS|TRLS|TRLR|TUNL|VLLY|VLYS|VDCT|VIEW|VILL|VLGS|VIST|VSTA|WALK|WALL|WAYS|WELL|ALY|ANX|ARC|AVE|AVN|BCH|BND|BLF|BOT|BTM|BRG|BRK|BYP|CMP|CPE|CEN|CTR|CIR|CLF|CLB|COR|CTS|CRK|DAM|DIV|DVD|DRV|EST|EXP|EXT|FLS|FRY|FLD|FLT|FRD|FRG|FRK|FRT|FWY|GLN|GRN|GRV|HBR|HVN|HTS|HWY|HLS|ISS|JCT|KEY|KYS|KNL|LKS|LGT|LCK|LDG|MNR|MDW|MNT|MTN|NCK|OVL|PRK|PKY|PLN|PLZ|PTS|PRT|PRR|RAD|RPD|RST|RDG|RIV|RVR|RDS|ROW|RUE|RUN|SHL|SHR|SPG|SQR|SQU|STA|STN|STR|SMT|TER|TRK|TRL|VLY|VIA|VWS|VLG|VIS|VST|WAY|WLS|AV|BR|CP|CT|CV|DL|DM|DV|DR|FT|HT|HL|IS|KY|LK|LN|LF|MT|PL|PT|PR|RD|SQ|ST|UN|VW|VL|WY))( (N(orth)?|S(outh)?)? ?(E(ast)?|W(est)?)?)?)$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "1" + " East" * i * 10000 + "! _1_POA(i)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!")
| 340.789474
| 3,026
| 0.781467
| 1,133
| 6,475
| 4.459841
| 0.487202
| 0.003958
| 0.00475
| 0.007916
| 0.946368
| 0.946368
| 0.946368
| 0.946368
| 0.946368
| 0.946368
| 0
| 0.007073
| 0.017452
| 6,475
| 19
| 3,027
| 340.789474
| 0.787174
| 0.481081
| 0
| 0
| 0
| 0.090909
| 0.913834
| 0.868217
| 0.090909
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.090909
| 0.181818
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
92d1fe0cdc4ca52a22572cf572dbf27f65bf14ee
| 71,251
|
py
|
Python
|
resources/dot_PyCharm/system/python_stubs/cache/785ba7abdea3a54ceb589848c5c4548f205d175823f06e1b8323fe911316e613/pandas/_libs/tslibs/parsing.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | 1
|
2020-04-20T02:27:20.000Z
|
2020-04-20T02:27:20.000Z
|
resources/dot_PyCharm/system/python_stubs/cache/785ba7abdea3a54ceb589848c5c4548f205d175823f06e1b8323fe911316e613/pandas/_libs/tslibs/parsing.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
resources/dot_PyCharm/system/python_stubs/cache/785ba7abdea3a54ceb589848c5c4548f205d175823f06e1b8323fe911316e613/pandas/_libs/tslibs/parsing.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module pandas._libs.tslibs.parsing
# from C:\Python27\lib\site-packages\pandas\_libs\tslibs\parsing.pyd
# by generator 1.147
""" Parsing functions for datetime and datetime-like strings. """
# imports
import six as six # C:\Program Files (x86)\JetBrains\PyCharm 2019.3\plugins\python\helpers\six.py
import re as re # C:\Python27\lib\re.pyc
import numpy as np # C:\Python27\lib\site-packages\numpy\__init__.pyc
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
import sys as sys # <module 'sys' (built-in)>
import time as time # <module 'time' (built-in)>
from pandas._libs.tslibs.nattype import NaT
import datetime as __datetime
import dateutil.tz.tz as __dateutil_tz_tz
import dateutil.tz._common as __dateutil_tz__common
# Variables with simple values
_get_option = None
# functions
def du_parse(timestr, parserinfo=None, **kwargs): # reliably restored by inspect
pass
def get_option(*args, **kwargs): # real signature unknown
"""
Defer import of get_option to break an import cycle that caused
significant performance degradation in Period construction. See
GH#24118 for details
"""
pass
def parse_datetime_string(*args, **kwargs): # real signature unknown
"""
parse datetime string, only returns datetime.
Also cares special handling matching time patterns.
Returns
-------
datetime
"""
pass
def parse_time_string(*args, **kwargs): # real signature unknown
"""
Try hard to parse datetime string, leveraging dateutil plus some extra
goodies like quarter recognition.
Parameters
----------
arg : compat.string_types
freq : str or DateOffset, default None
Helps with interpreting time string if supplied
dayfirst : bool, default None
If None uses default from print_config
yearfirst : bool, default None
If None uses default from print_config
Returns
-------
datetime, datetime/dateutil.parser._result, str
"""
pass
def try_parse_dates(*args, **kwargs): # real signature unknown
pass
def try_parse_datetime_components(*args, **kwargs): # real signature unknown
pass
def try_parse_date_and_time(*args, **kwargs): # real signature unknown
pass
def try_parse_year_month_day(*args, **kwargs): # real signature unknown
pass
def _DATEUTIL_LEXER_SPLIT(*args, **kwargs): # real signature unknown
pass
def _does_string_look_like_datetime(*args, **kwargs): # real signature unknown
pass
def _format_is_iso(*args, **kwargs): # real signature unknown
"""
Does format match the iso8601 set that can be handled by the C parser?
Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
but must be consistent. Leading 0s in dates and times are optional.
"""
pass
def _guess_datetime_format(*args, **kwargs): # real signature unknown
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
dayfirst : boolean, default False
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
This function should take in a datetime string and return
a `datetime.datetime` guess that the datetime string represents
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
ret : datetime format string (for `strftime` or `strptime`)
"""
pass
def __pyx_unpickle_Enum(*args, **kwargs): # real signature unknown
pass
# classes
class binary_type(basestring):
"""
str(object='') -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.
"""
def capitalize(self): # real signature unknown; restored from __doc__
"""
S.capitalize() -> string
Return a copy of the string S with only its first character
capitalized.
"""
return ""
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.center(width[, fillchar]) -> string
Return S centered in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
return ""
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.count(sub[, start[, end]]) -> int
Return the number of non-overlapping occurrences of substring sub in
string S[start:end]. Optional arguments start and end are interpreted
as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.decode([encoding[,errors]]) -> object
Decodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return object()
def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.encode([encoding[,errors]]) -> object
Encodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that is able to handle UnicodeEncodeErrors.
"""
return object()
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.endswith(suffix[, start[, end]]) -> bool
Return True if S ends with the specified suffix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
S.expandtabs([tabsize]) -> string
Return a copy of S where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
return ""
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.find(sub [,start [,end]]) -> int
Return the lowest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def format(self, *args, **kwargs): # real signature unknown; restored from __doc__
"""
S.format(*args, **kwargs) -> string
Return a formatted version of S, using substitutions from args and kwargs.
The substitutions are identified by braces ('{' and '}').
"""
return ""
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.index(sub [,start [,end]]) -> int
Like S.find() but raise ValueError when the substring is not found.
"""
return 0
def isalnum(self): # real signature unknown; restored from __doc__
"""
S.isalnum() -> bool
Return True if all characters in S are alphanumeric
and there is at least one character in S, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
S.isalpha() -> bool
Return True if all characters in S are alphabetic
and there is at least one character in S, False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
S.isdigit() -> bool
Return True if all characters in S are digits
and there is at least one character in S, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
S.islower() -> bool
Return True if all cased characters in S are lowercase and there is
at least one cased character in S, False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
S.isspace() -> bool
Return True if all characters in S are whitespace
and there is at least one character in S, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
S.istitle() -> bool
Return True if S is a titlecased string and there is at least one
character in S, i.e. uppercase characters may only follow uncased
characters and lowercase characters only cased ones. Return False
otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
S.isupper() -> bool
Return True if all cased characters in S are uppercase and there is
at least one cased character in S, False otherwise.
"""
return False
def join(self, iterable): # real signature unknown; restored from __doc__
"""
S.join(iterable) -> string
Return a string which is the concatenation of the strings in the
iterable. The separator between elements is S.
"""
return ""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.ljust(width[, fillchar]) -> string
Return S left-justified in a string of length width. Padding is
done using the specified fill character (default is a space).
"""
return ""
def lower(self): # real signature unknown; restored from __doc__
"""
S.lower() -> string
Return a copy of the string S converted to lowercase.
"""
return ""
def lstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.lstrip([chars]) -> string or unicode
Return a copy of the string S with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def partition(self, sep): # real signature unknown; restored from __doc__
"""
S.partition(sep) -> (head, sep, tail)
Search for the separator sep in S, and return the part before it,
the separator itself, and the part after it. If the separator is not
found, return S and two empty strings.
"""
pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
S.replace(old, new[, count]) -> string
Return a copy of string S with all occurrences of substring
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return ""
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rfind(sub [,start [,end]]) -> int
Return the highest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rindex(sub [,start [,end]]) -> int
Like S.rfind() but raise ValueError when the substring is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.rjust(width[, fillchar]) -> string
Return S right-justified in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
return ""
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
S.rpartition(sep) -> (head, sep, tail)
Search for the separator sep in S, starting at the end of S, and return
the part before it, the separator itself, and the part after it. If the
separator is not found, return two empty strings and S.
"""
pass
def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.rsplit([sep [,maxsplit]]) -> list of strings
Return a list of the words in the string S, using sep as the
delimiter string, starting at the end of the string and working
to the front. If maxsplit is given, at most maxsplit splits are
done. If sep is not specified or is None, any whitespace string
is a separator.
"""
return []
def rstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.rstrip([chars]) -> string or unicode
Return a copy of the string S with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.split([sep [,maxsplit]]) -> list of strings
Return a list of the words in the string S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are removed
from the result.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.startswith(prefix[, start[, end]]) -> bool
Return True if S starts with the specified prefix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.strip([chars]) -> string or unicode
Return a copy of the string S with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def swapcase(self): # real signature unknown; restored from __doc__
"""
S.swapcase() -> string
Return a copy of the string S with uppercase characters
converted to lowercase and vice versa.
"""
return ""
def title(self): # real signature unknown; restored from __doc__
"""
S.title() -> string
Return a titlecased version of S, i.e. words start with uppercase
characters, all remaining cased characters have lowercase.
"""
return ""
def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__
"""
S.translate(table [,deletechars]) -> string
Return a copy of the string S, where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256 or None.
If the table argument is None, no translation is applied and
the operation simply removes the characters in deletechars.
"""
return ""
def upper(self): # real signature unknown; restored from __doc__
"""
S.upper() -> string
Return a copy of the string S converted to uppercase.
"""
return ""
def zfill(self, width): # real signature unknown; restored from __doc__
"""
S.zfill(width) -> string
Pad a numeric string S with zeros on the left, to fill a field
of the specified width. The string S is never truncated.
"""
return ""
def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
pass
def _formatter_parser(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
S.__format__(format_spec) -> string
Return a formatted version of S as described by format_spec.
"""
return ""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
class DateParseError(ValueError):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__qualname__ = 'DateParseError'
class relativedelta(object):
"""
The relativedelta type is designed to be applied to an existing datetime and
can replace specific components of that datetime, or represents an interval
of time.
It is based on the specification of the excellent work done by M.-A. Lemburg
in his
`mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
However, notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes::
relativedelta(datetime1, datetime2)
The second one is passing it any number of the following keyword arguments::
relativedelta(arg1=x,arg2=y,arg3=z...)
year, month, day, hour, minute, second, microsecond:
Absolute information (argument is singular); adding or subtracting a
relativedelta with absolute information does not perform an arithmetic
operation, but rather REPLACES the corresponding value in the
original datetime with the value(s) in relativedelta.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative (argument is plural); adding
or subtracting a relativedelta with relative information performs
the corresponding arithmetic operation on the original datetime value
with the information in the relativedelta.
weekday:
One of the weekday instances (MO, TU, etc) available in the
relativedelta module. These instances may receive a parameter N,
specifying the Nth weekday, which could be positive or negative
(like MO(+1) or MO(-2)). Not specifying it is the same as specifying
+1. You can also use an integer, where 0=MO. This argument is always
relative e.g. if the calculated date is already Monday, using MO(1)
or MO(-1) won't change the day. To effectively make it absolute, use
it in combination with the day argument (e.g. day=1, MO(1) for first
Monday of the month).
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
There are relative and absolute forms of the keyword
arguments. The plural is relative, and the singular is
absolute. For each argument in the order below, the absolute form
is applied first (by setting each attribute to that value) and
then the relative form (by adding the value to the attribute).
The order of attributes considered when this relativedelta is
added to a datetime is:
1. Year
2. Month
3. Day
4. Hours
5. Minutes
6. Seconds
7. Microseconds
Finally, weekday is applied, using the rule described above.
For example
>>> from datetime import datetime
>>> from dateutil.relativedelta import relativedelta, MO
>>> dt = datetime(2018, 4, 9, 13, 37, 0)
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
>>> dt + delta
datetime.datetime(2018, 4, 2, 14, 37)
First, the day is set to 1 (the first of the month), then 25 hours
are added, to get to the 2nd day and 14th hour, finally the
weekday is applied, but since the 2nd is already a Monday there is
no effect.
"""
def normalized(self): # real signature unknown; restored from __doc__
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=+1, hours=+14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
pass
def _fix(self, *args, **kwargs): # real signature unknown
pass
def _set_months(self, *args, **kwargs): # real signature unknown
pass
def __abs__(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, *args, **kwargs): # real signature unknown
pass
def __bool__(self, *args, **kwargs): # real signature unknown
pass
def __div__(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, *args, **kwargs): # real signature unknown
pass
def __hash__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __mul__(self, *args, **kwargs): # real signature unknown
pass
def __neg__(self, *args, **kwargs): # real signature unknown
pass
def __ne__(self, *args, **kwargs): # real signature unknown
pass
def __nonzero__(self, *args, **kwargs): # real signature unknown
pass
def __radd__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
pass
def __rmul__(self, *args, **kwargs): # real signature unknown
pass
def __rsub__(self, *args, **kwargs): # real signature unknown
pass
def __sub__(self, *args, **kwargs): # real signature unknown
pass
def __truediv__(self, *args, **kwargs): # real signature unknown
pass
weeks = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is 'dict_proxy({\'__ne__\': <function __ne__ at 0x0000000005FF86D8>, \'__module__\': \'dateutil.relativedelta\', \'__dict__\': <attribute \'__dict__\' of \'relativedelta\' objects>, \'__radd__\': <function __radd__ at 0x000000000615BCF8>, \'__bool__\': <function __bool__ at 0x0000000005FF8358>, \'__truediv__\': <function __div__ at 0x0000000005FF8748>, \'__rsub__\': <function __rsub__ at 0x0000000005FF8048>, \'normalized\': <function normalized at 0x000000000615BDD8>, \'__add__\': <function __add__ at 0x000000000615BF98>, \'__rmul__\': <function __mul__ at 0x0000000005FF83C8>, \'__eq__\': <function __eq__ at 0x0000000005FF8438>, \'__init__\': <function __init__ at 0x0000000005FEE5F8>, \'__nonzero__\': <function __bool__ at 0x0000000005FF8358>, \'__weakref__\': <attribute \'__weakref__\' of \'relativedelta\' objects>, \'_set_months\': <function _set_months at 0x0000000005FEEF98>, \'__abs__\': <function __abs__ at 0x0000000005FF8278>, \'__div__\': <function __div__ at 0x0000000005FF8748>, \'__mul__\': <function __mul__ at 0x0000000005FF83C8>, \'_fix\': <function _fix at 0x0000000005FEEC88>, \'__repr__\': <function __repr__ at 0x0000000005FF8B38>, \'__hash__\': <function __hash__ at 0x0000000005FF8668>, \'__sub__\': <function __sub__ at 0x0000000005FF80B8>, \'weeks\': <property object at 0x00000000060082C8>, \'__doc__\': "\\n The relativedelta type is designed to be applied to an existing datetime and\\n can replace specific components of that datetime, or represents an interval\\n of time.\\n\\n It is based on the specification of the excellent work done by M.-A. Lemburg\\n in his\\n `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.\\n However, notice that this type does *NOT* implement the same algorithm as\\n his work. Do *NOT* expect it to behave like mx.DateTime\'s counterpart.\\n\\n There are two different ways to build a relativedelta instance. The\\n first one is passing it two date/datetime classes::\\n\\n relativedelta(datetime1, datetime2)\\n\\n The second one is passing it any number of the following keyword arguments::\\n\\n relativedelta(arg1=x,arg2=y,arg3=z...)\\n\\n year, month, day, hour, minute, second, microsecond:\\n Absolute information (argument is singular); adding or subtracting a\\n relativedelta with absolute information does not perform an arithmetic\\n operation, but rather REPLACES the corresponding value in the\\n original datetime with the value(s) in relativedelta.\\n\\n years, months, weeks, days, hours, minutes, seconds, microseconds:\\n Relative information, may be negative (argument is plural); adding\\n or subtracting a relativedelta with relative information performs\\n the corresponding arithmetic operation on the original datetime value\\n with the information in the relativedelta.\\n\\n weekday: \\n One of the weekday instances (MO, TU, etc) available in the\\n relativedelta module. These instances may receive a parameter N,\\n specifying the Nth weekday, which could be positive or negative\\n (like MO(+1) or MO(-2)). Not specifying it is the same as specifying\\n +1. You can also use an integer, where 0=MO. This argument is always\\n relative e.g. if the calculated date is already Monday, using MO(1)\\n or MO(-1) won\'t change the day. To effectively make it absolute, use\\n it in combination with the day argument (e.g. 
day=1, MO(1) for first\\n Monday of the month).\\n\\n leapdays:\\n Will add given days to the date found, if year is a leap\\n year, and the date found is post 28 of february.\\n\\n yearday, nlyearday:\\n Set the yearday or the non-leap year day (jump leap days).\\n These are converted to day/month/leapdays information.\\n\\n There are relative and absolute forms of the keyword\\n arguments. The plural is relative, and the singular is\\n absolute. For each argument in the order below, the absolute form\\n is applied first (by setting each attribute to that value) and\\n then the relative form (by adding the value to the attribute).\\n\\n The order of attributes considered when this relativedelta is\\n added to a datetime is:\\n\\n 1. Year\\n 2. Month\\n 3. Day\\n 4. Hours\\n 5. Minutes\\n 6. Seconds\\n 7. Microseconds\\n\\n Finally, weekday is applied, using the rule described above.\\n\\n For example\\n\\n >>> from datetime import datetime\\n >>> from dateutil.relativedelta import relativedelta, MO\\n >>> dt = datetime(2018, 4, 9, 13, 37, 0)\\n >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))\\n >>> dt + delta\\n datetime.datetime(2018, 4, 2, 14, 37)\\n\\n First, the day is set to 1 (the first of the month), then 25 hours\\n are added, to get to the 2nd day and 14th hour, finally the\\n weekday is applied, but since the 2nd is already a Monday there is\\n no effect.\\n\\n ", \'__neg__\': <function __neg__ at 0x0000000005FF82E8>})'
class StringIO:
"""
class StringIO([buffer])
When a StringIO object is created, it can be initialized to an existing
string by passing the string to the constructor. If no string is given,
the StringIO will start empty.
The StringIO object can accept either Unicode or 8-bit strings, but
mixing the two may take some care. If both are used, 8-bit strings that
cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
a UnicodeError to be raised when getvalue() is called.
"""
def close(self, *args, **kwargs): # real signature unknown
""" Free the memory buffer. """
pass
def flush(self, *args, **kwargs): # real signature unknown
""" Flush the internal buffer """
pass
def getvalue(self): # real signature unknown; restored from __doc__
"""
Retrieve the entire contents of the "file" at any time before
the StringIO object's close() method is called.
The StringIO object can accept either Unicode or 8-bit strings,
but mixing the two may take some care. If both are used, 8-bit
strings that cannot be interpreted as 7-bit ASCII (that use the
8th bit) will cause a UnicodeError to be raised when getvalue()
is called.
"""
pass
def isatty(self, *args, **kwargs): # real signature unknown
"""
Returns False because StringIO objects are not connected to a
tty-like device.
"""
pass
def next(self): # real signature unknown; restored from __doc__
"""
A file object is its own iterator, for example iter(f) returns f
(unless f is closed). When a file is used as an iterator, typically
in a for loop (for example, for line in f: print line), the next()
method is called repeatedly. This method returns the next input line,
or raises StopIteration when EOF is hit.
"""
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most size bytes from the file
(less if the read hits EOF before obtaining size bytes).
If the size argument is negative or omitted, read all data until EOF
is reached. The bytes are returned as a string object. An empty
string is returned when EOF is encountered immediately.
"""
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent
when a file ends with an incomplete line). If the size argument is
present and non-negative, it is a maximum byte count (including the
trailing newline) and an incomplete line may be returned.
An empty string is returned only when EOF is encountered immediately.
Note: Unlike stdio's fgets(), the returned string contains null
characters ('\0') if they occurred in the input.
"""
pass
def readlines(self, *args, **kwargs): # real signature unknown
"""
Read until EOF using readline() and return a list containing the
lines thus read.
If the optional sizehint argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes (or more
to accommodate a final whole line).
"""
pass
def seek(self, *args, **kwargs): # real signature unknown
"""
Set the file's current position.
The mode argument is optional and defaults to 0 (absolute file
positioning); other values are 1 (seek relative to the current
position) and 2 (seek relative to the file's end).
There is no return value.
"""
pass
def tell(self, *args, **kwargs): # real signature unknown
""" Return the file's current position. """
pass
def truncate(self, *args, **kwargs): # real signature unknown
"""
Truncate the file's size.
If the optional size argument is present, the file is truncated to
(at most) that size. The size defaults to the current position.
The current file position is not changed unless the position
is beyond the new file size.
If the specified size exceeds the file's current size, the
file remains unchanged.
"""
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write a string to the file.
There is no return value.
"""
pass
def writelines(self): # real signature unknown; restored from __doc__
"""
Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. There
is no return value.
(The name is intended to match readlines(); writelines() does not add
line separators.)
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __iter__(self, *args, **kwargs): # real signature unknown
pass
class text_type(basestring):
"""
unicode(object='') -> unicode object
unicode(string[, encoding[, errors]]) -> unicode object
Create a new Unicode object from the given encoded string.
encoding defaults to the current default string encoding.
errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
"""
def capitalize(self): # real signature unknown; restored from __doc__
"""
S.capitalize() -> unicode
Return a capitalized version of S, i.e. make the first character
have upper case and the rest lower case.
"""
return u""
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.center(width[, fillchar]) -> unicode
Return S centered in a Unicode string of length width. Padding is
done using the specified fill character (default is a space)
"""
return u""
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.count(sub[, start[, end]]) -> int
Return the number of non-overlapping occurrences of substring sub in
Unicode string S[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.decode([encoding[,errors]]) -> string or unicode
Decodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return ""
def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.encode([encoding[,errors]]) -> string or unicode
Encodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle UnicodeEncodeErrors.
"""
return ""
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.endswith(suffix[, start[, end]]) -> bool
Return True if S ends with the specified suffix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
S.expandtabs([tabsize]) -> unicode
Return a copy of S where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
return u""
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.find(sub [,start [,end]]) -> int
Return the lowest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def format(self, *args, **kwargs): # real signature unknown; restored from __doc__
"""
S.format(*args, **kwargs) -> unicode
Return a formatted version of S, using substitutions from args and kwargs.
The substitutions are identified by braces ('{' and '}').
"""
return u""
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.index(sub [,start [,end]]) -> int
Like S.find() but raise ValueError when the substring is not found.
"""
return 0
def isalnum(self): # real signature unknown; restored from __doc__
"""
S.isalnum() -> bool
Return True if all characters in S are alphanumeric
and there is at least one character in S, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
S.isalpha() -> bool
Return True if all characters in S are alphabetic
and there is at least one character in S, False otherwise.
"""
return False
def isdecimal(self): # real signature unknown; restored from __doc__
"""
S.isdecimal() -> bool
Return True if there are only decimal characters in S,
False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
S.isdigit() -> bool
Return True if all characters in S are digits
and there is at least one character in S, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
S.islower() -> bool
Return True if all cased characters in S are lowercase and there is
at least one cased character in S, False otherwise.
"""
return False
def isnumeric(self): # real signature unknown; restored from __doc__
"""
S.isnumeric() -> bool
Return True if there are only numeric characters in S,
False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
S.isspace() -> bool
Return True if all characters in S are whitespace
and there is at least one character in S, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
S.istitle() -> bool
Return True if S is a titlecased string and there is at least one
character in S, i.e. upper- and titlecase characters may only
follow uncased characters and lowercase characters only cased ones.
Return False otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
S.isupper() -> bool
Return True if all cased characters in S are uppercase and there is
at least one cased character in S, False otherwise.
"""
return False
def join(self, iterable): # real signature unknown; restored from __doc__
"""
S.join(iterable) -> unicode
Return a string which is the concatenation of the strings in the
iterable. The separator between elements is S.
"""
return u""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.ljust(width[, fillchar]) -> unicode
Return S left-justified in a Unicode string of length width. Padding is
done using the specified fill character (default is a space).
"""
return u""
def lower(self): # real signature unknown; restored from __doc__
"""
S.lower() -> unicode
Return a copy of the string S converted to lowercase.
"""
return u""
def lstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.lstrip([chars]) -> unicode
Return a copy of the string S with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def partition(self, sep): # real signature unknown; restored from __doc__
"""
S.partition(sep) -> (head, sep, tail)
Search for the separator sep in S, and return the part before it,
the separator itself, and the part after it. If the separator is not
found, return S and two empty strings.
"""
pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
S.replace(old, new[, count]) -> unicode
Return a copy of S with all occurrences of substring
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return u""
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rfind(sub [,start [,end]]) -> int
Return the highest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rindex(sub [,start [,end]]) -> int
Like S.rfind() but raise ValueError when the substring is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.rjust(width[, fillchar]) -> unicode
Return S right-justified in a Unicode string of length width. Padding is
done using the specified fill character (default is a space).
"""
return u""
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
S.rpartition(sep) -> (head, sep, tail)
Search for the separator sep in S, starting at the end of S, and return
the part before it, the separator itself, and the part after it. If the
separator is not found, return two empty strings and S.
"""
pass
def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.rsplit([sep [,maxsplit]]) -> list of strings
Return a list of the words in S, using sep as the
delimiter string, starting at the end of the string and
working to the front. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified, any whitespace string
is a separator.
"""
return []
def rstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.rstrip([chars]) -> unicode
Return a copy of the string S with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.split([sep [,maxsplit]]) -> list of strings
Return a list of the words in S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are
removed from the result.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.startswith(prefix[, start[, end]]) -> bool
Return True if S starts with the specified prefix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.strip([chars]) -> unicode
Return a copy of the string S with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def swapcase(self): # real signature unknown; restored from __doc__
"""
S.swapcase() -> unicode
Return a copy of S with uppercase characters converted to lowercase
and vice versa.
"""
return u""
def title(self): # real signature unknown; restored from __doc__
"""
S.title() -> unicode
Return a titlecased version of S, i.e. words start with title case
characters, all remaining cased characters have lower case.
"""
return u""
def translate(self, table): # real signature unknown; restored from __doc__
"""
S.translate(table) -> unicode
Return a copy of the string S, where all characters have been mapped
through the given translation table, which must be a mapping of
Unicode ordinals to Unicode ordinals, Unicode strings or None.
Unmapped characters are left untouched. Characters mapped to None
are deleted.
"""
return u""
def upper(self): # real signature unknown; restored from __doc__
"""
S.upper() -> unicode
Return a copy of S converted to uppercase.
"""
return u""
def zfill(self, width): # real signature unknown; restored from __doc__
"""
S.zfill(width) -> unicode
Pad a numeric string S with zeros on the left, to fill a field
of the specified width. The string S is never truncated.
"""
return u""
def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
pass
def _formatter_parser(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
S.__format__(format_spec) -> unicode
Return a formatted version of S as described by format_spec.
"""
return u""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
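# A short, hedged sketch of the unicode (text_type) methods stubbed above:
# encode() goes from text to bytes, decode() goes back, format() substitutes
# values into braces. Names below are illustrative only.
def _example_unicode_methods():
    s = u'hello world'
    data = s.encode('utf-8')            # unicode -> byte string
    assert data.decode('utf-8') == s    # byte string -> unicode round-trip
    print(u'{0} -> {1} words'.format(s, len(s.split())))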
class tzoffset(__datetime.tzinfo):
"""
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0) a
:py:class:`datetime.timedelta` object.
"""
def dst(self, *args, **kwargs): # real signature unknown
pass
def fromutc(self, *args, **kwargs): # real signature unknown
pass
def is_ambiguous(self, *args, **kwargs): # real signature unknown
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
pass
def tzname(self, *args, **kwargs): # real signature unknown
pass
def utcoffset(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __ne__(self, *args, **kwargs): # real signature unknown
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" helper for pickle """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_cache_lock = None # (!) real value is '<thread.lock object at 0x0000000006115C10>'
_TzOffsetFactory__instances = None # (!) real value is '<WeakValueDictionary at 102166024>'
_TzOffsetFactory__strong_cache = OrderedDict()
_TzOffsetFactory__strong_cache_size = 8
__dict__ = None # (!) real value is "dict_proxy({'__ne__': <function __ne__ at 0x0000000006165898>, '__module__': 'dateutil.tz.tz', '_TzOffsetFactory__strong_cache': OrderedDict(), 'fromutc': <function fromutc at 0x0000000006165748>, '__dict__': <attribute '__dict__' of 'tzoffset' objects>, '__weakref__': <attribute '__weakref__' of 'tzoffset' objects>, 'dst': <function dst at 0x0000000006165588>, '__reduce__': <method '__reduce__' of 'object' objects>, '_TzOffsetFactory__strong_cache_size': 8, '_cache_lock': <thread.lock object at 0x0000000006115C10>, 'is_ambiguous': <function is_ambiguous at 0x00000000061657B8>, 'utcoffset': <function utcoffset at 0x0000000006165518>, 'tzname': <function tzname at 0x0000000006165668>, '_TzOffsetFactory__instances': <WeakValueDictionary at 102166024>, '__hash__': None, '__eq__': <function __eq__ at 0x0000000006165828>, '__doc__': '\\n A simple class for representing a fixed offset from UTC.\\n\\n :param name:\\n The timezone name, to be returned when ``tzname()`` is called.\\n :param offset:\\n The time zone offset in seconds, or (since version 2.6.0, represented\\n as a :py:class:`datetime.timedelta` object).\\n ', '__init__': <function __init__ at 0x00000000061654A8>, '__repr__': <function __repr__ at 0x0000000006165908>})"
__hash__ = None
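# A hedged usage sketch for the fixed-offset tzinfo described above (assumes
# python-dateutil is installed; the name and offset are illustrative only).
def _example_tzoffset():
    from datetime import datetime
    from dateutil import tz
    est = tz.tzoffset('EST', -5 * 3600)           # fixed offset of UTC-5
    dt = datetime(2021, 1, 1, 12, 0, tzinfo=est)
    print(dt.isoformat())                         # 2021-01-01T12:00:00-05:00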
class _dateutil_tzlocal(__dateutil_tz__common._tzinfo):
""" A :class:`tzinfo` subclass built around the ``time`` timezone functions. """
def dst(self, *args, **kwargs): # real signature unknown
pass
def is_ambiguous(self, *args, **kwargs): # real signature unknown
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
pass
def tzname(self, *args, **kwargs): # real signature unknown
pass
def utcoffset(self, *args, **kwargs): # real signature unknown
pass
def _isdst(self, *args, **kwargs): # real signature unknown
pass
def _naive_is_dst(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __ne__(self, *args, **kwargs): # real signature unknown
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" helper for pickle """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
pass
__hash__ = None
class _dateutil_tzstr(__dateutil_tz_tz.tzrange):
"""
``tzstr`` objects are time zone objects specified by a time-zone string as
it would be passed to a ``TZ`` variable on POSIX-style systems (see
the `GNU C Library: TZ Variable`_ for more details).
There is one notable exception, which is that POSIX-style time zones use an
inverted offset format, so normally ``GMT+3`` would be parsed as an offset
3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
behavior, pass a ``True`` value to ``posix_offset``.
The :class:`tzrange` object provides the same functionality, but is
specified using :class:`relativedelta.relativedelta` objects rather than
strings.
:param s:
A time zone string in ``TZ`` variable format. This can be a
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
:class:`unicode`) or a stream emitting unicode characters
(e.g. :class:`StringIO`).
:param posix_offset:
Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
POSIX standard.
.. caution::
Prior to version 2.7.0, this function also supported time zones
in the format:
* ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
* ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``
This format is non-standard and has been deprecated; this function
will raise a :class:`DeprecatedTZFormatWarning` until
support is removed in a future version.
.. _`GNU C Library: TZ Variable`:
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
"""
def _delta(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
pass
_TzStrFactory__cache_lock = None # (!) real value is '<thread.lock object at 0x0000000006115BB0>'
_TzStrFactory__instances = None # (!) real value is '<WeakValueDictionary at 102179848>'
_TzStrFactory__strong_cache = OrderedDict()
_TzStrFactory__strong_cache_size = 8
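# A hedged sketch of the tzstr behaviour documented above (assumes
# python-dateutil is installed): by default ``GMT+3`` is parsed as 3 hours
# *ahead* of GMT, while ``posix_offset=True`` keeps the inverted POSIX reading.
def _example_tzstr():
    from datetime import datetime
    from dateutil import tz
    ahead = tz.tzstr('GMT+3')
    posix = tz.tzstr('GMT+3', posix_offset=True)
    dt = datetime(2021, 1, 1, 12, 0)
    print(ahead.utcoffset(dt))   # +3 hours (dateutil's convention)
    print(posix.utcoffset(dt))   # -3 hours (POSIX convention)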
class _dateutil_tzutc(__datetime.tzinfo):
"""
This is a tzinfo object that represents the UTC time zone.
**Examples:**
.. doctest::
>>> from datetime import *
>>> from dateutil.tz import *
>>> datetime.now()
datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)
>>> datetime.now(tzutc())
datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())
>>> datetime.now(tzutc()).tzname()
'UTC'
.. versionchanged:: 2.7.0
``tzutc()`` is now a singleton, so the result of ``tzutc()`` will
always return the same object.
.. doctest::
>>> from dateutil.tz import tzutc, UTC
>>> tzutc() is tzutc()
True
>>> tzutc() is UTC
True
"""
def dst(self, *args, **kwargs): # real signature unknown
pass
def fromutc(self): # real signature unknown; restored from __doc__
"""
Fast track version of fromutc() returns the original ``dt`` object for
any valid :py:class:`datetime.datetime` object.
"""
pass
def is_ambiguous(self, *args, **kwargs): # real signature unknown
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
pass
def tzname(self, *args, **kwargs): # real signature unknown
pass
def utcoffset(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __ne__(self, *args, **kwargs): # real signature unknown
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" helper for pickle """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_TzSingleton__instance = tzutc()
__dict__ = None # (!) real value is 'dict_proxy({\'__ne__\': <function __ne__ at 0x00000000061653C8>, \'__module__\': \'dateutil.tz.tz\', \'_TzSingleton__instance\': tzutc(), \'fromutc\': <function fromutc at 0x00000000061652E8>, \'__dict__\': <attribute \'__dict__\' of \'tzutc\' objects>, \'__weakref__\': <attribute \'__weakref__\' of \'tzutc\' objects>, \'dst\': <function dst at 0x00000000061650B8>, \'__reduce__\': <method \'__reduce__\' of \'object\' objects>, \'is_ambiguous\': <function is_ambiguous at 0x0000000006165208>, \'utcoffset\': <function utcoffset at 0x0000000006165048>, \'tzname\': <function tzname at 0x0000000006165198>, \'__hash__\': None, \'__eq__\': <function __eq__ at 0x0000000006165358>, \'__doc__\': "\\n This is a tzinfo object that represents the UTC time zone.\\n\\n **Examples:**\\n\\n .. doctest::\\n\\n >>> from datetime import *\\n >>> from dateutil.tz import *\\n\\n >>> datetime.now()\\n datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)\\n\\n >>> datetime.now(tzutc())\\n datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())\\n\\n >>> datetime.now(tzutc()).tzname()\\n \'UTC\'\\n\\n .. versionchanged:: 2.7.0\\n ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will\\n always return the same object.\\n\\n .. doctest::\\n\\n >>> from dateutil.tz import tzutc, UTC\\n >>> tzutc() is tzutc()\\n True\\n >>> tzutc() is UTC\\n True\\n ", \'__repr__\': <function __repr__ at 0x0000000006165438>})'
__hash__ = None
class _timelex(object):
# no doc
def get_tokens(self, *args, **kwargs): # real signature unknown
"""
This function breaks the time string into lexical units (tokens), which
can be parsed by the parser. Lexical units are demarcated by changes in
the character set, so any continuous string of letters is considered
one unit, any continuous string of numbers is considered one unit.
The main complication arises from the fact that dots ('.') can be used
both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
"4:30:21.447"). As such, it is necessary to read the full context of
any dot-separated strings before breaking it into tokens; as such, this
function maintains a "token stack", for when the ambiguous context
demands that multiple tokens be parsed at once.
"""
pass
@classmethod
def split(cls, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "dict_proxy({'__module__': 'pandas._libs.tslibs.parsing', '__qualname__': '_timelex', 'split': <classmethod object at 0x0000000003A32CA8>, 'get_tokens': <cyfunction _timelex.get_tokens at 0x0000000003A37108>, '__dict__': <attribute '__dict__' of '_timelex' objects>, '__weakref__': <attribute '__weakref__' of '_timelex' objects>, '__doc__': None, '__init__': <cyfunction _timelex.__init__ at 0x0000000003A37048>})"
__qualname__ = '_timelex'
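# The dot-handling behaviour described in get_tokens() above can be observed
# through the public date parser (a hedged sketch; assumes python-dateutil is
# installed and that these strings parse as in the docstring's examples).
def _example_parsing_dots():
    from dateutil import parser
    print(parser.parse('Sep.20.2009'))    # dots act as separators
    print(parser.parse('4:30:21.447'))    # dot acts as a decimal point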
# variables with complex values
DEFAULTPARSER = None # (!) real value is '<dateutil.parser._parser.parser object at 0x0000000006172E08>'
MONTH_NUMBERS = {
'APR': 3,
'AUG': 7,
'DEC': 11,
'FEB': 1,
'JAN': 0,
'JUL': 6,
'JUN': 5,
'MAR': 2,
'MAY': 4,
'NOV': 10,
'OCT': 9,
'SEP': 8,
}
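# Note: the mapping above is zero-based (JAN -> 0, DEC -> 11), so a
# conventional 1-based calendar month is MONTH_NUMBERS[name] + 1.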
nat_strings = None # (!) real value is "set(['nat', 'NaT', 'NAN', 'nan', 'NaN', 'NAT'])"
_DEFAULT_DATETIME = None # (!) real value is 'datetime.datetime(1, 1, 1, 0, 0)'
__test__ = {}
| 40.186689 | 5,250 | 0.613142 | 8,892 | 71,251 | 4.727958 | 0.104701 | 0.066483 | 0.102281 | 0.084584 | 0.751623 | 0.724935 | 0.702195 | 0.678171 | 0.655027 | 0.61735 | 0 | 0.021904 | 0.29455 | 71,251 | 1,772 | 5,251 | 40.209368 | 0.814499 | 0.685338 | 0 | 0.787149 | 0 | 0 | 0.003856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.433735 | false | 0.281125 | 0.02008 | 0 | 0.674699 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
130502a152f90be77a8c9800cf95de6a1be4fcf5 | 58 | py | Python | notifications/views.py | kbilak/Talker | ff1ed19d080e913da6852f4955602c920ac7411c | ["MIT"] | null | null | null | notifications/views.py | kbilak/Talker | ff1ed19d080e913da6852f4955602c920ac7411c | ["MIT"] | null | null | null | notifications/views.py | kbilak/Talker | ff1ed19d080e913da6852f4955602c920ac7411c | ["MIT"] | null | null | null |
from django.shortcuts import render
def index(request):  # Django views receive the incoming request object
    pass  # placeholder view; nothing is rendered yet
| 14.5 | 35 | 0.741379 | 8 | 58 | 5.375 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.189655 | 58 | 4 | 36 | 14.5 | 0.914894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
1332153cb8531c616a954b2d851ad75f33e4cd58 | 63 | py | Python | courses/examples/Beginners_python/loops.py | Chris35Wills/Chris35Wills.github.io | eb3990caae6c8bde16a609a60f8a7860859f2095 | ["MIT"] | 1 | 2021-09-15T17:19:03.000Z | 2021-09-15T17:19:03.000Z | courses/examples/Beginners_python/loops.py | Chris35Wills/Chris35Wills.github.io | eb3990caae6c8bde16a609a60f8a7860859f2095 | ["MIT"] | null | null | null | courses/examples/Beginners_python/loops.py | Chris35Wills/Chris35Wills.github.io | eb3990caae6c8bde16a609a60f8a7860859f2095 | ["MIT"] | 2 | 2020-05-06T21:04:26.000Z | 2021-09-15T17:19:05.000Z |
# prints: 1,3,5,7,9 (range(1, 10, 2) counts from 1 to 9 in steps of 2)
for i in range(1,10, 2):
print i
| 12.6 | 27 | 0.571429 | 19 | 63 | 1.894737 | 0.842105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.254902 | 0.190476 | 63 | 4 | 28 | 15.75 | 0.45098 | 0.396825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.5 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
13f76b553ab99ee559c82401d04bed3ae9c6ee2d | 76,961 | py | Python | pair_fast_forecast_distributed/pairwise_fusion_kd/compare/model_compare.py | Chezacar/CollaborationWithLatency | da06abea16f1ffcafc35d27cb69ae3116a345965 | ["MIT"] | null | null | null | pair_fast_forecast_distributed/pairwise_fusion_kd/compare/model_compare.py | Chezacar/CollaborationWithLatency | da06abea16f1ffcafc35d27cb69ae3116a345965 | ["MIT"] | null | null | null | pair_fast_forecast_distributed/pairwise_fusion_kd/compare/model_compare.py | Chezacar/CollaborationWithLatency | da06abea16f1ffcafc35d27cb69ae3116a345965 | ["MIT"] | null | null | null |
from os import cpu_count
from numpy.core.fromnumeric import shape
import torch.nn.functional as F
import torch.nn as nn
import torch
from data.config_com import Config
from utils.model import STPN, STPN_KD, MapExtractor, MotionNet, forecast_lstm, lidar_encoder, lidar_decoder, lidar_decoder_kd, conv2DBatchNormRelu, Sparsemax, adafusionlayer,sigmoidfusionlayer, pairfusionlayer,pairfusionlayer_1, pairfusionlayer_2, pairfusionlayer_3 ,pairfusionlayer_4
import numpy as np
import copy
import torchgeometry as tgm
from matplotlib import pyplot as plt
class ClassificationHead(nn.Module):
def __init__(self, config):
super(ClassificationHead, self).__init__()
category_num = config.category_num
channel = 32
if config.use_map:
channel += 6
if config.use_vis:
channel += 13
anchor_num_per_loc = len(config.anchor_size)
self.conv1 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(channel, category_num*anchor_num_per_loc, kernel_size=1, stride=1, padding=0)
self.bn1 = nn.BatchNorm2d(channel)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.conv2(x)
return x
class MotionStateHead(nn.Module):
def __init__(self, config):
super(MotionStateHead, self).__init__()
category_num = 3 #ignore: 0 static: 1 moving: 2
channel = 32
if config.use_map:
channel += 6
if config.use_vis:
channel += 13
anchor_num_per_loc = len(config.anchor_size)
self.conv1 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(channel, category_num*anchor_num_per_loc, kernel_size=1, stride=1, padding=0)
self.bn1 = nn.BatchNorm2d(channel)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.conv2(x)
return x
class FeatEncoder(nn.Module):
def __init__(self, height_feat_size=13):
super(FeatEncoder, self).__init__()
self.stpn = STPN(height_feat_size=height_feat_size)
def forward(self, bevs):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x = self.stpn(bevs)
return x
class RegressionHead(nn.Module):
def __init__(self,config):
super(RegressionHead,self).__init__()
category_num = config.category_num
channel = 32
if config.use_map:
channel += 6
if config.use_vis:
channel += 13
anchor_num_per_loc = len(config.anchor_size)
box_code_size = config.box_code_size
self.box_prediction = nn.Sequential(
nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(channel),
nn.ReLU(),
nn.Conv2d(channel, anchor_num_per_loc * box_code_size, kernel_size=1, stride=1, padding=0))
def forward(self,x):
box = self.box_prediction(x)
return box  # return the predicted boxes rather than the input feature map
class SingleRegressionHead(nn.Module):
def __init__(self,config):
super(SingleRegressionHead,self).__init__()
category_num = config.category_num
channel = 32
if config.use_map:
channel += 6
if config.use_vis:
channel += 13
anchor_num_per_loc = len(config.anchor_size)
box_code_size = config.box_code_size
if config.only_det:
out_seq_len = 1
else:
out_seq_len = config.pred_len
if config.binary:
if config.only_det:
self.box_prediction = nn.Sequential(
nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(channel),
nn.ReLU(),
nn.Conv2d(channel, anchor_num_per_loc * box_code_size * out_seq_len, kernel_size=1, stride=1, padding=0))
else:
self.box_prediction = nn.Sequential(
nn.Conv2d(channel, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, anchor_num_per_loc * box_code_size * out_seq_len, kernel_size=1, stride=1, padding=0))
def forward(self,x):
box = self.box_prediction(x)
return box
class FaFMGDA(nn.Module):
def __init__(self, config):
super(FaFMGDA, self).__init__()
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.motion_state = config.motion_state
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
#self.RegressionList = nn.ModuleList([RegressionHead for i in range(seq_len)])
self.regression = SingleRegressionHead(config)
if self.motion_state:
self.motion_cls = MotionStateHead(config)
def forward(self, x):
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
class FaFNet(nn.Module):
def __init__(self, config):
super(FaFNet, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
#self.RegressionList = nn.ModuleList([RegressionHead for i in range(seq_len)])
self.regression = SingleRegressionHead(config)
#self.fusion_method = config.fusion_method
# if self.use_map:
# if self.fusion_method == 'early_fusion':
# self.stpn = STPN(height_feat_size=config.map_dims[2]+config.map_channel)
# elif self.fusion_method == 'middle_fusion':
# self.stpn = STPN(height_feat_size=config.map_dims[2],use_map=True)
# elif self.fusion_method == 'late_fusion':
# self.map_encoder = MapExtractor(map_channel=config.map_channel)
# self.stpn = STPN(height_feat_size=config.map_dims[2])
# else:
self.stpn = STPN_KD(height_feat_size=config.map_dims[2])
if self.motion_state:
self.motion_cls = MotionStateHead(config)
def forward(self, bevs, maps=None,vis=None):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
# vis = vis.permute(0, 3, 1, 2)
x_8, x_7, x_6, x_5, x_3 = self.stpn(bevs)
return x_8, x_7, x_6, x_5, x_3
# x = x_8
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
# cls_preds = self.classification(x)
# cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
# cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
#
#
# # Detection head
# loc_preds =self.regression(x)
# loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
# loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
# #loc_pred (N * T * W * H * loc)
# result = {'loc': loc_preds,
# 'cls': cls_preds}
#
# #MotionState head
# if self.motion_state:
# motion_cat = 3
# motion_cls_preds = self.motion_cls(x)
# motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
# motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
# result['state'] = motion_cls_preds
# return result, x_8, x_7, x_6, x_5
'''''''''''''''''''''''''''''''''''''''''''''''''''
Online warp of layer 4, Yiming Li, April, 15, 2021
'''''''''''''''''''''''''''''''''''''''''''''''''''
class FaFMIMONet_512_16_16(nn.Module):
def __init__(self, config, n_classes=21, in_channels=13, feat_channel=512, feat_squeezer=-1, attention='additive',
has_query=True, sparse=False, agent_num=5, shuffle_flag=False, image_size=512,
shared_img_encoder='unified', key_size=1024, query_size=128):
super(FaFMIMONet_512_16_16, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.sparse = sparse
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
# Detection decoder
self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
'''''''''''''''''''''''''''''''''''''''''''''''''''
Visualization debugging of online warp: rotate first, then translate using the PyTorch affine grid
padded_voxel_points_global = torch.squeeze(padded_voxel_points_global).cpu().numpy()
plt.clf()
plt.xlim(0, 768)
plt.ylim(0, 768)
plt.imshow(np.max(padded_voxel_points_global, axis=2), alpha=1.0, zorder=12)
plt.pause(0.1)
plt.show()
tmp0 = torch.squeeze(bevs[0]).cpu().numpy()
tmp1 = torch.squeeze(bevs[1]).cpu().numpy()
# tmp0 = np.flip(tmp0, axis=1)
# pass encoder
plt.clf()
plt.xlim(0, 256)
plt.ylim(0, 256)
plt.imshow(np.max(tmp0, axis=0), origin='lower', alpha=1.0, zorder=12)
plt.pause(0.1)
plt.show()
plt.imshow(np.max(tmp1, axis=0), origin='lower', alpha=1.0, zorder=12)
plt.pause(0.1)
plt.show()
device = bevs.device
size = (1, 13, 256, 256)
nb_warp = trans_matrices[0, 0, 1]
nb_agent = torch.unsqueeze(torch.squeeze(bevs[1]), 0)
x_trans = (4*nb_warp[0, 3])/128 # x shift: left is positive, right is negative
y_trans = -(4*nb_warp[1, 3])/128
# z_trans = (4*nb_warp[2, 3])/128
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid used by grid_sample
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid used by grid_sample
#first rotate the feature map, then translate it
warp_img_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_img_trans = F.grid_sample(warp_img_rot, grid_trans, mode='bilinear')
warp_img = warp_img_trans
nb2tg_map_vis = torch.squeeze(warp_img).cpu().numpy()
# visualize the warped feature map
plt.imshow(np.max(nb2tg_map_vis, axis=0), origin='lower', alpha=1.0, zorder=12)
plt.pause(0.1)
plt.show()
plt.imshow(np.max(nb2tg_map_vis + tmp0, axis=0), origin='lower', alpha=1.0, zorder=12)
plt.pause(0.1)
plt.show()
'''''''''''''''''''''''''''''''''''''''''''''''''''
x,x_1,x_2,x_3,feat_maps = self.u_encoder(bevs)
device = bevs.device
size_16 = (1, 512, 16, 16)
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = local_com_mat[b, i]
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size_16)) # build the sampling grid used by grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size_16)) # build the sampling grid used by grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent = tg_agent + warp_feat.type(dtype=torch.float32)
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x = self.decoder(x,x_1,x_2,x_3,feat_fuse_mat,batch_size)
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
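# A minimal, self-contained sketch of the rotate-then-translate feature warp
# used in the forward passes above (assumes torch is installed; the feature
# shape and the identity 4x4 transform below are illustrative only, not values
# taken from this dataset).
def _example_feature_warp():
    import torch
    import torch.nn.functional as F
    feat = torch.randn(1, 512, 16, 16)                      # one agent's feature map
    tfm = torch.eye(4)                                       # hypothetical 4x4 pose transform
    x_trans = (4 * tfm[0, 3]) / 128                          # normalised translation, as above
    y_trans = -(4 * tfm[1, 3]) / 128
    theta_rot = torch.tensor([[tfm[0, 0], tfm[0, 1], 0.0],
                              [tfm[1, 0], tfm[1, 1], 0.0]]).unsqueeze(0)
    theta_trans = torch.tensor([[1.0, 0.0, x_trans],
                                [0.0, 1.0, y_trans]]).unsqueeze(0)
    grid_rot = F.affine_grid(theta_rot, feat.shape)          # grid for the rotation
    grid_trans = F.affine_grid(theta_trans, feat.shape)      # grid for the translation
    warped = F.grid_sample(F.grid_sample(feat, grid_rot), grid_trans)
    return warped                                            # rotated first, then translated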
# with knowledge distillation
class FaFMIMONet_512_16_16_KD(nn.Module):
def __init__(self, config, n_classes=21, in_channels=13, feat_channel=512, feat_squeezer=-1, attention='additive',
has_query=True, sparse=False, agent_num=5, shuffle_flag=False, image_size=512,
shared_img_encoder='unified', key_size=1024, query_size=128):
super(FaFMIMONet_512_16_16_KD, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.sparse = sparse
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
# Detection decoder
self.decoder = lidar_decoder_kd(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,x_3,feat_maps = self.u_encoder(bevs)
device = bevs.device
size_16 = (1, 512, 16, 16)
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = local_com_mat[b, i]
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size_16)) # build the sampling grid used by grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size_16)) # build the sampling grid used by grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent = tg_agent + warp_feat.type(dtype=torch.float32)
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x_8, x_7, x_6, x_5 = self.decoder(x, x_1, x_2, x_3, feat_fuse_mat, batch_size)
x = x_8
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result, x_8, x_7, x_6, x_5
'''''''''''''''''''''''''''''''''''''''''''''''''''
Online warp of layer 3
'''''''''''''''''''''''''''''''''''''''''''''''''''
class FaFMIMONet_256_32_32(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified', forecast_num = 3):
super(FaFMIMONet_256_32_32, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
if config.forecast_loss == 'True' :
self.Forecast_loss = nn.SmoothL1Loss(reduction='sum')
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.u_encoder.eval()
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
if config.forecast == 'LSTM':
self.forecast = forecast_lstm()
self.forecast_flag = 'LSTM'
elif config.forecast == 'MotionNet':
self.forecast = MotionNet(forecast_num = forecast_num)
self.forecast_flag = 'MotionNet'
elif config.forecast == 'Baseline':
self.forecast_flag = 'Baseline'
self.adafusion = pairfusionlayer_3(input_channel=256)
# Detection decoder
self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def feat_trans2center_now(self, device, feat, transmatrix, center_agent = 0, size = (1,256,32,32)):
nb_agent = torch.unsqueeze(feat, 0) # [1 512 16 16]
nb_warp = transmatrix # [4 4]
# size = (1,256,32,32)
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid used by grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid used by grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
# tg_agent.append(warp_feat.type(dtype=torch.float32))
return warp_feat.type(dtype=torch.float32)
def trans2center_now(self, device, bevs, trans2center_now, batch_size,num_agent_tensor, center_agent = 0):
bevs_update = torch.zeros(bevs.shape)
size = bevs[0,0].shape
for b in range(batch_size):
try:
center_agent_int = int(center_agent[b])
except:
center_agent_int = center_agent
num_agent = int(num_agent_tensor[b, center_agent_int])
if num_agent == 0:
break
i = int(center_agent_int)
# for i in range(num_agent):
# tg_agent = []
# tg_agent.append(local_com_mat[b, i])
# all_warp = trans_matrices[b, i, -1] # transformation [2 5 5 4 4]
for j in range(num_agent):
for k in range(len(trans2center_now[0])):
if (j != i or k == (len(trans2center_now[0]) - 1)) and torch.max(bevs[j*batch_size + b][k]) > 0:
# nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_agent = bevs[j*batch_size + b][k]
# nb_warp = all_warp[j] # [4 4]
nb_warp = trans2center_now[j][k][b]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid used by grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid used by grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
# tg_agent.append(warp_feat.type(dtype=torch.float32))
bevs_update[j*batch_size + b] = warp_feat.type(dtype=torch.float32)
if torch.max(bevs[j*batch_size + b][k]) == 0:
bevs_update[j*batch_size + b] = bevs[j*batch_size + b]
return bevs_update
def test_transfer(self, center_data ,data, trans_matrices, batch_size = 4, center_agent = 0, forecast_num = 1):
device = data.device
transfer_data = torch.zeros(data.shape)
for i in range(len(data)):
transfer_data[i][0] = self.feat_trans2center_now(device, data[i][0], trans_matrices[i % batch_size][center_agent[i % batch_size]][forecast_num - 1][int(i / batch_size)], center_agent = 0,size = (1,13,256,256))
a = torch.squeeze(transfer_data, 1)
imgs_1_13 = np.array(a)
imgs_1_13= imgs_1_13.transpose(0,2,3,1)
s_1, s_2, s_3, s_4 = imgs_1_13.shape
# imgs_1 = np.zeros((int(s_1/batch_size), s_2, s_3))
# for t in range(int(len(imgs_1_13) / batch_size)):
imgs_1 = np.zeros((batch_size, s_2, s_3))
for t in range(batch_size):
i = batch_size + t
# imgs_1[i] = np.array(imgs_1[i].cpu())
# imgs_1[i] = imgs_1[i].transpose((1,2,0))
imgs_1[t] = imgs_1_13[i].sum(axis = 2)
print(np.max(imgs_1[t]))
imgs_1[t] /= np.max(imgs_1[t])
imgs_1[t] *= 255
imgs_1[t] = imgs_1[t].astype(int)
np.save('./trans/test_transfer.npy', np.array(imgs_1))
b = torch.squeeze(center_data[:,-1,-1], 1).cpu()
imgs_2_13 = np.array(b)
imgs_2_13= imgs_2_13.transpose(0,2,3,1)
s_1, s_2, s_3, s_4 = imgs_2_13.shape
# imgs_2 = np.zeros((int(s_1/batch_size), s_2, s_3))
imgs_2 = np.zeros((batch_size, s_2, s_3))
# for t in range(int(len(imgs_2) / batch_size)):
for t in range(batch_size):
i = t
# imgs_2[i] = np.array(imgs_1[i].cpu())
# imgs_2[i] = imgs_2[i].transpose((1,2,0))
imgs_2[t] = imgs_2_13[i].sum(axis = 2)
print(np.max(imgs_2[t]))
imgs_2[t] /= np.max(imgs_2[t])
imgs_2[t] *= 255
imgs_2[t] = imgs_2[t].astype(int)
np.save('./trans/test_center.npy', np.array(imgs_2))
imgs_3 = np.array(imgs_1) + np.array(imgs_2)
for i in range(len(imgs_3)):
imgs_3[i] = np.array(imgs_3[i])
# imgs_3[i] = imgs_3[i].transpose((1,2,0))
# imgs_3[i] = imgs_3[i].sum(axis = 2)
print(np.max(imgs_3[i]))
# imgs_3[i] = imgs_3[i] / np.max(imgs_3[i])
# imgs_3[i] *= 255
imgs_3[i] = imgs_3[i].astype(int)
np.save('./trans/test_raw.npy', np.array(imgs_3))
def forward(self, bevs, trans_matrices, num_agent_tensor, to_new_trans_mat_list, supervise_data, vis=None, training=True, MO_flag=True, inference='activated', batch_size=2, center_agent = 0, delta_t = [0,10,10,10,10], rank = 0, forecast_num = 1, mode = 'train', config = Config):
self.u_encoder.eval()
device = bevs.device
# to_new_trans_mat_list[agent, forecast_num,batch,4x4]
bevs = bevs.permute(0, 1, 2, 5, 3, 4) # (Batch, seq, z, h, w)
if mode == 'train':
supervise_data['bev_seq'] = supervise_data['bev_seq'].permute(0,1,2,5,3,4)
supervise_data['bev_seq'] = self.agents2batch(supervise_data['bev_seq'])
x_s, x_s_1, x_s_2, x_s_3_temp, x_s_4 = self.u_encoder(supervise_data['bev_seq'])
x_s_3 = torch.zeros(x_s_3_temp.shape).to(device)
for i in range(len(x_s_3_temp)):
x_s_3[i] = self.feat_trans2center_now(device, x_s_3_temp[i], trans_matrices[i % batch_size][center_agent[i % batch_size]][forecast_num - 1][int(i / batch_size)], center_agent = 0)
for batch in range(batch_size):
for inbatch in range(num_agent_tensor[batch][0], len(num_agent_tensor[0])):
x_s_3[batch_size * inbatch + batch] = 0
# self.test_transfer(bevs, supervise_data['bev_seq'], trans_matrices, batch_size, center_agent, forecast_num)
# bev_test = self.trans2center_now(device, bevs, to_new_trans_mat_list, batch_size, num_agent_tensor, center_agent)
x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs[:,-1])
# x_3 = torch.zeros(x_3.shape)
# x_s_3 = x_s_3_temp
# x_s_3_shape_1, x_s_3_shape_2, x_s_3_shape_3, x_s_3_shape_4 = x_s_3.shape
if self.forecast_flag == 'LSTM':
x_feature_list = []
for batch in range(batch_size):
for inbatch in range(len(delta_t[0])):
# x_temp, x_1_temp, x_2_temp, x_3_temp, x_4_temp = self.u_encoder(bevs[batch * len(delta_t[0]) + inbatch])
x_temp, x_1_temp, x_2_temp, x_3_temp_1, x_4_temp = self.u_encoder(bevs[batch_size * inbatch + batch])
x_3_temp = torch.zeros(x_3_temp_1.shape).to(device)
# if delta_t[batch][inbatch] > 0:
if inbatch != 0:
for i in range(x_3_temp.shape[0]):
x_3_temp[i] = self.feat_trans2center_now(device, x_3_temp_1[i], to_new_trans_mat_list[inbatch][i][batch], center_agent = 0)
if delta_t[batch][inbatch] > 0 and forecast_num > 1:
x_feature_list.append(self.forecast(x_3_temp, delta_t[batch][inbatch]))
else:
x_feature_list.append(torch.unsqueeze(x_3_temp[-1], 0))
else:
x_feature_list.append(torch.unsqueeze(x_3_temp_1[-1], 0))
x_3 = torch.cat(tuple(x_feature_list), 0) #(10,256,32,32)
feat_list = []
_,a,b,c = x_3.shape
for batch in range(batch_size):
temp_tensor = torch.zeros((num_agent_tensor[batch][0],a,b,c)).to(device)
for inbatch in range(num_agent_tensor[batch][0]):
temp_tensor[inbatch] = (x_3[batch * len(delta_t[0]) + inbatch])
feat_list.append(temp_tensor)
if self.forecast_flag == 'MotionNet':
x_feature_list = [] # ordered as [b0a0, b0a1, ..., b1a0, ..., bna5]
x_center_feat_list = []
for batch in range(batch_size):
for inbatch in range(len(delta_t[0])):
x_temp, x_1_temp, x_2_temp, x_3_temp_1, x_4_temp = self.u_encoder(bevs[batch_size * inbatch + batch])
x_3_temp = torch.zeros(x_3_temp_1.shape).to(device)
# forecast_agent_list = []
for i in range(x_3_temp.shape[0]):
x_3_temp[i] = self.feat_trans2center_now(device, x_3_temp_1[i], to_new_trans_mat_list[inbatch][i][batch], center_agent = 0)
# x_3_temp = x_3_temp_1
if inbatch == int(center_agent[batch]):
x_center_feat_list.append(x_3_temp.unsqueeze(0))
if inbatch != int(center_agent[batch]):
x_feature_list.append(x_3_temp.unsqueeze(0))
# forecast_agent_list.append(i)
x_feature_center = torch.cat(x_center_feat_list)
x_feature_toforecast = torch.cat(x_feature_list, 0)
if torch.sum(delta_t) > 1:
x_3_feature = self.forecast(x_feature_toforecast, delta_t)
else:
x_3_feature = x_feature_toforecast[:,-1]
# if delta_t[batch][inbatch] > 0 and forecast_num > 1:
# x_feature_list.append(self.forecast(x_feature_toforecast, delta_t, x_feature_list))
# x_feature_list.append()
# else:
# x_feature_list.append(torch.unsqueeze(x_3_temp[-1], 0))
# device = bevs.device
size = (1, 256, 32, 32)
padding_feat = torch.zeros((256,32,32)).cuda()
# feat_maps = x_3
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
# feat_map = {}
# feat_list = []
# for i in range(self.agent_num):
# feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
# if torch.max(feat_map[i]) > 0:
# feat_list.append(feat_map[i])
feat_list = []
_, a,b,c = x_3_feature.shape
for i in range(batch_size):
temp_tensor = torch.zeros((num_agent_tensor[i][0], a, b, c)).to(device)
center_flag = 0
for j in range(num_agent_tensor[i][0]):
if j == center_agent[i]:
temp_tensor[j] = x_feature_center[i][-1]
center_flag = 1
else:
# temp_tensor[j] = x_3_feature[(j - center_flag) * batch_size + i]
temp_tensor[j] = x_3_feature[(j - center_flag) + i* (len(delta_t[0])-1)]
feat_list.append(temp_tensor)
if self.forecast_flag =='Baseline':
feat_list = []
_, a,b,c = x_3.shape
for i in range(batch_size):
temp_tensor = torch.zeros((num_agent_tensor[i][0], a, b, c)).to(device)
center_flag = 0
for j in range(num_agent_tensor[i][0]):
temp_tensor[j] = self.feat_trans2center_now(device, x_3[i + j* (len(delta_t[:,0]))], trans_matrices[i][0][-1][j], center_agent = 0)
feat_list.append(temp_tensor)
if config.forecast_loss == 'True':
forecast_loss = 0
count = 0
if mode == 'train':
for i in range(batch_size):
for j in range(num_agent_tensor[i][0]):
forecast_loss += self.Forecast_loss(x_s_3[j * batch_size + i], feat_list[i][j])
count += 1
forecast_loss /= count
[a,b,c] = feat_list[0][0].shape
# local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
# (a,_,b,c,d) = local_com_mat.shape
local_com_mat_update = torch.zeros((batch_size,a,b,c)).to(device) # to avoid the inplace operation
for i in range(batch_size):
temp = self.adafusion(feat_list[i][0:num_agent_tensor[i][0]])
local_com_mat_update[i] = temp
# self.adafusion(tg_agent, rank)
# for b in range(batch_size):
# try:
# center_agent_int = int(center_agent[b])
# except:
# center_agent_int = center_agent
# num_agent = int(num_agent_tensor[b, center_agent_int])
# if num_agent == 0:
# break
# i = int(center_agent_int)
# # for i in range(num_agent):
# tg_agent = []
# tg_agent.append(local_com_mat[b, i])
# all_warp = trans_matrices[b, i, -1] # transformation [2 5 5 4 4]
# for j in range(num_agent):
# if j != i:
# nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
# nb_warp = all_warp[j] # [4 4]
# # normalize the translation vector
# x_trans = (4*nb_warp[0, 3])/128
# y_trans = -(4*nb_warp[1, 3])/128
# theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
# theta_rot = torch.unsqueeze(theta_rot, 0)
# grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid for grid_sample
# theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
# theta_trans = torch.unsqueeze(theta_trans, 0)
# grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid for grid_sample
# #first rotate the feature map, then translate it
# warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
# warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
# warp_feat = torch.squeeze(warp_feat_trans)
# tg_agent.append(warp_feat.type(dtype=torch.float32))
# # for k in range(5-num_agent):
# # tg_agent.append(padding_feat)
# tg_agent=torch.stack(tg_agent)
# tg_agent = self.adafusion(tg_agent, rank)
# local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
# feat_fuse_mat = self.agents2batch(local_com_mat_update)
# feat_fuse_mat = torch.unsqueeze(local_com_mat_update, 1)
x = self.decoder(x[0:batch_size],x_1[0:batch_size],x_2[0:batch_size],local_com_mat_update,x_4[0:batch_size],batch_size)
# x = self.decoder(x,x_1,x_2,feat_fuse_mat,x_4,batch_size)
# x = torch.stack([x[i * self.agent_num] for i in range(batch_size)])
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
if config.forecast_loss == 'True':
result = {'loc': loc_preds,
'cls': cls_preds,
'forecast_loss': forecast_loss}
else:
result = {'loc': loc_preds,
'cls': cls_preds,}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
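# ----------------------------------------------------------------------
# Editorial sketch (not part of the original model): the neighbour-warping
# step used throughout these classes splits a 4x4 relative pose into a
# rotation grid and a translation grid and applies them with grid_sample.
# The stand-alone helper below restates that logic, assuming `torch` and
# `torch.nn.functional as F` are imported at module level and that the
# 4/128 scale matches the BEV resolution used above.
def _warp_neighbour_feature_sketch(nb_feat, nb_warp, size, device):
    # nb_feat: [1, C, H, W] neighbour feature map; nb_warp: [4, 4] pose matrix
    x_trans = (4 * nb_warp[0, 3]) / 128
    y_trans = -(4 * nb_warp[1, 3]) / 128
    theta_rot = torch.tensor([[nb_warp[0, 0], nb_warp[0, 1], 0.0],
                              [nb_warp[1, 0], nb_warp[1, 1], 0.0]],
                             dtype=torch.float, device=device).unsqueeze(0)
    theta_trans = torch.tensor([[1.0, 0.0, x_trans],
                                [0.0, 1.0, y_trans]],
                               dtype=torch.float, device=device).unsqueeze(0)
    grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))      # rotation-only sampling grid
    grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # translation-only sampling grid
    # first rotate the feature map, then translate it, matching the models above
    warp_feat = F.grid_sample(nb_feat, grid_rot, mode='bilinear')
    warp_feat = F.grid_sample(warp_feat, grid_trans, mode='bilinear')
    return torch.squeeze(warp_feat)
# ----------------------------------------------------------------------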
# with knowledge distillation
class FaFMIMONet_256_32_32_KD(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified'):
super(FaFMIMONet_256_32_32_KD, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
self.adafusion = pairfusionlayer_3(input_channel=256)
# Detection decoder
self.decoder = lidar_decoder_kd(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
device = bevs.device
size = (1, 256, 32, 32)
padding_feat = torch.zeros((256,32,32)).cuda()
feat_maps = x_3
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = []
tg_agent.append(local_com_mat[b, i])
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid for grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid for grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent.append(warp_feat.type(dtype=torch.float32))
# for k in range(5-num_agent):
# tg_agent.append(padding_feat)
tg_agent=torch.stack(tg_agent)
tg_agent = self.adafusion(tg_agent)
# local_com_mat_update[b, i] = tg_agent[0]
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x_8, x_7, x_6, x_5 = self.decoder(x, x_1, x_2, feat_fuse_mat, x_4, batch_size)
x = x_8
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result, x_8, x_7, x_6, x_5, feat_fuse_mat
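# ----------------------------------------------------------------------
# Editorial usage sketch (hypothetical, not original code): the KD variant
# above also returns its decoder's intermediate maps and the fused feature
# map so a distillation loss can match them against a teacher network with
# the same interface. `teacher`, `student` and `kd_weight` are placeholders:
# result, s8, s7, s6, s5, s_fuse = student(bevs, trans_matrices, num_agent_tensor, batch_size=batch_size)
# with torch.no_grad():
#     _, t8, t7, t6, t5, t_fuse = teacher(bevs, trans_matrices, num_agent_tensor, batch_size=batch_size)
# kd_loss = kd_weight * sum(F.mse_loss(s, t) for s, t in
#                           [(s8, t8), (s7, t7), (s6, t6), (s5, t5), (s_fuse, t_fuse)])
# ----------------------------------------------------------------------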
'''''''''''''''''''''''''''''''''''''''''''''''''''
Online warp of layer 2
'''''''''''''''''''''''''''''''''''''''''''''''''''
class FaFMIMONet_128_64_64(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified'):
super(FaFMIMONet_128_64_64, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
self.adafusion = adafusionlayer(input_channel=128)
# Detection decoder
self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
device = bevs.device
size = (1, 128, 64, 64)
padding_feat = torch.zeros((128,64,64)).cuda()
# print('padding size:{}'.format(padding_feat.size()))
feat_maps = x_2
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = []
tg_agent.append(local_com_mat[b, i])
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid for grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid for grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent.append(warp_feat.type(dtype=torch.float32))
for k in range(5-num_agent):
tg_agent.append(padding_feat)
tg_agent=torch.stack(tg_agent)
tg_agent = self.adafusion(tg_agent)
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x = self.decoder(x,x_1,feat_fuse_mat,x_3,x_4,batch_size)
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
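# ----------------------------------------------------------------------
# Editorial note (sketch, not original code): unlike the 256x32x32 variant,
# this class pads the per-agent stack with zero feature maps so adafusion
# always receives a fixed number of inputs (5 here). Conceptually:
# feats = [tg] + warped_neighbours                     # len == num_agent
# feats += [torch.zeros_like(tg)] * (5 - num_agent)    # pad to 5 entries
# fused = self.adafusion(torch.stack(feats))           # -> fused [128, 64, 64] map
# ----------------------------------------------------------------------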
# with knowledge distillation
class FaFMIMONet_128_64_64_KD(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified'):
super(FaFMIMONet_128_64_64_KD, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
self.adafusion = adafusionlayer(input_channel=128)
# Detection decoder
self.decoder = lidar_decoder_kd(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
device = bevs.device
size = (1, 128, 64, 64)
padding_feat = torch.zeros((128,64,64)).cuda()
feat_maps = x_2
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = []
tg_agent.append(local_com_mat[b, i])
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid for grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid for grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent.append(warp_feat.type(dtype=torch.float32))
for k in range(5-num_agent):
tg_agent.append(padding_feat)
tg_agent=torch.stack(tg_agent)
tg_agent = self.adafusion(tg_agent)
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x_8, x_7, x_6, x_5 = self.decoder(x, x_1, feat_fuse_mat, x_3, x_4, batch_size)
x = x_8
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result, x_8, x_7, x_6, x_5
'''''''''''''''''''''''''''''''''''''''''''''''''''
Online warp of layer 1
'''''''''''''''''''''''''''''''''''''''''''''''''''
class FaFMIMONet_64_128_128(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified'):
super(FaFMIMONet_64_128_128, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
self.adafusion = adafusionlayer(input_channel=128)
# Detection decoder
self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
device = bevs.device
size = (1, 64, 128, 128)
padding_feat = torch.zeros((64,128,128)).cuda() # match the (64, 128, 128) layer-1 feature size
feat_maps = x_1
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = local_com_mat[b, i] # start from the ego feature map (this variant fuses by summation, not stacking)
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid for grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid for grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent = tg_agent + warp_feat.type(dtype=torch.float32)
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x = self.decoder(x,feat_fuse_mat,x_2,x_3,x_4,batch_size)
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
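# ----------------------------------------------------------------------
# Editorial note (sketch, not original code): this layer-1 variant and the
# layer-0 variant below fuse by simple summation instead of a learned
# fusion layer; each warped neighbour map is added onto the ego map:
# tg_agent = local_com_mat[b, i]
# for each neighbour j != i:
#     tg_agent = tg_agent + warp_feat_j        # element-wise sum fusion
# local_com_mat_update[b, i] = tg_agent
# ----------------------------------------------------------------------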
'''''''''''''''''''''''''''''''''''''''''''''''''''
Online warp of layer 0
'''''''''''''''''''''''''''''''''''''''''''''''''''
class FaFMIMONet_32_256_256(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified'):
super(FaFMIMONet_32_256_256, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
# Detection decoder
self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
device = bevs.device
size = (1, 32, 256, 256)
feat_maps = x
# print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = local_com_mat[b, i]
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size)) # build the sampling grid for grid_sample
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size)) # build the sampling grid for grid_sample
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
tg_agent = tg_agent + warp_feat.type(dtype=torch.float32)
local_com_mat_update[b, i] = tg_agent
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
x = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size)
# vis = vis.permute(0, 3, 1, 2)
# if not maps is None:
# x = torch.cat([x,maps],axis=-1)
# if not vis is None:
# x = torch.cat([x,vis],axis=1)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
'''''''''''''''''''''''''''''''''''''''''''''''''''
Online warp of layer 3 and 4
'''''''''''''''''''''''''''''''''''''''''''''''''''
class FaFMIMONet_layer_3_and_4(nn.Module):
def __init__(self, config, in_channels=13, shared_img_encoder='unified'):
super(FaFMIMONet_layer_3_and_4, self).__init__()
self.motion_state = config.motion_state
if config.only_det:
self.out_seq_len = 1
else:
self.out_seq_len = config.pred_len
self.box_code_size = config.box_code_size
self.category_num = config.category_num
self.use_map = config.use_map
self.anchor_num_per_loc = len(config.anchor_size)
self.classification = ClassificationHead(config)
self.regression = SingleRegressionHead(config)
self.u_encoder = lidar_encoder(height_feat_size=in_channels)
self.agent_num = 5
self.shared_img_encoder = shared_img_encoder
# Detection decoder
self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
agent_num = feats.shape[1]
feat_list = []
for i in range(agent_num):
feat_list.append(feats[:, i, :, :, :])
feat_mat = torch.cat(tuple(feat_list), 0)
return feat_mat
def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)
x,x_1,x_2,feat_maps_32,feat_maps = self.u_encoder(bevs)
device = bevs.device
size_16 = (1, 512, 16, 16)
size_32 = (1, 256, 32, 32)
# print(feat_maps.shape, feat_maps_32.shape, x_2.shape, x_1.shape)
# get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
feat_map = {}
feat_list = []
feat_map_32 = {}
feat_list_32 = []
for i in range(self.agent_num):
feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
feat_list.append(feat_map[i])
feat_map_32[i] = torch.unsqueeze(feat_maps_32[batch_size * i:batch_size * (i + 1)], 1)
feat_list_32.append(feat_map_32[i])
local_com_mat = torch.cat(tuple(feat_list), 1) # [2 5 512 16 16] [batch, agent, channel, height, width]
local_com_mat_update = torch.cat(tuple(feat_list), 1) # to avoid the inplace operation
local_com_mat_32 = torch.cat(tuple(feat_list_32), 1) # [2 5 256 32 32] [batch, agent, channel, height, width]
local_com_mat_update_32 = torch.cat(tuple(feat_list_32), 1) # to avoid the inplace operation
for b in range(batch_size):
num_agent = num_agent_tensor[b, 0]
for i in range(num_agent):
tg_agent = local_com_mat[b, i]
tg_agent_32 = local_com_mat_32[b, i]
all_warp = trans_matrices[b, i] # transformation [2 5 5 4 4]
for j in range(num_agent):
if j != i:
nb_agent = torch.unsqueeze(local_com_mat[b, j], 0) # [1 512 16 16]
nb_agent_32 = torch.unsqueeze(local_com_mat_32[b, j], 0) # [1 512 16 16]
nb_warp = all_warp[j] # [4 4]
# normalize the translation vector
x_trans = (4*nb_warp[0, 3])/128
y_trans = -(4*nb_warp[1, 3])/128
theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
theta_rot = torch.unsqueeze(theta_rot, 0)
grid_rot = F.affine_grid(theta_rot, size=torch.Size(size_16)) # build the sampling grid for grid_sample
grid_rot_32 = F.affine_grid(theta_rot, size=torch.Size(size_32))
theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
theta_trans = torch.unsqueeze(theta_trans, 0)
grid_trans = F.affine_grid(theta_trans, size=torch.Size(size_16)) # build the sampling grid for grid_sample
grid_trans_32 = F.affine_grid(theta_trans, size=torch.Size(size_32))
#first rotate the feature map, then translate it
warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
warp_feat = torch.squeeze(warp_feat_trans)
warp_feat_rot_32 = F.grid_sample(nb_agent_32, grid_rot_32)
warp_feat_trans_32 = F.grid_sample(warp_feat_rot_32, grid_trans_32)
warp_feat_32 = torch.squeeze(warp_feat_trans_32)
tg_agent = tg_agent + warp_feat.type(dtype=torch.float32)
tg_agent_32 = tg_agent_32 + warp_feat_32.type(dtype=torch.float32)
local_com_mat_update[b, i] = tg_agent
local_com_mat_update_32[b, i] = tg_agent_32
# the weighted feature maps are passed to the decoder
feat_fuse_mat = self.agents2batch(local_com_mat_update)
feat_fuse_mat_32 = self.agents2batch(local_com_mat_update_32)
x = self.decoder(x,x_1,x_2, feat_fuse_mat_32, feat_fuse_mat, batch_size)
# Cell Classification head
cls_preds = self.classification(x)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
# Detection head
loc_preds =self.regression(x)
loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
#loc_pred (N * T * W * H * loc)
result = {'loc': loc_preds,
'cls': cls_preds}
#MotionState head
if self.motion_state:
motion_cat = 3
motion_cls_preds = self.motion_cls(x)
motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
result['state'] = motion_cls_preds
return result
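# ----------------------------------------------------------------------
# Editorial note (sketch, not original code): the layer-3-and-4 variant
# reuses a single affine theta per neighbour and only swaps the grid size,
# warping the same agent at both the 512x16x16 and 256x32x32 scales:
# grid_16 = F.affine_grid(theta, size=torch.Size((1, 512, 16, 16)))
# grid_32 = F.affine_grid(theta, size=torch.Size((1, 256, 32, 32)))
# warped_16 = F.grid_sample(nb_feat_16, grid_16, mode='bilinear')
# warped_32 = F.grid_sample(nb_feat_32, grid_32, mode='bilinear')
# ----------------------------------------------------------------------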
| 45.191427
| 284
| 0.581372
| 10,952
| 76,961
| 3.807706
| 0.029949
| 0.029735
| 0.017673
| 0.010551
| 0.896096
| 0.873555
| 0.859167
| 0.83972
| 0.831903
| 0.818186
| 0
| 0.044734
| 0.296202
| 76,961
| 1,703
| 285
| 45.191427
| 0.725173
| 0.162355
| 0
| 0.777877
| 0
| 0.006244
| 0.050799
| 0.011599
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039251
| false
| 0.000892
| 0.009813
| 0
| 0.087422
| 0.002676
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b93bfb512de9ce86d7838288b27bae14a6a11280
| 208
|
py
|
Python
|
heyroom/views.py
|
shaojintian/heyroom
|
52df4cfcd7c3ef3f2f308535107b541a75feaa92
|
[
"MIT"
] | 3
|
2019-11-13T12:18:35.000Z
|
2019-11-23T13:30:29.000Z
|
heyroom/views.py
|
shaojintian/heyroom
|
52df4cfcd7c3ef3f2f308535107b541a75feaa92
|
[
"MIT"
] | null | null | null |
heyroom/views.py
|
shaojintian/heyroom
|
52df4cfcd7c3ef3f2f308535107b541a75feaa92
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render_to_response
def show_index(request):
return render_to_response('heyroom/index.html')
def get_double11(request):
return render_to_response('heyroom/demo.html')
| 20.8
| 51
| 0.788462
| 29
| 208
| 5.37931
| 0.586207
| 0.153846
| 0.307692
| 0.269231
| 0.461538
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0.010929
| 0.120192
| 208
| 9
| 52
| 23.111111
| 0.84153
| 0
| 0
| 0
| 0
| 0
| 0.169903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
b941a780da10304defcc2161dad3ad0e422a5335
| 65,921
|
py
|
Python
|
ub/modules/group.py
|
parv779/javes-3.0
|
d510717b2756a65b39ff18d9f53d4adc46d8e23f
|
[
"MIT"
] | 15
|
2020-12-13T17:37:05.000Z
|
2021-06-23T00:00:49.000Z
|
ub/modules/group.py
|
parv779/javes-3.0
|
d510717b2756a65b39ff18d9f53d4adc46d8e23f
|
[
"MIT"
] | 2
|
2021-01-11T16:39:31.000Z
|
2021-01-25T22:35:28.000Z
|
ub/modules/group.py
|
parv779/javes-3.0
|
d510717b2756a65b39ff18d9f53d4adc46d8e23f
|
[
"MIT"
] | 78
|
2020-12-13T17:52:51.000Z
|
2022-03-24T03:43:09.000Z
|
from asyncio import sleep
from os import remove
from ub.modules.sql_helper.mute_sql import is_muted, mute, unmute
from asyncio import sleep
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
import asyncio
from telethon import events
from datetime import datetime, timedelta
from telethon.utils import get_display_name
from telethon.tl.types import ChannelParticipantCreator as owner
from telethon.tl.types import UserStatusEmpty, UserStatusLastMonth, UserStatusLastWeek, UserStatusOffline, UserStatusOnline, UserStatusRecently, ChannelParticipantsKicked, ChatBannedRights
from telethon.tl import functions, types
from time import sleep
from telethon import events
from telethon.utils import pack_bot_file_id
from ub.modules.sql_helper.rkwelcome_sql import get_current_rkwelcome_settings, \
add_rkwelcome_setting, rm_rkwelcome_setting, update_previous_rkwelcome
from telethon import events, utils
from telethon.tl import types
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from ub.events import javes05
from ub import CMD_HELP, bot, LOGS, CLEAN_WELCOME, BOTLOG_CHATID
from telethon.events import ChatAction
import datetime
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetHistoryRequest, CheckChatInviteRequest, GetFullChatRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.tl.types import ChannelParticipantCreator
from telethon.errors import (ChannelInvalidError, ChannelPrivateError, ChannelPublicGroupNaError, InviteHashEmptyError, InviteHashExpiredError, InviteHashInvalidError)
from telethon.utils import get_input_location
from ub import CMD_HELP
from telethon.tl import functions, types
from telethon import functions
from ub.events import javes05
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
import html
from telethon.tl.functions.channels import EditBannedRequest
import ub.modules.sql_helper.warns_sql as sql
from telethon.tl.types import MessageEntityMentionName
from os import remove
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from telethon.errors import (BadRequestError, ChatAdminRequiredError,ImageProcessFailedError, PhotoCropSizeSmallError,UserAdminInvalidError)
from telethon.errors.rpcerrorlist import (UserIdInvalidError,MessageTooLongError)
from telethon.tl.functions.channels import (EditAdminRequest,EditBannedRequest,EditPhotoRequest)
from telethon.tl.functions.messages import UpdatePinnedMessageRequest
from telethon.tl.types import (PeerChannel, ChannelParticipantsAdmins,ChatAdminRights, ChatBannedRights,MessageEntityMentionName, MessageMediaPhoto,ChannelParticipantsBots)
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from ub import CMD_HELP, bot, LOGS, CLEAN_WELCOME, BOTLOG_CHATID
from telethon.events import ChatAction
from asyncio import sleep
import asyncio
import io
import re
import ub.modules.sql_helper.blacklist_sql as sql
from telethon import events, utils
from telethon.tl import types, functions
from ub import CMD_HELP, bot
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP
from asyncio import sleep
from telethon.tl.functions.messages import EditChatDefaultBannedRightsRequest
from telethon.tl.types import ChatBannedRights
from ub import CMD_HELP
from re import fullmatch, IGNORECASE, escape
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP
from requests import get
from telethon.events import ChatAction
from telethon.tl.types import ChannelParticipantsAdmins, Message
import asyncio
import re
from ub.events import javes05
from telethon import events, utils
from telethon.tl import types
from ub.modules.sql_helper.rkfilter_sql import get_filter, add_filter, remove_filter, get_all_rkfilters, remove_all_rkfilters
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP, ANTI_SPAMBOT, ANTI_SPAMBOT_SHOUT, bot
from telethon.errors import (BadRequestError, ChatAdminRequiredError,ImageProcessFailedError, PhotoCropSizeSmallError,UserAdminInvalidError)
from telethon.errors.rpcerrorlist import (UserIdInvalidError,MessageTooLongError)
from telethon.tl.functions.channels import (EditAdminRequest,EditBannedRequest,EditPhotoRequest)
from telethon.tl.functions.messages import UpdatePinnedMessageRequest
from telethon.tl.types import (PeerChat, PeerChannel,ChannelParticipantsAdmins, ChatAdminRights,ChatBannedRights, MessageEntityMentionName,MessageMediaPhoto, ChannelParticipantsBots)
from ub import BOTLOG, BOTLOG_CHATID, CMD_HELP, bot
from ub import bot
from ub.events import rekcah05, command
from ub.events import javes05
from telethon.tl.functions.messages import EditChatDefaultBannedRightsRequest
from telethon.tl.types import ChatBannedRights
from ub import CMD_HELP
from ub import CMD_HELP, ALIVE_NAME, PM_MESSAGE, JAVES_NAME, JAVES_MSG, ORI_MSG
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
javes = bot
from telethon.events import *
# =================== CONSTANT ===================
PP_TOO_SMOL = f"`{JAVES_NNAME}:`**The image is too small**"
PP_ERROR = f"`{JAVES_NNAME}:`**Failure while processing the image**"
NO_ADMIN = f"`{JAVES_NNAME}:`**Sorry, I'm not able to get admin rights here!**"
NO_PERM = f"`{JAVES_NNAME}:`**I don't have sufficient permissions!**"
NO_SQL = f"`{JAVES_NNAME}:`**Running on Non-SQL mode!**"
CHAT_PP_CHANGED = f"`{JAVES_NNAME}:`**Chat Picture Changed**"
CHAT_PP_ERROR = f"`{JAVES_NNAME}:`**Something went wrong while updating the picture,**" \
"**maybe because I'm not an admin,**" \
"**or I don't have enough rights.**"
INVALID_MEDIA = "`Invalid Extension`"
BANNED_RIGHTS = ChatBannedRights(
until_date=None,
view_messages=True,
send_messages=True,
send_media=True,
send_stickers=True,
send_gifs=True,
send_games=True,
send_inline=True,
embed_links=True,
)
UNBAN_RIGHTS = ChatBannedRights(
until_date=None,
send_messages=None,
send_media=None,
send_stickers=None,
send_gifs=None,
send_games=None,
send_inline=None,
embed_links=None,
)
MUTE_RIGHTS = ChatBannedRights(until_date=None, send_messages=True)
UNMUTE_RIGHTS = ChatBannedRights(until_date=None, send_messages=False)
# ================================================
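# Editorial sketch (hypothetical, not original code): these ChatBannedRights
# presets are meant to be passed to EditBannedRequest, as the handlers below
# do for deleted accounts. For example, to mute and later unmute a user
# (chat_id and user_id are placeholders):
# await bot(EditBannedRequest(chat_id, user_id, MUTE_RIGHTS))
# await bot(EditBannedRequest(chat_id, user_id, UNMUTE_RIGHTS))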
DELETE_TIMEOUT = 0
TYPE_TEXT = 0
TYPE_PHOTO = 1
TYPE_DOCUMENT = 2
global last_triggered_rkfilters
last_triggered_rkfilters = {} # pylint:disable=E0602
#filters logic
@javes.on(events.NewMessage(incoming=True))
async def on_snip(event):
global last_triggered_rkfilters
name = event.raw_text
if event.chat_id in last_triggered_rkfilters:
if name in last_triggered_rkfilters[event.chat_id]:
# avoid ub spam
# "I demand rights for us bots, we are equal to you humans." -Henri Koivuneva (t.me/UserbotTesting/2698)
return False
snips = get_all_rkfilters(event.chat_id)
if snips:
for snip in snips:
pattern = r"( |^|[^\w])" + re.escape(snip.keyword) + r"( |$|[^\w])"
if re.search(pattern, name, flags=re.IGNORECASE):
if snip.snip_type == TYPE_PHOTO:
media = types.InputPhoto(
int(snip.media_id),
int(snip.media_access_hash),
snip.media_file_reference
)
elif snip.snip_type == TYPE_DOCUMENT:
media = types.InputDocument(
int(snip.media_id),
int(snip.media_access_hash),
snip.media_file_reference
)
else:
media = None
message_id = event.message.id
if event.reply_to_msg_id:
message_id = event.reply_to_msg_id
await event.reply(
snip.reply,
file=media
)
if event.chat_id not in last_triggered_rkfilters:
last_triggered_rkfilters[event.chat_id] = []
last_triggered_rkfilters[event.chat_id].append(name)
await asyncio.sleep(DELETE_TIMEOUT)
last_triggered_rkfilters[event.chat_id].remove(name)
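# Editorial note (sketch, not original code): the pattern above fires only
# when the keyword appears as a whole word (bounded by start/end, a space,
# or a non-word character), so a filter saved for "hi" does not trigger on
# "this". A quick hypothetical self-check:
# pattern = r"( |^|[^\w])" + re.escape("hi") + r"( |$|[^\w])"
# assert re.search(pattern, "hi there", flags=re.IGNORECASE)
# assert not re.search(pattern, "this", flags=re.IGNORECASE)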
@javes.on(events.NewMessage(incoming=True))
async def filter_incoming_handler(handler):
#filters logic
try:
if not (await handler.get_sender()).bot:
try:
from ub.modules.sql_helper.filter_sql import get_filters
except AttributeError:
await handler.edit("`Running on Non-SQL mode!`")
return
name = handler.raw_text
filters = get_filters(handler.chat_id)
if not filters:
return
for trigger in filters:
pro = fullmatch(trigger.keyword, name, flags=IGNORECASE)
if pro and trigger.f_mesg_id:
msg_o = await handler.client.get_messages(
entity=BOTLOG_CHATID, ids=int(trigger.f_mesg_id))
await handler.reply(msg_o.message, file=msg_o.media)
elif pro and trigger.reply:
await handler.reply(trigger.reply)
except AttributeError:
pass
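# Editorial note (sketch, not original code): unlike the rkfilter handler
# above (which uses re.search with a word-boundary pattern), this handler
# uses fullmatch, so the saved keyword must match the entire incoming
# message, case-insensitively:
# fullmatch("hello", "hello", flags=IGNORECASE)        -> match
# fullmatch("hello", "hello there", flags=IGNORECASE)  -> None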
@javes05(outgoing=True, disable_errors=True, pattern="^\!userid$")
async def useridgetter(target):
""" For .userid command, returns the ID of the target user. """
message = await target.get_reply_message()
if message:
if not message.forward:
user_id = message.sender.id
if message.sender.username:
name = "@" + message.sender.username
else:
name = "**" + message.sender.first_name + "**"
else:
user_id = message.forward.sender.id
if message.forward.sender.username:
name = "@" + message.forward.sender.username
else:
name = "*" + message.forward.sender.first_name + "*"
await target.edit(" **Name:** {} \n**User ID:** `{}`".format(
name, user_id))
@javes.on(rekcah05(pattern=f"userid$", allow_sudo=True))
async def useridgetter(target):
""" For .userid command, returns the ID of the target user. """
message = await target.get_reply_message()
if message:
if not message.forward:
user_id = message.sender.id
if message.sender.username:
name = "@" + message.sender.username
else:
name = "**" + message.sender.first_name + "**"
else:
user_id = message.forward.sender.id
if message.forward.sender.username:
name = "@" + message.forward.sender.username
else:
name = "*" + message.forward.sender.first_name + "*"
await target.reply(" **Name:** {} \n**User ID:** `{}`".format(
name, user_id))
@javes05(outgoing=True, disable_errors=True, pattern="^\!link(?: |$)(.*)")
async def permalink(mention):
""" For .link command, generates a link to the user's PM with a custom text. """
user, custom = await get_user_from_event(mention)
if not user:
return
if custom:
await mention.edit(f"[{custom}](tg://user?id={user.id})")
else:
tag = user.first_name.replace("\u2060",
"") if user.first_name else user.username
await mention.edit(f"`{JAVES_NNAME}`: [{tag}](tg://user?id={user.id})")
@javes.on(rekcah05(pattern=f"link(?: |$)(.*)", allow_sudo=True))
async def permalink(mention):
""" For .link command, generates a link to the user's PM with a custom text. """
user, custom = await get_user_from_event(mention)
if not user:
return
if custom:
await mention.reply(f"[{custom}](tg://user?id={user.id})")
else:
tag = user.first_name.replace("\u2060",
"") if user.first_name else user.username
await mention.reply(f"`{JAVES_NNAME}`: [{tag}](tg://user?id={user.id})")
@javes05(outgoing=True, disable_errors=True, pattern="^\!chatid$")
async def chatidgetter(chat):
""" For .chatid, returns the ID of the chat you are in at that moment. """
await chat.edit(f"`{JAVES_NNAME}`: Chat ID: `" + str(chat.chat_id) + "`")
@javes.on(rekcah05(pattern=f"chatid$", allow_sudo=True))
async def chatidgetter(chat):
""" For .chatid, returns the ID of the chat you are in at that moment. """
await chat.reply(f"`{JAVES_NNAME}`: Chat ID: `" + str(chat.chat_id) + "`")
@javes05(outgoing=True, disable_errors=True, pattern=r"^\!log(?: |$)([\s\S]*)")
async def log(log_text):
""" For .log command, forwards a message or the command argument to the bot logs group """
if BOTLOG:
if log_text.reply_to_msg_id:
reply_msg = await log_text.get_reply_message()
await reply_msg.forward_to(BOTLOG_CHATID)
elif log_text.pattern_match.group(1):
user = f"#LOG / Chat ID: {log_text.chat_id}\n\n"
textx = user + log_text.pattern_match.group(1)
await bot.send_message(BOTLOG_CHATID, textx)
else:
await log_text.edit(f"`{JAVES_NNAME}`: **What am I supposed to log?**")
return
await log_text.edit(f"`{JAVES_NNAME}`: **Logged Successfully**")
else:
await log_text.edit(f"`{JAVES_NNAME}`: **This feature requires Logging to be enabled!**")
await sleep(2)
await log_text.delete()
@javes.on(rekcah05(pattern=f"log$", allow_sudo=True))
async def iqless(e):
await e.reply(f"`{JAVES_NNAME}`: **Privacy error! Sorry, sudo users don't have permission to access this!**")
@javes05(outgoing=True, disable_errors=True, pattern="^\!kickme$")
async def kickme(leave):
""" Basically it's .kickme command """
await leave.edit(f"`{JAVES_NNAME}`: **My master didn't like this place... Goodbye!**")
await leave.client.kick_participant(leave.chat_id, 'me')
@javes.on(rekcah05(pattern=f"kickme$", allow_sudo=True))
async def iqless(e):
await e.reply(f"`{JAVES_NNAME}`: **Privacy error! Sorry, sudo users don't have permission to access this!**")
@javes05(outgoing=True, disable_errors=True, pattern="^\!delusers(?: |$)(.*)", groups_only=True)
async def rm_deletedacc(show):
""" For .delusers command, list all the ghost/deleted accounts in a chat. """
if not show.is_group:
await show.edit(f"`{JAVES_NNAME}:` ** This is not a group.**")
return
con = show.pattern_match.group(1).lower()
del_u = 0
del_status = f"`{JAVES_NNAME}:` **No deleted accounts found**"
if con != "clean":
await show.edit(f"`{JAVES_NNAME}:` ** Searching for deleted accounts...**")
async for user in show.client.iter_participants(show.chat_id):
if user.deleted:
del_u += 1
await sleep(1)
if del_u > 0:
del_status = f"`{JAVES_NNAME}:` Found **{del_u}** deleted account(s) in this group,\
\nclean them by using `!delusers clean`"
await show.edit(del_status)
return
# Sanity check: we need admin or creator rights
chat = await show.get_chat()
admin = chat.admin_rights
creator = chat.creator
# Well
if not admin and not creator:
await show.edit(f"`{JAVES_NNAME}:` **Sorry, I'm not able to get admin rights here**")
return
await show.edit(f"`{JAVES_NNAME}:` ** Removing deleted accounts...**")
del_u = 0
del_a = 0
async for user in show.client.iter_participants(show.chat_id):
if user.deleted:
try:
await show.client(
EditBannedRequest(show.chat_id, user.id, BANNED_RIGHTS))
except ChatAdminRequiredError:
await show.edit(f"`{JAVES_NNAME}:` **Sorry, I don't have ban rights in this group**")
return
except UserAdminInvalidError:
del_u -= 1
del_a += 1
await show.client(
EditBannedRequest(show.chat_id, user.id, UNBAN_RIGHTS))
del_u += 1
if del_u > 0:
del_status = f"`{JAVES_NNAME}`: Cleaned **{del_u}** deleted account(s)"
if del_a > 0:
del_status = f"`{JAVES_NNAME}`: Cleaned **{del_u}** deleted account(s) \
\n**{del_a}** deleted admin accounts are not removed"
await show.edit(del_status)
await sleep(2)
await show.delete()
if BOTLOG:
await show.client.send_message(
BOTLOG_CHATID, "#CLEANUP\n"
f"Cleaned **{del_u}** deleted account(s) !!\
\nCHAT: {show.chat.title}(`{show.chat_id}`)")
@javes.on(rekcah05(pattern=f"delusers(?: |$)(.*)", allow_sudo=True))
async def rm_deletedacc(show):
""" For .delusers command, list all the ghost/deleted accounts in a chat. """
if not show.is_group:
await show.reply(f"`{JAVES_NNAME}:` ** This is not a group.**")
return
con = show.pattern_match.group(1).lower()
del_u = 0
del_status = f"`{JAVES_NNAME}:` **No deleted accounts found**"
if con != "clean":
await show.reply(f"`{JAVES_NNAME}:` ** Searching for deleted accounts...**")
async for user in show.client.iter_participants(show.chat_id):
if user.deleted:
del_u += 1
await sleep(1)
if del_u > 0:
del_status = f"`{JAVES_NNAME}:` Found **{del_u}** deleted account(s) in this group,\
\nclean them by using `!delusers clean`"
await show.reply(del_status)
return
# Sanity check: we need admin or creator rights
chat = await show.get_chat()
admin = chat.admin_rights
creator = chat.creator
# Well
if not admin and not creator:
await show.reply(f"`{JAVES_NNAME}:` **Sorry, I'm not able to get admin rights here**")
return
await show.reply(f"`{JAVES_NNAME}:` ** Removing deleted accounts...**")
del_u = 0
del_a = 0
async for user in show.client.iter_participants(show.chat_id):
if user.deleted:
try:
await show.client(
EditBannedRequest(show.chat_id, user.id, BANNED_RIGHTS))
except ChatAdminRequiredError:
await show.reply(f"`{JAVES_NNAME}:` **Sorry, I don't have ban rights in this group**")
return
except UserAdminInvalidError:
del_u -= 1
del_a += 1
await show.client(
EditBannedRequest(show.chat_id, user.id, UNBAN_RIGHTS))
del_u += 1
if del_u > 0:
del_status = f"`{JAVES_NNAME}`: Cleaned **{del_u}** deleted account(s)"
if del_a > 0:
del_status = f"`{JAVES_NNAME}`: Cleaned **{del_u}** deleted account(s) \
\n**{del_a}** deleted admin accounts are not removed"
await show.reply(del_status)
await sleep(2)
await show.delete()
if BOTLOG:
await show.client.send_message(
BOTLOG_CHATID, "#CLEANUP\n"
f"Cleaned **{del_u}** deleted account(s) !!\
\nCHAT: {show.chat.title}(`{show.chat_id}`)")
@javes05(outgoing=True, disable_errors=True, pattern="^\!admins$", groups_only=True)
async def get_admin(show):
""" For .admins command, list all of the admins of the chat. """
info = await show.client.get_entity(show.chat_id)
title = info.title if info.title else "this chat"
mentions = f'<b>Admins in {title}:</b> \n'
try:
async for user in show.client.iter_participants(
show.chat_id, filter=ChannelParticipantsAdmins):
if not user.deleted:
link = f"<a href=\"tg://user?id={user.id}\">{user.first_name}</a>"
userid = f"<code>{user.id}</code>"
mentions += f"\n{link} {userid}"
else:
mentions += f"\nDeleted Account <code>{user.id}</code>"
except ChatAdminRequiredError as err:
mentions += " " + str(err) + "\n"
try:
await show.edit(mentions, parse_mode="html")
except MessageTooLongError:
await show.edit(
f"`{JAVES_NNAME}`: **Too many admins here. Uploading admin list as file**")
file = open("adminlist.txt", "w+")
file.write(mentions)
file.close()
await show.client.send_file(
show.chat_id,
"adminlist.txt",
caption='Admins in {}'.format(title),
reply_to=show.id,
)
remove("adminlist.txt")
@javes.on(rekcah05(pattern=f"admins$", allow_sudo=True))
async def get_admin(show):
""" For .admins command, list all of the admins of the chat. """
info = await show.client.get_entity(show.chat_id)
title = info.title if info.title else "this chat"
mentions = f'<b>Admins in {title}:</b> \n'
try:
async for user in show.client.iter_participants(
show.chat_id, filter=ChannelParticipantsAdmins):
if not user.deleted:
link = f"<a href=\"tg://user?id={user.id}\">{user.first_name}</a>"
userid = f"<code>{user.id}</code>"
mentions += f"\n{link} {userid}"
else:
mentions += f"\nDeleted Account <code>{user.id}</code>"
except ChatAdminRequiredError as err:
mentions += " " + str(err) + "\n"
try:
await show.reply(mentions, parse_mode="html")
except MessageTooLongError:
await show.reply(
f"`{JAVES_NNAME}`: **Too many admins here. Uploading admin list as file**")
file = open("adminlist.txt", "w+")
file.write(mentions)
file.close()
await show.client.send_file(
show.chat_id,
"adminlist.txt",
caption='Admins in {}'.format(title),
reply_to=show.id,
)
remove("adminlist.txt")
@javes05(outgoing=True, disable_errors=True, pattern="^\!bots$", groups_only=True)
async def get_bots(show):
""" For .bots command, list all of the bots of the chat. """
info = await show.client.get_entity(show.chat_id)
title = info.title if info.title else "this chat"
mentions = f'<b>Bots in {title}:</b>\n'
try:
if isinstance(show.to_id, PeerChat):
await show.edit("`I heard that only Supergroups can have bots.`")
return
else:
async for user in show.client.iter_participants(
show.chat_id, filter=ChannelParticipantsBots):
if not user.deleted:
link = f"<a href=\"tg://user?id={user.id}\">{user.first_name}</a>"
userid = f"<code>{user.id}</code>"
mentions += f"\n{link} {userid}"
else:
mentions += f"\nDeleted Bot <code>{user.id}</code>"
except ChatAdminRequiredError as err:
mentions += " " + str(err) + "\n"
try:
await show.edit(mentions, parse_mode="html")
except MessageTooLongError:
await show.edit(
f"`{JAVES_NNAME}`: ** Too many bots here. Uploading bots list as file.**")
file = open("botlist.txt", "w+")
file.write(mentions)
file.close()
await show.client.send_file(
show.chat_id,
"botlist.txt",
caption='Bots in {}'.format(title),
reply_to=show.id,
)
remove("botlist.txt")
@javes.on(rekcah05(pattern=f"bots$", allow_sudo=True))
async def get_bots(show):
""" For .bots command, list all of the bots of the chat. """
info = await show.client.get_entity(show.chat_id)
title = info.title if info.title else "this chat"
mentions = f'<b>Bots in {title}:</b>\n'
try:
if isinstance(show.to_id, PeerChat):
await show.reply("`I heard that only Supergroups can have bots.`")
return
else:
async for user in show.client.iter_participants(
show.chat_id, filter=ChannelParticipantsBots):
if not user.deleted:
link = f"<a href=\"tg://user?id={user.id}\">{user.first_name}</a>"
userid = f"<code>{user.id}</code>"
mentions += f"\n{link} {userid}"
else:
mentions += f"\nDeleted Bot <code>{user.id}</code>"
except ChatAdminRequiredError as err:
mentions += " " + str(err) + "\n"
try:
await show.reply(mentions, parse_mode="html")
except MessageTooLongError:
await show.reply(
f"`{JAVES_NNAME}`: ** Too many bots here. Uploading bots list as file.**")
file = open("botlist.txt", "w+")
file.write(mentions)
file.close()
await show.client.send_file(
show.chat_id,
"botlist.txt",
caption='Bots in {}'.format(title),
reply_to=show.id,
)
remove("botlist.txt")
@javes05(outgoing=True, disable_errors=True, pattern="^\!users ?(.*)", groups_only=True)
async def get_users(show):
""" For .users command, list all of the users in a chat. """
info = await show.client.get_entity(show.chat_id)
title = info.title if info.title else "this chat"
mentions = 'Users in {}: \n'.format(title)
try:
if not show.pattern_match.group(1):
async for user in show.client.iter_participants(show.chat_id):
if not user.deleted:
mentions += f"\n[{user.first_name}](tg://user?id={user.id}) `{user.id}`"
else:
mentions += f"\nDeleted Account `{user.id}`"
else:
searchq = show.pattern_match.group(1)
async for user in show.client.iter_participants(
show.chat_id, search=f'{searchq}'):
if not user.deleted:
mentions += f"\n[{user.first_name}](tg://user?id={user.id}) `{user.id}`"
else:
mentions += f"\nDeleted Account `{user.id}`"
except ChatAdminRequiredError as err:
mentions += " " + str(err) + "\n"
try:
await show.edit(mentions)
except MessageTooLongError:
await show.edit(
f"`{JAVES_NNAME}`: **This is a huge group. Uploading the user list as a file.**")
file = open("userslist.txt", "w+")
file.write(mentions)
file.close()
await show.client.send_file(
show.chat_id,
"userslist.txt",
caption='Users in {}'.format(title),
reply_to=show.id,
)
remove("userslist.txt")
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit(f"`{JAVES_NNAME}`: ** Pass the user's username, id or reply!**")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj, extra
async def get_user_from_id(user, event):
if isinstance(user, str):
user = int(user)
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj
@javes.on(rekcah05(pattern=f"users ?(.*)", allow_sudo=True))
async def get_users(show):
""" For .users command, list all of the users in a chat. """
info = await show.client.get_entity(show.chat_id)
title = info.title if info.title else "this chat"
mentions = 'Users in {}: \n'.format(title)
try:
if not show.pattern_match.group(1):
async for user in show.client.iter_participants(show.chat_id):
if not user.deleted:
mentions += f"\n[{user.first_name}](tg://user?id={user.id}) `{user.id}`"
else:
mentions += f"\nDeleted Account `{user.id}`"
else:
searchq = show.pattern_match.group(1)
async for user in show.client.iter_participants(
show.chat_id, search=f'{searchq}'):
if not user.deleted:
mentions += f"\n[{user.first_name}](tg://user?id={user.id}) `{user.id}`"
else:
mentions += f"\nDeleted Account `{user.id}`"
except ChatAdminRequiredError as err:
mentions += " " + str(err) + "\n"
try:
await show.reply(mentions)
except MessageTooLongError:
await show.reply(
f"`{JAVES_NNAME}`: ** This is a huge group. Uploading users lists as file.")
file = open("userslist.txt", "w+")
file.write(mentions)
file.close()
await show.client.send_file(
show.chat_id,
"userslist.txt",
caption='Users in {}'.format(title),
reply_to=show.id,
)
remove("userslist.txt")
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.reply(f"`{JAVES_NNAME}`: ** Pass the user's username, id or reply!**")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj, extra
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return user_obj, extra
async def get_user_from_id(user, event):
if isinstance(user, str):
user = int(user)
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return user_obj
@javes05(outgoing=True, disable_errors=True, pattern="^\!savefilter2 (\w*)")
async def add_new_filter(new_handler):
""" For .filter command, allows adding new filters in a chat """
try:
from ub.modules.sql_helper.filter_sql import add_filter
except AttributeError:
await new_handler.edit("`Running on Non-SQL mode!`")
return
keyword = new_handler.pattern_match.group(1)
string = new_handler.text.partition(keyword)[2]
msg = await new_handler.get_reply_message()
msg_id = None
if msg and msg.media and not string:
if BOTLOG_CHATID:
await new_handler.client.send_message(
BOTLOG_CHATID, f"#FILTER\
\nCHAT ID: {new_handler.chat_id}\
\nTRIGGER: {keyword}\
\n\nThe following message is saved as the filter's reply data for the chat, please do NOT delete it!!"
)
msg_o = await new_handler.client.forward_messages(
entity=BOTLOG_CHATID,
messages=msg,
from_peer=new_handler.chat_id,
silent=True)
msg_id = msg_o.id
else:
await new_handler.edit(
f"`{JAVES_NNAME}`: ** Saving media as reply to the filter requires the BOTLOG_CHATID to be set.**"
)
return
elif new_handler.reply_to_msg_id and not string:
rep_msg = await new_handler.get_reply_message()
string = rep_msg.text
success = " `Filter` **{}** `{} successfully`"
if add_filter(str(new_handler.chat_id), keyword, string, msg_id) is True:
await new_handler.edit(success.format(keyword, 'added'))
else:
await new_handler.edit(success.format(keyword, 'updated'))
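# Hedged sketch (illustrative only, not part of the original module): assuming the same
# filter_sql helpers used further below, one could verify that a keyword was stored for a
# chat like this; `chat_id` and "greeting" are placeholder values.
#
# from ub.modules.sql_helper.filter_sql import get_filters
# saved_keywords = [f.keyword for f in get_filters(chat_id)]
# assert "greeting" in saved_keywords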
@javes.on(rekcah05(pattern=f"savefilter2 (\w*)", allow_sudo=True))
async def add_new_filter(new_handler):
""" For .filter command, allows adding new filters in a chat """
try:
from ub.modules.sql_helper.filter_sql import add_filter
except AttributeError:
await new_handler.reply("`Running on Non-SQL mode!`")
return
keyword = new_handler.pattern_match.group(1)
string = new_handler.text.partition(keyword)[2]
msg = await new_handler.get_reply_message()
msg_id = None
if msg and msg.media and not string:
if BOTLOG_CHATID:
await new_handler.client.send_message(
BOTLOG_CHATID, f"#FILTER\
\nCHAT ID: {new_handler.chat_id}\
\nTRIGGER: {keyword}\
\n\nThe following message is saved as the filter's reply data for the chat, please do NOT delete it!!"
)
msg_o = await new_handler.client.forward_messages(
entity=BOTLOG_CHATID,
messages=msg,
from_peer=new_handler.chat_id,
silent=True)
msg_id = msg_o.id
else:
await new_handler.reply(
f"`{JAVES_NNAME}`: ** Saving media as reply to the filter requires the BOTLOG_CHATID to be set.**"
)
return
elif new_handler.reply_to_msg_id and not string:
rep_msg = await new_handler.get_reply_message()
string = rep_msg.text
success = " `Filter` **{}** `{} successfully`"
if add_filter(str(new_handler.chat_id), keyword, string, msg_id) is True:
await new_handler.reply(success.format(keyword, 'added'))
else:
await new_handler.reply(success.format(keyword, 'updated'))
@javes05(outgoing=True, disable_errors=True, pattern="^\!clearfilter2 (\w*)")
async def remove_a_filter(r_handler):
""" For .stop command, allows you to remove a filter from a chat. """
try:
from ub.modules.sql_helper.filter_sql import remove_filter
except AttributeError:
await r_handler.edit("`Running on Non-SQL mode!`")
return
filt = r_handler.pattern_match.group(1)
if not remove_filter(r_handler.chat_id, filt):
await r_handler.edit("`Filter` **{}** `doesn't exist.`".format(filt))
else:
await r_handler.edit(
"`Filter` **{}** `was deleted successfully`".format(filt))
@javes.on(rekcah05(pattern=f"clearfilter2 ?(.*)", allow_sudo=True))
async def remove_a_filter(r_handler):
""" For .stop command, allows you to remove a filter from a chat. """
try:
from ub.modules.sql_helper.filter_sql import remove_filter
except AttributeError:
await r_handler.reply("`Running on Non-SQL mode!`")
return
filt = r_handler.pattern_match.group(1)
if not remove_filter(r_handler.chat_id, filt):
await r_handler.reply("`Filter` **{}** `doesn't exist.`".format(filt))
else:
await r_handler.reply(
"`Filter` **{}** `was deleted successfully`".format(filt))
@javes05(outgoing=True, disable_errors=True, pattern="^\!checkfilter2$")
async def filters_active(event):
""" For .filters command, lists all of the active filters in a chat. """
try:
from ub.modules.sql_helper.filter_sql import get_filters
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
transact = f"`{JAVES_NNAME}`: ** There are no filters in this chat.**"
filters = get_filters(event.chat_id)
for filt in filters:
if transact == "`There are no filters in this chat.`":
transact = "Active filters in this chat:\n"
transact += "`{}`\n".format(filt.keyword)
else:
transact += "`{}`\n".format(filt.keyword)
await event.edit(transact)
@javes.on(rekcah05(pattern=f"checkfilter2$", allow_sudo=True))
async def filters_active(event):
""" For .filters command, lists all of the active filters in a chat. """
try:
from ub.modules.sql_helper.filter_sql import get_filters
except AttributeError:
await event.reply("`Running on Non-SQL mode!`")
return
transact = f"`{JAVES_NNAME}`: ** There are no filters in this chat.**"
filters = get_filters(event.chat_id)
for filt in filters:
if transact == "`There are no filters in this chat.`":
transact = "Active filters in this chat:\n"
transact += "`{}`\n".format(filt.keyword)
else:
transact += "`{}`\n".format(filt.keyword)
await event.reply(transact)
@javes05(pattern="!chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
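# The small-chat request is tried first; for channels and supergroups it fails and the
# code falls back to GetFullChannelRequest below.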
try:
chat_info = await event.client(GetFullChatRequest(chat))
except:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
async for x in javes.iter_participants(chat.full_chat.id):
a=x.status
b=x.participant
if isinstance(b, owner):
#c=f"[{get_display_name(x)}](tg://user?id={x.id})"
global creator_id,creator_username,creator_firstname
creator_id=x.id
creator_username=x.username
creator_firstname=x.first_name  ## solved by Sh1vam
#creator_id = creator_id
#creator_firstname = creator_firstname
#creator_username = creator_username
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and type(msg_info.messages[0].action) is MessageActionChannelMigrateFrom and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
location = str(e)
#this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(chat.full_chat, "kicked_count") else None
restricted_users = chat.full_chat.banned_count if hasattr(chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info, "megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info, "restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info, "verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(creator_username) if creator_username else None
#end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None, works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
if former_title is not None:  # i.e. the chat's very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
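# Hedged note: this "level" is an informal heuristic derived from the pts update counter,
# not an official Telegram metric. Worked example: exp_count = 1400 gives
# int((1 + sqrt(1 + 7 * 1400 / 14)) / 2) = int((1 + sqrt(701)) / 2) = 13.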
chat_level = int((1+sqrt(1+7*exp_count/14))/2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restricted_users is not None:
caption += f"Restricted users: <code>{restricted_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
@javes.on(rekcah05(pattern=f"chatinfo(?: |$)(.*)", allow_sudo=True))
async def info(event):
await event.reply("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.reply(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.reply("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
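# The small-chat request is tried first; for channels and supergroups it fails and the
# code falls back to GetFullChannelRequest below.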
try:
chat_info = await event.client(GetFullChatRequest(chat))
except:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.reply("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.reply("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.reply("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
async for x in javes.iter_participants(chat.full_chat.id):
a=x.status
b=x.participant
if isinstance(b, owner):
#c=f"[{get_display_name(x)}](tg://user?id={x.id})"
global creator_id,creator_username,creator_firstname
creator_id=x.id
creator_username=x.username
creator_firstname=x.first_name  ## solved by Sh1vam
#creator_id = creator_id=x.id
#creator_firstname = creator_firstname
#creator_username = creator_username
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and type(msg_info.messages[0].action) is MessageActionChannelMigrateFrom and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
location = str(e)
#this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(chat.full_chat, "kicked_count") else None
restricted_users = chat.full_chat.banned_count if hasattr(chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info, "megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info, "restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info, "verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(creator_username) if creator_username else None
#end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None, works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
if former_title is not None:  # i.e. the chat's very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
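# Hedged note: this "level" is an informal heuristic derived from the pts update counter,
# not an official Telegram metric. Worked example: exp_count = 1400 gives
# int((1 + sqrt(1 + 7 * 1400 / 14)) / 2) = int((1 + sqrt(701)) / 2) = 13.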
chat_level = int((1+sqrt(1+7*exp_count/14))/2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restricted_users is not None:
caption += f"Restricted users: <code>{restricted_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
import ub.modules.sql_helper.warns_sql as sql
@javes05(outgoing=True, disable_errors=True, pattern="^!resetwarns(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
sql.reset_warns(reply_message.from_id, event.chat_id)
await event.edit("Warnings have been reset!")
@javes.on(rekcah05(pattern=f"resetwarns(?: |$)(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
sql.reset_warns(reply_message.from_id, event.chat_id)
await event.reply("Warnings have been reset!")
@javes05(outgoing=True, disable_errors=True, pattern="^!invite(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
to_add_users = event.pattern_match.group(1)
if event.is_private:
await event.edit(f"**{JAVES_NNAME}:** invite users to a chat, not to a Private Message")
else:
if not event.is_channel and event.is_group:
# https://lonamiwebs.github.io/Telethon/methods/messages/add_chat_user.html
for user_id in to_add_users.split(" "):
try:
await event.client(functions.messages.AddChatUserRequest(
chat_id=event.chat_id,
user_id=user_id,
fwd_limit=1000000
))
except Exception as e:
await event.reply(str(e))
await event.edit(f"**{JAVES_NNAME}:** Invited Requesr sent Successfully")
else:
# https://lonamiwebs.github.io/Telethon/methods/channels/invite_to_channel.html
for user_id in to_add_users.split(" "):
try:
await event.client(functions.channels.InviteToChannelRequest(
channel=event.chat_id,
users=[user_id]
))
except Exception as e:
await event.reply(str(e))
await event.edit(f"**{JAVES_NNAME}:** Invited Successfully")
@javes.on(rekcah05(pattern=f"invite(?: |$)(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
to_add_users = event.pattern_match.group(1)
if event.is_private:
await event.reply(f"**{JAVES_NNAME}:** invite users to a chat, not to a Private Message")
else:
if not event.is_channel and event.is_group:
# https://lonamiwebs.github.io/Telethon/methods/messages/add_chat_user.html
for user_id in to_add_users.split(" "):
try:
await event.client(functions.messages.AddChatUserRequest(
chat_id=event.chat_id,
user_id=user_id,
fwd_limit=1000000
))
except Exception as e:
await event.reply(str(e))
await event.reply(f"**{JAVES_NNAME}:** Invite request sent telethon Successfully")
else:
# https://lonamiwebs.github.io/Telethon/methods/channels/invite_to_channel.html
for user_id in to_add_users.split(" "):
try:
await event.client(functions.channels.InviteToChannelRequest(
channel=event.chat_id,
users=[user_id]
))
except Exception as e:
await event.reply(str(e))
await event.reply(f"**{JAVES_NNAME}:** Invite request sent telethon Successfully")
@javes05(outgoing=True, disable_errors=True, pattern="^!savefilter (.*)")
async def on_snip_save(event):
name = event.pattern_match.group(1)
msg = await event.get_reply_message()
if msg:
snip = {'type': TYPE_TEXT, 'text': msg.message or ''}
if msg.media:
media = None
if isinstance(msg.media, types.MessageMediaPhoto):
media = utils.get_input_photo(msg.media.photo)
snip['type'] = TYPE_PHOTO
elif isinstance(msg.media, types.MessageMediaDocument):
media = utils.get_input_document(msg.media.document)
snip['type'] = TYPE_DOCUMENT
if media:
snip['id'] = media.id
snip['hash'] = media.access_hash
snip['fr'] = media.file_reference
add_filter(event.chat_id, name, snip['text'], snip['type'], snip.get('id'), snip.get('hash'), snip.get('fr'))
await event.edit(f"`{JAVES_NNAME}`: filter {name} saved successfully. Get it with {name}")
else:
await event.edit(f"`{JAVES_NNAME}`: **Reply to a message with `!savefilter keyword` to save the filter**")
@javes.on(rekcah05(pattern=f"savefilter (.*)", allow_sudo=True))
async def on_snip_save(event):
name = event.pattern_match.group(1)
msg = await event.get_reply_message()
if msg:
snip = {'type': TYPE_TEXT, 'text': msg.message or ''}
if msg.media:
media = None
if isinstance(msg.media, types.MessageMediaPhoto):
media = utils.get_input_photo(msg.media.photo)
snip['type'] = TYPE_PHOTO
elif isinstance(msg.media, types.MessageMediaDocument):
media = utils.get_input_document(msg.media.document)
snip['type'] = TYPE_DOCUMENT
if media:
snip['id'] = media.id
snip['hash'] = media.access_hash
snip['fr'] = media.file_reference
add_filter(event.chat_id, name, snip['text'], snip['type'], snip.get('id'), snip.get('hash'), snip.get('fr'))
await event.reply(f"`{JAVES_NNAME}`: filter {name} saved successfully. Get it with {name}")
else:
await event.reply(f"`{JAVES_NNAME}`: **Reply to a message with `.savefilter keyword` to save the filter**")
@javes05(outgoing=True, disable_errors=True, pattern="^\!checkfilter$")
async def on_snip_list(event):
all_snips = get_all_rkfilters(event.chat_id)
OUT_STR = f"`{JAVES_NNAME}`: Available filters in the Current Chat:\n"
if len(all_snips) > 0:
for a_snip in all_snips:
OUT_STR += f"~> {a_snip.keyword} \n"
else:
OUT_STR = f"`{JAVES_NNAME}`: No filters. Start Saving using `!savefilter`"
if len(OUT_STR) > 4096:
with io.BytesIO(str.encode(OUT_STR)) as out_file:
out_file.name = "filters.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=f"`{JAVES_NNAME}`: **Available filters in the Current Chat**",
reply_to=event
)
await event.delete()
else:
await event.edit(OUT_STR)
@javes.on(rekcah05(pattern=f"checkfilter$", allow_sudo=True))
async def on_snip_list(event):
all_snips = get_all_rkfilters(event.chat_id)
OUT_STR = f"`{JAVES_NNAME}`: Available filters in the Current Chat:\n"
if len(all_snips) > 0:
for a_snip in all_snips:
OUT_STR += f"~> {a_snip.keyword} \n"
else:
OUT_STR = f"`{JAVES_NNAME}`: No filters. Start Saving using `.savefilter`"
if len(OUT_STR) > 4096:
with io.BytesIO(str.encode(OUT_STR)) as out_file:
out_file.name = "filters.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=f"`{JAVES_NNAME}`: **Available filters in the Current Chat**",
reply_to=event
)
await event.delete()
else:
await event.reply(OUT_STR)
@javes05(outgoing=True, disable_errors=True, pattern="^\!clearfilter (\w*)")
async def on_snip_delete(event):
name = event.pattern_match.group(1)
remove_filter(event.chat_id, name)
await event.edit(f"`{JAVES_NNAME}`: filter {name} deleted successfully")
@javes.on(rekcah05(pattern=f"clearfilter (.*)", allow_sudo=True))
async def on_snip_delete(event):
name = event.pattern_match.group(1)
remove_filter(event.chat_id, name)
await event.edit(f"`{JAVES_NNAME}`: filter {name} deleted successfully")
@javes05(outgoing=True, disable_errors=True, pattern="^\!clearallfilter$")
async def on_all_snip_delete(event):
remove_all_rkfilters(event.chat_id)
await event.edit(f"`{JAVES_NNAME}`: filters **in current chat** deleted successfully")
@javes.on(rekcah05(pattern=f"clearallfilter$", allow_sudo=True))
async def on_all_snip_delete(event):
remove_all_rkfilters(event.chat_id)
await event.reply(f"`{JAVES_NNAME}`: filters **in current chat** deleted successfully")
| 40.717109
| 206
| 0.629314
| 8,536
| 65,921
| 4.696579
| 0.065253
| 0.012123
| 0.018384
| 0.00898
| 0.902619
| 0.882539
| 0.858793
| 0.833325
| 0.815415
| 0.798503
| 0
| 0.005518
| 0.255002
| 65,921
| 1,618
| 207
| 40.742274
| 0.810796
| 0.025424
| 0
| 0.810912
| 0
| 0.007474
| 0.169598
| 0.034671
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.003737
| 0.076233
| 0
| 0.116592
| 0.004484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b96a8a196b187dbc1a6b882152fb105a7a5fe697
| 4,476
|
py
|
Python
|
tests/test_connection.py
|
dated/python-client
|
ae966885b38cbdd25eadd27426c02cf9753cd42d
|
[
"MIT"
] | null | null | null |
tests/test_connection.py
|
dated/python-client
|
ae966885b38cbdd25eadd27426c02cf9753cd42d
|
[
"MIT"
] | null | null | null |
tests/test_connection.py
|
dated/python-client
|
ae966885b38cbdd25eadd27426c02cf9753cd42d
|
[
"MIT"
] | null | null | null |
import pytest
import requests
import responses
from client.connection import Connection
from client.exceptions import ArkHTTPException
def test_connection_creation_sets_default_session_headers_and_variables():
connection = Connection('http://127.0.0.1:4003')
assert connection.hostname == 'http://127.0.0.1:4003'
assert isinstance(connection.session, requests.Session)
assert connection.session.headers['Content-Type'] == 'application/json'
def test_connection_request_retry_successful():
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
body=requests.exceptions.RequestException())
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
body=requests.exceptions.RequestException())
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
json={'success': True},
status=200
)
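# Two transient failures followed by a success: the client is expected to retry and
# end up making three HTTP calls in total (asserted below).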
connection = Connection('http://127.0.0.1:4003')
data = connection.get('spongebob')
assert data == {'success': True}
assert len(responses.calls) == 3
assert responses.calls[0].request.url == 'http://127.0.0.1:4003/spongebob'
def test_connection_raises_for_request_retry_failure():
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
body=requests.exceptions.RequestException())
connection = Connection('http://127.0.0.1:4003')
with pytest.raises(ArkHTTPException) as exception:
connection.get('spongebob')
assert len(responses.calls) == 3
def test_handle_response_raises_for_no_content_in_response():
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
status=404
)
connection = Connection('http://127.0.0.1:4003')
response = requests.get('http://127.0.0.1:4003/spongebob')
with pytest.raises(ArkHTTPException) as exception:
connection._handle_response(response)
assert str(exception.value) == 'No content in response'
assert exception.value.response == response
def test_handle_response_raises_for_success_false_in_response():
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
json={'success': False, 'error': 'Best error ever'},
status=404
)
connection = Connection('http://127.0.0.1:4003')
response = requests.get('http://127.0.0.1:4003/spongebob')
with pytest.raises(ArkHTTPException) as exception:
connection._handle_response(response)
assert str(exception.value) == 'GET 404 http://127.0.0.1:4003/spongebob - Best error ever'
assert exception.value.response == response
def test_handle_response_returns_body_from_request():
responses.add(
responses.GET,
'http://127.0.0.1:4003/spongebob',
json={'success': True},
status=200
)
connection = Connection('http://127.0.0.1:4003')
response = requests.get('http://127.0.0.1:4003/spongebob')
body = connection._handle_response(response)
assert body == {'success': True}
@pytest.mark.parametrize('method,func_name', [
(responses.GET, 'get'),
(responses.POST, 'post'),
(responses.PUT, 'put'),
(responses.PATCH, 'patch'),
(responses.DELETE, 'delete'),
])
def test_http_methods_call_correct_url_and_return_correct_response(method, func_name):
responses.add(
method,
'http://127.0.0.1:4003/spongebob',
json={'success': True},
status=200
)
connection = Connection('http://127.0.0.1:4003')
data = getattr(connection, func_name)('spongebob')
assert data == {'success': True}
assert len(responses.calls) == 1
assert responses.calls[0].request.url == 'http://127.0.0.1:4003/spongebob'
@pytest.mark.parametrize('method,func_name', [
(responses.GET, 'get'),
(responses.POST, 'post'),
(responses.PUT, 'put'),
(responses.PATCH, 'patch'),
(responses.DELETE, 'delete'),
])
def test_http_methods_call_correct_url_with_params_and_return_correct_response(method, func_name):
responses.add(
method,
'http://127.0.0.1:4003/spongebob',
json={'success': True},
status=200
)
connection = Connection('http://127.0.0.1:4003')
data = getattr(connection, func_name)('spongebob', params={'foo': 'bar'})
assert data == {'success': True}
assert len(responses.calls) == 1
assert responses.calls[0].request.url == 'http://127.0.0.1:4003/spongebob?foo=bar'
| 31.083333
| 98
| 0.66555
| 561
| 4,476
| 5.181818
| 0.146168
| 0.0602
| 0.068799
| 0.077399
| 0.810114
| 0.788442
| 0.775026
| 0.739938
| 0.716546
| 0.657723
| 0
| 0.075956
| 0.182306
| 4,476
| 143
| 99
| 31.300699
| 0.718306
| 0
| 0
| 0.716814
| 0
| 0
| 0.216265
| 0
| 0
| 0
| 0
| 0
| 0.159292
| 1
| 0.070796
| false
| 0
| 0.044248
| 0
| 0.115044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9d703baf96fe46be2da658a01a5dbfb70efb500
| 88,507
|
py
|
Python
|
python/openlattice/api/collections_api.py
|
openlattice/api-clients
|
1d5be9861785b295089b732f37464e31bf80c8ca
|
[
"Apache-2.0"
] | null | null | null |
python/openlattice/api/collections_api.py
|
openlattice/api-clients
|
1d5be9861785b295089b732f37464e31bf80c8ca
|
[
"Apache-2.0"
] | 1
|
2021-01-20T00:20:01.000Z
|
2021-01-20T00:20:01.000Z
|
python/openlattice/api/collections_api.py
|
openlattice/api-clients
|
1d5be9861785b295089b732f37464e31bf80c8ca
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenLattice API
OpenLattice API # noqa: E501
The version of the OpenAPI document: 0.0.1
Contact: support@openlattice.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openlattice.api_client import ApiClient
from openlattice.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CollectionsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
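# Hedged usage sketch (illustrative, not part of the generated file): assumes a reachable
# OpenLattice deployment and an ApiClient configured with valid credentials; the variable
# names below are placeholders.
#
# client = ApiClient()
# collections_api = CollectionsApi(client)
# new_id = collections_api.create_entity_type_collection(entity_type_collection)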
def add_type_to_entity_type_collection_template(self, entity_type_collection_id, collection_template_type, **kwargs): # noqa: E501
"""Appends type to template of the specified EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_type_to_entity_type_collection_template(entity_type_collection_id, collection_template_type, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param collection_template_type: (required)
:type collection_template_type: CollectionTemplateType
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.add_type_to_entity_type_collection_template_with_http_info(entity_type_collection_id, collection_template_type, **kwargs) # noqa: E501
def add_type_to_entity_type_collection_template_with_http_info(self, entity_type_collection_id, collection_template_type, **kwargs): # noqa: E501
"""Appends type to template of the specified EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_type_to_entity_type_collection_template_with_http_info(entity_type_collection_id, collection_template_type, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param collection_template_type: (required)
:type collection_template_type: CollectionTemplateType
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_type_collection_id',
'collection_template_type'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method add_type_to_entity_type_collection_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_type_collection_id' is set
if self.api_client.client_side_validation and ('entity_type_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_type_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_type_collection_id` when calling `add_type_to_entity_type_collection_template`") # noqa: E501
# verify the required parameter 'collection_template_type' is set
if self.api_client.client_side_validation and ('collection_template_type' not in local_var_params or # noqa: E501
local_var_params['collection_template_type'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `collection_template_type` when calling `add_type_to_entity_type_collection_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_type_collection_id' in local_var_params:
path_params['entityTypeCollectionId'] = local_var_params['entity_type_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'collection_template_type' in local_var_params:
body_params = local_var_params['collection_template_type']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type/{entityTypeCollectionId}/template', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def create_entity_set_collection(self, entity_set_collection, **kwargs): # noqa: E501
"""Creates a new EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_entity_set_collection(entity_set_collection, async_req=True)
>>> result = thread.get()
:param entity_set_collection: (required)
:type entity_set_collection: EntitySetCollection
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
return self.create_entity_set_collection_with_http_info(entity_set_collection, **kwargs) # noqa: E501
def create_entity_set_collection_with_http_info(self, entity_set_collection, **kwargs): # noqa: E501
"""Creates a new EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_entity_set_collection_with_http_info(entity_set_collection, async_req=True)
>>> result = thread.get()
:param entity_set_collection: (required)
:type entity_set_collection: EntitySetCollection
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'entity_set_collection'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_entity_set_collection" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_collection' is set
if self.api_client.client_side_validation and ('entity_set_collection' not in local_var_params or # noqa: E501
local_var_params['entity_set_collection'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_collection` when calling `create_entity_set_collection`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'entity_set_collection' in local_var_params:
body_params = local_var_params['entity_set_collection']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def create_entity_type_collection(self, entity_type_collection, **kwargs): # noqa: E501
"""Creates a new EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_entity_type_collection(entity_type_collection, async_req=True)
>>> result = thread.get()
:param entity_type_collection: (required)
:type entity_type_collection: EntityTypeCollection
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
return self.create_entity_type_collection_with_http_info(entity_type_collection, **kwargs) # noqa: E501
def create_entity_type_collection_with_http_info(self, entity_type_collection, **kwargs): # noqa: E501
"""Creates a new EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_entity_type_collection_with_http_info(entity_type_collection, async_req=True)
>>> result = thread.get()
:param entity_type_collection: (required)
:type entity_type_collection: EntityTypeCollection
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'entity_type_collection'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_entity_type_collection" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_type_collection' is set
if self.api_client.client_side_validation and ('entity_type_collection' not in local_var_params or # noqa: E501
local_var_params['entity_type_collection'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_type_collection` when calling `create_entity_type_collection`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'entity_type_collection' in local_var_params:
body_params = local_var_params['entity_type_collection']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_entity_set_collection(self, entity_set_collection_id, **kwargs): # noqa: E501
"""Deletes the specified EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_set_collection(entity_set_collection_id, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_entity_set_collection_with_http_info(entity_set_collection_id, **kwargs) # noqa: E501
def delete_entity_set_collection_with_http_info(self, entity_set_collection_id, **kwargs): # noqa: E501
"""Deletes the specified EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_set_collection_with_http_info(entity_set_collection_id, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_set_collection_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_entity_set_collection" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_collection_id' is set
if self.api_client.client_side_validation and ('entity_set_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_collection_id` when calling `delete_entity_set_collection`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_collection_id' in local_var_params:
path_params['entitySetCollectionId'] = local_var_params['entity_set_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set/{entitySetCollectionId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_entity_type_collection(self, entity_type_collection_id, **kwargs): # noqa: E501
"""Deletes the specified EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_type_collection(entity_type_collection_id, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_entity_type_collection_with_http_info(entity_type_collection_id, **kwargs) # noqa: E501
def delete_entity_type_collection_with_http_info(self, entity_type_collection_id, **kwargs): # noqa: E501
"""Deletes the specified EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_type_collection_with_http_info(entity_type_collection_id, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_type_collection_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_entity_type_collection" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_type_collection_id' is set
if self.api_client.client_side_validation and ('entity_type_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_type_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_type_collection_id` when calling `delete_entity_type_collection`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_type_collection_id' in local_var_params:
path_params['entityTypeCollectionId'] = local_var_params['entity_type_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type/{entityTypeCollectionId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_all_entity_set_collections(self, **kwargs): # noqa: E501
"""Returns all EntitySetCollection objects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_entity_set_collections(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[EntitySetCollection]
"""
kwargs['_return_http_data_only'] = True
return self.get_all_entity_set_collections_with_http_info(**kwargs) # noqa: E501
def get_all_entity_set_collections_with_http_info(self, **kwargs): # noqa: E501
"""Returns all EntitySetCollection objects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_entity_set_collections_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[EntitySetCollection], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_entity_set_collections" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntitySetCollection]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
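    # A minimal sketch of the *_with_http_info variant above, assuming `api` is
    # a configured instance of this API class; called directly, it returns the
    # deserialized data together with the HTTP status code and headers:
    #
    #   data, status, headers = api.get_all_entity_set_collections_with_http_info()
    #   # data    -> list[EntitySetCollection]
    #   # status  -> int HTTP status code
    #   # headers -> HTTPHeaderDict of response headers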
def get_all_entity_type_collections(self, **kwargs): # noqa: E501
"""Returns all EntityTypeCollection objects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_entity_type_collections(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[EntityTypeCollection]
"""
kwargs['_return_http_data_only'] = True
return self.get_all_entity_type_collections_with_http_info(**kwargs) # noqa: E501
def get_all_entity_type_collections_with_http_info(self, **kwargs): # noqa: E501
"""Returns all EntityTypeCollection objects # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_entity_type_collections_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[EntityTypeCollection], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_entity_type_collections" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityTypeCollection]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
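    # A minimal sketch of the two _request_timeout forms documented above,
    # assuming `api` is a configured instance of this API class:
    #
    #   api.get_all_entity_type_collections(_request_timeout=30)       # single number: total timeout
    #   api.get_all_entity_type_collections(_request_timeout=(5, 25))  # (connection, read) timeouts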
def get_entity_set_collection(self, entity_set_collection_id, **kwargs): # noqa: E501
"""Returns the EntitySetCollection object for a given id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_set_collection(entity_set_collection_id, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: EntitySetCollection
"""
kwargs['_return_http_data_only'] = True
return self.get_entity_set_collection_with_http_info(entity_set_collection_id, **kwargs) # noqa: E501
def get_entity_set_collection_with_http_info(self, entity_set_collection_id, **kwargs): # noqa: E501
"""Returns the EntitySetCollection object for a given id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_set_collection_with_http_info(entity_set_collection_id, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(EntitySetCollection, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'entity_set_collection_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_set_collection" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_collection_id' is set
if self.api_client.client_side_validation and ('entity_set_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_collection_id` when calling `get_entity_set_collection`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_collection_id' in local_var_params:
path_params['entitySetCollectionId'] = local_var_params['entity_set_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set/{entitySetCollectionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntitySetCollection', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
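    # A minimal sketch of _preload_content=False on the getter above, assuming
    # `api` is a configured instance of this API class and `collection_id` is a
    # hypothetical EntitySetCollection id:
    #
    #   raw = api.get_entity_set_collection(collection_id, _preload_content=False)
    #   # `raw` is the unread urllib3.HTTPResponse; reading/decoding is left to the caller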
def get_entity_set_collections_of_type(self, entity_set_collection_id, **kwargs): # noqa: E501
"""Returns all authorized EntitySetCollections for a given EntityTypeCollection id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_set_collections_of_type(entity_set_collection_id, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[EntitySetCollection]
"""
kwargs['_return_http_data_only'] = True
return self.get_entity_set_collections_of_type_with_http_info(entity_set_collection_id, **kwargs) # noqa: E501
def get_entity_set_collections_of_type_with_http_info(self, entity_set_collection_id, **kwargs): # noqa: E501
"""Returns all authorized EntitySetCollections for a given EntityTypeCollection id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_set_collections_of_type_with_http_info(entity_set_collection_id, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[EntitySetCollection], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'entity_set_collection_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_set_collections_of_type" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_collection_id' is set
if self.api_client.client_side_validation and ('entity_set_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_collection_id` when calling `get_entity_set_collections_of_type`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_collection_id' in local_var_params:
path_params['entitySetCollectionId'] = local_var_params['entity_set_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set/entity/type/{entitySetCollectionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntitySetCollection]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_entity_type_collection(self, entity_type_collection_id, **kwargs): # noqa: E501
"""Returns the EntityTypeCollection object for a given id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_type_collection(entity_type_collection_id, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: EntityTypeCollection
"""
kwargs['_return_http_data_only'] = True
return self.get_entity_type_collection_with_http_info(entity_type_collection_id, **kwargs) # noqa: E501
def get_entity_type_collection_with_http_info(self, entity_type_collection_id, **kwargs): # noqa: E501
"""Returns the EntityTypeCollection object for a given id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_type_collection_with_http_info(entity_type_collection_id, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(EntityTypeCollection, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'entity_type_collection_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_type_collection" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_type_collection_id' is set
if self.api_client.client_side_validation and ('entity_type_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_type_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_type_collection_id` when calling `get_entity_type_collection`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_type_collection_id' in local_var_params:
path_params['entityTypeCollectionId'] = local_var_params['entity_type_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type/{entityTypeCollectionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityTypeCollection', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
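    # A minimal sketch of the client-side validation above, assuming `api` is a
    # configured instance of this API class with client_side_validation enabled;
    # passing None for the required id raises ApiValueError before any HTTP call:
    #
    #   try:
    #       api.get_entity_type_collection(None)
    #   except ApiValueError as err:
    #       print(err)  # "Missing the required parameter `entity_type_collection_id` ..."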
def remove_type_from_entity_type_collection_template(self, entity_type_collection_id, type_id, **kwargs): # noqa: E501
"""Removes a key from an EntityTypeCollection template # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_type_from_entity_type_collection_template(entity_type_collection_id, type_id, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param type_id: (required)
:type type_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.remove_type_from_entity_type_collection_template_with_http_info(entity_type_collection_id, type_id, **kwargs) # noqa: E501
def remove_type_from_entity_type_collection_template_with_http_info(self, entity_type_collection_id, type_id, **kwargs): # noqa: E501
"""Removes a key from an EntityTypeCollection template # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_type_from_entity_type_collection_template_with_http_info(entity_type_collection_id, type_id, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param type_id: (required)
:type type_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_type_collection_id',
'type_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_type_from_entity_type_collection_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_type_collection_id' is set
if self.api_client.client_side_validation and ('entity_type_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_type_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_type_collection_id` when calling `remove_type_from_entity_type_collection_template`") # noqa: E501
# verify the required parameter 'type_id' is set
if self.api_client.client_side_validation and ('type_id' not in local_var_params or # noqa: E501
local_var_params['type_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `type_id` when calling `remove_type_from_entity_type_collection_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_type_collection_id' in local_var_params:
path_params['entityTypeCollectionId'] = local_var_params['entity_type_collection_id'] # noqa: E501
if 'type_id' in local_var_params:
path_params['typeId'] = local_var_params['type_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type/{entityTypeCollectionId}/template/{typeId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
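    # A minimal usage sketch for the template-removal call above, assuming `api`
    # is a configured instance of this API class and both UUID strings are
    # hypothetical placeholders:
    #
    #   api.remove_type_from_entity_type_collection_template(
    #       entity_type_collection_id="<entity-type-collection-uuid>",
    #       type_id="<template-type-uuid>")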
def update_entity_set_collection_metadata(self, entity_set_collection_id, metadata_update, **kwargs): # noqa: E501
"""Updates metadata of the specified EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_set_collection_metadata(entity_set_collection_id, metadata_update, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param metadata_update: (required)
:type metadata_update: MetadataUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.update_entity_set_collection_metadata_with_http_info(entity_set_collection_id, metadata_update, **kwargs) # noqa: E501
def update_entity_set_collection_metadata_with_http_info(self, entity_set_collection_id, metadata_update, **kwargs): # noqa: E501
"""Updates metadata of the specified EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_set_collection_metadata_with_http_info(entity_set_collection_id, metadata_update, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param metadata_update: (required)
:type metadata_update: MetadataUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_set_collection_id',
'metadata_update'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_entity_set_collection_metadata" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_collection_id' is set
if self.api_client.client_side_validation and ('entity_set_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_collection_id` when calling `update_entity_set_collection_metadata`") # noqa: E501
# verify the required parameter 'metadata_update' is set
if self.api_client.client_side_validation and ('metadata_update' not in local_var_params or # noqa: E501
local_var_params['metadata_update'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `metadata_update` when calling `update_entity_set_collection_metadata`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_collection_id' in local_var_params:
path_params['entitySetCollectionId'] = local_var_params['entity_set_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'metadata_update' in local_var_params:
body_params = local_var_params['metadata_update']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set/{entitySetCollectionId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
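    # A minimal sketch for the metadata update above, assuming `api` is a
    # configured instance of this API class and that a MetadataUpdate model is
    # importable from the same generated package; the `title` field shown here
    # is illustrative and may not match the actual model definition:
    #
    #   update = MetadataUpdate(title="Renamed collection")
    #   api.update_entity_set_collection_metadata("<entity-set-collection-uuid>", update)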
def update_entity_set_collection_template(self, entity_set_collection_id, request_body, **kwargs): # noqa: E501
"""Updates template of the specified EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_set_collection_template(entity_set_collection_id, request_body, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param request_body: (required)
:type request_body: dict(str, str)
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.update_entity_set_collection_template_with_http_info(entity_set_collection_id, request_body, **kwargs) # noqa: E501
def update_entity_set_collection_template_with_http_info(self, entity_set_collection_id, request_body, **kwargs): # noqa: E501
"""Updates template of the specified EntitySetCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_set_collection_template_with_http_info(entity_set_collection_id, request_body, async_req=True)
>>> result = thread.get()
:param entity_set_collection_id: (required)
:type entity_set_collection_id: str
:param request_body: (required)
:type request_body: dict(str, str)
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_set_collection_id',
'request_body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_entity_set_collection_template" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_collection_id' is set
if self.api_client.client_side_validation and ('entity_set_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_collection_id` when calling `update_entity_set_collection_template`") # noqa: E501
# verify the required parameter 'request_body' is set
if self.api_client.client_side_validation and ('request_body' not in local_var_params or # noqa: E501
local_var_params['request_body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request_body` when calling `update_entity_set_collection_template`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_collection_id' in local_var_params:
path_params['entitySetCollectionId'] = local_var_params['entity_set_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request_body' in local_var_params:
body_params = local_var_params['request_body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/set/{entitySetCollectionId}/template', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
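    # A minimal sketch for the template update above, assuming `api` is a
    # configured instance of this API class; the body is a plain dict(str, str)
    # and the UUID placeholders (assumed to map a template type id to an entity
    # set id) are hypothetical:
    #
    #   template_update = {"<template-type-uuid>": "<entity-set-uuid>"}
    #   api.update_entity_set_collection_template("<entity-set-collection-uuid>", template_update)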
def update_entity_type_collection_metadata(self, entity_type_collection_id, metadata_update, **kwargs): # noqa: E501
"""Updates metadata of the specified EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_type_collection_metadata(entity_type_collection_id, metadata_update, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param metadata_update: (required)
:type metadata_update: MetadataUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.update_entity_type_collection_metadata_with_http_info(entity_type_collection_id, metadata_update, **kwargs) # noqa: E501
def update_entity_type_collection_metadata_with_http_info(self, entity_type_collection_id, metadata_update, **kwargs): # noqa: E501
"""Updates metadata of the specified EntityTypeCollection # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_entity_type_collection_metadata_with_http_info(entity_type_collection_id, metadata_update, async_req=True)
>>> result = thread.get()
:param entity_type_collection_id: (required)
:type entity_type_collection_id: str
:param metadata_update: (required)
:type metadata_update: MetadataUpdate
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return the response data only, without
                                       the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If a single
                                 number is provided, it is used as the total
                                 request timeout. It can also be a pair (tuple)
                                 of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_type_collection_id',
'metadata_update'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_entity_type_collection_metadata" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_type_collection_id' is set
if self.api_client.client_side_validation and ('entity_type_collection_id' not in local_var_params or # noqa: E501
local_var_params['entity_type_collection_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_type_collection_id` when calling `update_entity_type_collection_metadata`") # noqa: E501
# verify the required parameter 'metadata_update' is set
if self.api_client.client_side_validation and ('metadata_update' not in local_var_params or # noqa: E501
local_var_params['metadata_update'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `metadata_update` when calling `update_entity_type_collection_metadata`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_type_collection_id' in local_var_params:
path_params['entityTypeCollectionId'] = local_var_params['entity_type_collection_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'metadata_update' in local_var_params:
body_params = local_var_params['metadata_update']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/collections/entity/type/{entityTypeCollectionId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
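    # A minimal sketch of the keyword-argument validation shared by the methods
    # above, assuming `api` is a configured instance of this API class; any
    # keyword outside the documented set raises ApiTypeError:
    #
    #   try:
    #       api.get_all_entity_type_collections(bogus_option=1)
    #   except ApiTypeError as err:
    #       print(err)  # "Got an unexpected keyword argument 'bogus_option' ..."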
| avg_line_length: 47.228922 | max_line_length: 171 | alphanum_fraction: 0.616358 | (remaining per-file quality-signal columns omitted)

hexsha: b9ef6566f098bff6f1a9cf0cff6b3aac79e798e1 | size: 3,134 | ext: py | lang: Python
repo_path: exercises/code/GSkernel_source/gs_kernel/unit_test.py | repo_name: aldro61/microbiome-summer-school-2017 | repo_head_hexsha: 5f7fa384b66ea776db0d6e9c397f3d143254389b | licenses: ["MIT"]
max_stars_count: 8 (2017-07-26T17:53:24.000Z to 2021-08-21T09:02:49.000Z) | max_issues_count: 2 (2017-06-20T02:48:08.000Z to 2017-06-22T15:05:25.000Z) | max_forks_count: 4 (2018-02-26T18:24:37.000Z to 2019-04-27T23:46:42.000Z)
content:
import numpy as np
from GSkernel_fast import GS_gram_matrix_fast
from GSkernel import GS_gram_matrix
def load_matrix(file_name):
f = open(file_name)
lines = f.readlines()
f.close()
M = []
for l in lines:
M.append([float(x) for x in l.split()])
return np.array(M)
def test():
amino_acid_property_file = 'amino_acids_matrix/AA.blosum50.dat'
sigma_position = 1.0
sigma_amino_acid = 1.0
substring_length = 3
f = open('examples/data/Zhou2010_bradykinin.dat')
bradykinin = [l.split()[0] for l in f.readlines()]
f.close()
f = open('examples/data/Zhou2010_cationic.dat')
cationic = [l.split()[0] for l in f.readlines()]
f.close()
print "Testing normalized symetric matrix"
K1 = GS_gram_matrix_fast(X=bradykinin,
Y=bradykinin,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=True)
K2 = GS_gram_matrix( X=bradykinin,
Y=bradykinin,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=True)
assert(np.allclose(K1,K2))
print "Testing un-normalized symetric matrix"
K1 = GS_gram_matrix_fast(X=bradykinin,
Y=bradykinin,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=False)
K2 = GS_gram_matrix( X=bradykinin,
Y=bradykinin,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=False)
assert(np.allclose(K1,K2))
print "Testing normalized non-symetric matrix"
K1 = GS_gram_matrix_fast(X=bradykinin,
Y=cationic,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=True)
K2 = GS_gram_matrix( X=bradykinin,
Y=cationic,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=True)
assert(np.allclose(K1,K2))
print "Testing un-normalized non-symetric matrix"
K1 = GS_gram_matrix_fast(X=bradykinin,
Y=cationic,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=False)
K2 = GS_gram_matrix( X=bradykinin,
Y=cationic,
amino_acid_property_file=amino_acid_property_file,
sigma_position=sigma_position,
sigma_amino_acid=sigma_amino_acid,
substring_length=substring_length,
normalize_matrix=False)
assert(np.allclose(K1,K2))
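# A minimal entry-point sketch, assuming this module may be run directly as a
# script; it simply executes the comparison tests defined above.
if __name__ == "__main__":
    test()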
| avg_line_length: 28.490909 | max_line_length: 64 | alphanum_fraction: 0.732929 | (remaining per-file quality-signal columns omitted)

hexsha: 6a1a625dd7096419383a6dc0af1cb28722abf73b | size: 70 | ext: py | lang: Python
repo_path: first-homework.py | repo_name: neal03shah/astr-119 | repo_head_hexsha: fae42b9c41c329b5e6cc6ba967597cef18742956 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 7 (2021-09-23T23:17:57.000Z to 2021-12-11T00:04:53.000Z) | max_forks_count: null
content:
print("Neal K Shah, He/Him/His") # print out Neal K Shah, He/Him/His
| avg_line_length: 35 | max_line_length: 69 | alphanum_fraction: 0.671429 | (remaining per-file quality-signal columns omitted)

hexsha: 6a4ad8d1bf0e152719e04f731539e2151bf9a41a | size: 16,233 | ext: py | lang: Python
repo_path: mask.py | repo_name: Robbie1977/AlignmentPipe | repo_head_hexsha: f7979cbf67a40619fd36ae1873c460439d7ecd64 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 18 (2015-03-03T15:55:37.000Z to 2016-07-15T13:53:52.000Z) | max_forks_count: null
content:
import os, sys, nrrd, cmtk, gc, stat, shutil
import numpy as np
import warpScoring.slicescore as slicescore
import warpScoring.CheckImages as ci
from cmtk import cur, tempfolder, active, run_stage, cmtkdir, template, checkDir, host, templatedir
from NRRDtools.labelObjects import labelObj, cutObj, cropObj
if __name__ == "__main__":
if active and '0' in run_stage:
cur.execute("SELECT images_mask_original.id, images_mask_original.intensity_threshold, images_mask_original.min_object_size, images_original_nrrd.file FROM images_mask_original, images_original_nrrd WHERE images_original_nrrd.id = images_mask_original.image_id AND images_mask_original.complete = False ORDER BY images_mask_original.id")
records = cur.fetchall()
total = len(records)
count = 0
print records
for line in records:
count +=1
print 'Create original image mask: ' + str(count) + ' of ' + str(total)
outfile = str(line[3]).replace('.nrrd','-objMask.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
modfile = str(line[3]).replace('.nrrd','-modFile.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
if not os.path.isfile(tempfolder + modfile):
shutil.copyfile(tempfolder + str(line[3]), tempfolder + modfile)
objs = labelObj(tempfolder + str(line[3]), tempfolder + outfile, t=line[1], ms=line[2])
cur.execute("UPDATE images_mask_original SET complete=True, cut_complete=False, crop_complete=False, detected_objects=%s WHERE id = %s ", [objs.tolist(), str(line[0])])
cur.connection.commit()
gc.collect()
try:
os.chmod(tempfolder + outfile, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
except:
pass
print 'done'
else:
print 'inactive or stage 0 not selected'
if active and '0' in run_stage:
cur.execute("SELECT images_mask_original.id, images_mask_original.cut_objects, images_original_nrrd.file, images_mask_original.auto_restart_alignment, images_alignment.id, images_original_nrrd.id, images_mask_original.overwrite_original, images_alignment.name FROM images_mask_original, images_original_nrrd, images_alignment WHERE images_original_nrrd.id = images_mask_original.image_id AND images_original_nrrd.image_id = images_alignment.id AND images_mask_original.complete = True AND images_mask_original.cut_complete = False AND images_mask_original.cut_objects is not null AND images_mask_original.cut_objects != '' AND images_mask_original.cut_objects != '{}' ORDER BY images_mask_original.id")
records = cur.fetchall()
total = len(records)
count = 0
print records
for line in records:
count +=1
print 'Cut object(s) from original image: ' + str(count) + ' of ' + str(total)
maskfile = str(line[2]).replace('.nrrd','-objMask.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
modfile = str(line[2]).replace('.nrrd','-ModFile.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
if not os.path.isfile(tempfolder + modfile):
shutil.copyfile(tempfolder + str(line[2]),tempfolder + modfile)
if not line[6]:
oldName = str(line[7])
newName = str(line[7]) + "_ModByMask" + str(line[0])
shutil.copyfile(tempfolder + modfile, tempfolder + str(line[2]).replace(oldName, newName))
cutObj(tempfolder + modfile, tempfolder + maskfile, labels=str(line[1]))
cur.execute("UPDATE images_mask_original SET cut_complete=True WHERE id = %s ", [str(line[0])])
cur.connection.commit()
gc.collect()
newId = str(line[4])
oldId = str(line[4])
if line[6]:
print 'Updating with results...'
cur.execute("UPDATE images_original_nrrd SET file=%s WHERE id = %s ", [modfile, str(line[5])])
cur.connection.commit()
gc.collect()
else:
print 'Creating new alignment record with results...'
print "Old ID: " + str(oldId)
cur.execute("INSERT INTO images_alignment(name, settings_id, max_stage, last_host, alignment_stage, orig_orientation, loading_host, original_ext, original_path, crop_xyz, background_channel, signal_channel, ac1_channel, notes, reference, user_id) SELECT %s, settings_id, 2, last_host, alignment_stage, orig_orientation, loading_host, original_ext, original_path, crop_xyz, background_channel, signal_channel, ac1_channel, notes, reference, user_id FROM images_alignment WHERE id = %s", [newName, oldId])
cur.connection.commit()
gc.collect()
cur.execute("SELECT id FROM images_alignment WHERE name = %s", [newName])
results = cur.fetchall()
newId = results[0][0]
gc.collect()
print "New ID: " + str(newId)
cur.execute("INSERT INTO images_original_nrrd ( image_id, channel, new_min, new_max, file, is_index, pre_hist ) SELECT %s, channel, new_min, new_max, replace(file, %s, %s), is_index, pre_hist FROM images_original_nrrd WHERE image_id = %s", [newId, oldName, newName, oldId])
cur.connection.commit()
gc.collect()
cur.execute("SELECT file, id FROM images_original_nrrd WHERE image_id = %s", [newId])
results = cur.fetchall()
print 'Duplicating files...'
newOrig = line[5]
for fl in results:
shutil.copyfile(tempfolder + str(fl[0]).replace(newName, oldName),tempfolder + str(fl[0]))
if (str(line[2]) == str(fl[0]).replace(newName, oldName)):
newOrig = fl[1]
print 'file matched'
os.rename(tempfolder + modfile, tempfolder + str(modfile).replace(oldName, newName))
os.rename(tempfolder + maskfile, tempfolder + str(maskfile).replace(oldName, newName))
print 'Switching to new alignment via ' + str(newOrig)
cur.execute("UPDATE images_mask_original SET image_id=%s WHERE id = %s ", [newOrig, line[0]])
cur.connection.commit()
gc.collect()
if line[3]:
print 'Auto restarting alignment...'
cur.execute("UPDATE images_alignment SET alignment_stage=2002 WHERE id = %s ", [newId])
cur.connection.commit()
gc.collect()
try:
os.chmod((tempfolder + modfile), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
except:
pass
print 'done'
else:
print 'inactive or stage 0 not selected'
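# Stage 0 (crop): crop the original image down to the selected mask objects,
# with the same overwrite-or-fork-new-alignment behaviour as the cut step above.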
if active and '0' in run_stage:
cur.execute("SELECT images_mask_original.id, images_mask_original.crop_objects, images_original_nrrd.file, images_mask_original.auto_restart_alignment, images_alignment.id, images_original_nrrd.id, images_mask_original.overwrite_original, images_alignment.name FROM images_mask_original, images_original_nrrd, images_alignment WHERE images_original_nrrd.id = images_mask_original.image_id AND images_original_nrrd.image_id = images_alignment.id AND images_mask_original.complete = True AND images_mask_original.crop_complete = False AND images_mask_original.crop_objects is not null AND images_mask_original.crop_objects != '' AND images_mask_original.crop_objects != '{}' ORDER BY images_mask_original.id")
records = cur.fetchall()
total = len(records)
count = 0
print records
for line in records:
count +=1
print 'Crop object(s) from original image: ' + str(count) + ' of ' + str(total)
maskfile = str(line[2]).replace('.nrrd','-objMask.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
modfile = str(line[2]).replace('.nrrd','-ModFile.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
if not os.path.isfile(tempfolder + modfile):
shutil.copyfile(tempfolder + str(line[2]),tempfolder + modfile)
if not line[6]:
oldName = str(line[7])
newName = str(line[7]) + "_ModByMask" + str(line[0])
shutil.copyfile(tempfolder + modfile, tempfolder + str(line[2]).replace(oldName, newName))
cropObj(tempfolder + modfile, tempfolder + maskfile, labels=str(line[1]))
cur.execute("UPDATE images_mask_original SET crop_complete=True WHERE id = %s ", [str(line[0])])
cur.connection.commit()
gc.collect()
newId = str(line[4])
oldId = str(line[4])
if line[6]:
print 'Updating with results...'
cur.execute("UPDATE images_original_nrrd SET file=%s WHERE id = %s ", [modfile, str(line[5])])
cur.connection.commit()
gc.collect()
else:
print 'Creating new alignment record with results...'
print "Old ID: " + str(oldId)
cur.execute("INSERT INTO images_alignment(name, settings_id, max_stage, last_host, alignment_stage, orig_orientation, loading_host, original_ext, original_path, crop_xyz, background_channel, signal_channel, ac1_channel, notes, reference, user_id) SELECT %s, settings_id, 2, last_host, alignment_stage, orig_orientation, loading_host, original_ext, original_path, crop_xyz, background_channel, signal_channel, ac1_channel, notes, reference, user_id FROM images_alignment WHERE id = %s", [newName, oldId])
cur.connection.commit()
gc.collect()
cur.execute("SELECT id FROM images_alignment WHERE name = %s", [newName])
results = cur.fetchall()
newId = results[0][0]
gc.collect()
print "New ID: " + str(newId)
cur.execute("INSERT INTO images_original_nrrd ( image_id, channel, new_min, new_max, file, is_index, pre_hist ) SELECT %s, channel, new_min, new_max, replace(file, %s, %s), is_index, pre_hist FROM images_original_nrrd WHERE image_id = %s", [newId, oldName, newName, oldId])
cur.connection.commit()
gc.collect()
cur.execute("SELECT file, id FROM images_original_nrrd WHERE image_id = %s", [newId])
results = cur.fetchall()
print 'Duplicating files...'
newOrig = line[5]
for fl in results:
shutil.copyfile(tempfolder + str(fl[0]).replace(newName, oldName),tempfolder + str(fl[0]))
if (str(line[2]) == str(fl[0]).replace(newName, oldName)):
newOrig = fl[1]
print 'file matched'
os.rename(tempfolder + modfile, tempfolder + str(modfile).replace(oldName, newName))
os.rename(tempfolder + maskfile, tempfolder + str(maskfile).replace(oldName, newName))
print 'Switching to new alignment via ' + str(newOrig)
cur.execute("UPDATE images_mask_original SET image_id=%s WHERE id = %s ", [newOrig, line[0]])
cur.connection.commit()
gc.collect()
if line[3]:
print 'Auto restarting alignment...'
cur.execute("UPDATE images_alignment SET alignment_stage=2002 WHERE id = %s ", [newId])
cur.connection.commit()
gc.collect()
try:
os.chmod((tempfolder + modfile), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
except:
pass
print 'done'
else:
print 'inactive or stage 0 not selected'
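# Stage 7 (detect): label objects in the chosen aligned channel (bg/sg/ac1) using the
# configured intensity threshold and minimum object size, and store the detected
# object labels on the aligned-mask record.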
if active and '7' in run_stage:
cur.execute("SELECT images_mask_aligned.id, images_mask_aligned.intensity_threshold, images_mask_aligned.min_object_size, images_mask_aligned.channel, images_alignment.aligned_bg, images_alignment.aligned_sg, images_alignment.aligned_ac1 FROM images_mask_aligned, images_alignment WHERE images_alignment.id = images_mask_aligned.image_id AND images_mask_aligned.complete = False ORDER BY images_mask_aligned.id")
records = cur.fetchall()
total = len(records)
count = 0
print records
for line in records:
count +=1
chan = 5
print 'Create aligned image mask: ' + str(count) + ' of ' + str(total)
if str(line[3]) == 'bg':
chan = 4
if str(line[3]) == 'ac1':
chan = 6
outfile = str(line[chan]).replace('.nrrd','-objMask.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
modfile = str(line[chan]).replace('.nrrd','-ModFile.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
if not os.path.isfile(tempfolder + modfile):
shutil.copyfile(tempfolder + str(line[chan]), tempfolder + modfile)
objs = labelObj(tempfolder + str(line[chan]), tempfolder + outfile, t=line[1], ms=line[2])
cur.execute("UPDATE images_mask_aligned SET complete=True, cut_complete=False, crop_complete=False, detected_objects=%s WHERE id = %s ", [objs.tolist(), str(line[0])])
cur.connection.commit()
gc.collect()
try:
os.chmod(tempfolder + outfile, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
except:
pass
print 'done'
else:
print 'inactive or stage 7 not selected'
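# Stage 7 (cut): remove the selected objects from the aligned channel and point the
# alignment record's aligned_<channel> field at the modified file.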
if active and '7' in run_stage:
cur.execute("SELECT images_mask_aligned.id, images_mask_aligned.cut_objects, images_mask_aligned.channel, images_alignment.aligned_bg, images_alignment.aligned_sg, images_alignment.aligned_ac1, images_alignment.id FROM images_mask_aligned, images_alignment WHERE images_alignment.id = images_mask_aligned.image_id AND images_mask_aligned.complete = True AND images_mask_aligned.cut_complete = False AND images_mask_aligned.cut_objects is not null AND images_mask_aligned.cut_objects != '' AND images_mask_aligned.cut_objects != '{}' ORDER BY images_mask_aligned.id")
records = cur.fetchall()
total = len(records)
count = 0
print records
for line in records:
count +=1
chan = 4
print 'Cut object(s) from aligned image: ' + str(count) + ' of ' + str(total)
if str(line[2]) == 'bg':
chan = 3
if str(line[2]) == 'ac1':
chan = 5
maskfile = str(line[chan]).replace('.nrrd','-objMask.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
modfile = str(line[chan]).replace('.nrrd','-ModFile.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
if not os.path.isfile(tempfolder + modfile):
shutil.copyfile(tempfolder + str(line[chan]),tempfolder + modfile)
cutObj(tempfolder + modfile, tempfolder + maskfile, labels=str(line[1]))
print 'Updating with results...'
cur.execute("UPDATE images_alignment SET images_alignment.aligned_%s=%s WHERE id = %s ", [str(line[2]), modfile, str(line[6])])
cur.connection.commit()
gc.collect()
cur.execute("UPDATE images_mask_aligned SET cut_complete=True WHERE id = %s ", [str(line[0])])
cur.connection.commit()
gc.collect()
try:
os.chmod(tempfolder + str(line[chan]), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
except:
pass
print 'done'
else:
print 'inactive or stage 7 not selected'
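# Stage 7 (crop): crop the aligned channel to the selected objects and point the
# alignment record's aligned_<channel> field at the modified file.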
if active and '7' in run_stage:
cur.execute("SELECT images_mask_aligned.id, images_mask_aligned.crop_objects, images_mask_aligned.channel, images_alignment.aligned_bg, images_alignment.aligned_sg, images_alignment.aligned_ac1, images_alignment.id FROM images_mask_aligned, images_alignment WHERE images_alignment.id = images_mask_aligned.image_id AND images_mask_aligned.complete = True AND images_mask_aligned.crop_complete = False AND images_mask_aligned.crop_objects is not null AND images_mask_aligned.crop_objects != '' AND images_mask_aligned.crop_objects != '{}' ORDER BY images_mask_aligned.id")
records = cur.fetchall()
total = len(records)
count = 0
print records
for line in records:
count +=1
chan = 4
print 'Crop to object(s) in aligned image: ' + str(count) + ' of ' + str(total)
if str(line[2]) == 'bg':
chan = 3
if str(line[2]) == 'ac1':
chan = 5
maskfile = str(line[chan]).replace('.nrrd','-objMask.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
modfile = str(line[chan]).replace('.nrrd','-ModFile.nrrd').replace('.nrrd', str(line[0]) + '.nrrd')
if not os.path.isfile(tempfolder + modfile):
shutil.copyfile(tempfolder + str(line[chan]),tempfolder + modfile)
cropObj(tempfolder + modfile, tempfolder + maskfile, labels=str(line[1]))
print 'Updating with results...'
cur.execute("UPDATE images_alignment SET images_alignment.aligned_%s=%s WHERE id = %s ", [str(line[2]), modfile, str(line[6])])
cur.connection.commit()
gc.collect()
cur.execute("UPDATE images_mask_aligned SET crop_complete=True WHERE id = %s ", [str(line[0])])
cur.connection.commit()
gc.collect()
try:
os.chmod(tempfolder + str(line[chan]), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
except:
pass
print 'done'
else:
print 'inactive or stage 7 not selected'
| 59.461538
| 711
| 0.678802
| 2,200
| 16,233
| 4.831818
| 0.074091
| 0.046096
| 0.06096
| 0.03556
| 0.95588
| 0.951552
| 0.921825
| 0.906209
| 0.888617
| 0.886171
| 0
| 0.010479
| 0.194604
| 16,233
| 272
| 712
| 59.680147
| 0.802585
| 0
| 0
| 0.871212
| 0
| 0.045455
| 0.43276
| 0.149633
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.022727
| 0.022727
| null | null | 0.159091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e017cf0494d677f67769d1778f982378213c9b9b
| 140
|
py
|
Python
|
dedomena/apis/twitter.py
|
abhijithneilabraham/dedomena
|
26422e0ad8c7e9fd1ec6fdfab49c8943d89fda50
|
[
"MIT"
] | 3
|
2018-08-26T12:32:01.000Z
|
2019-07-15T06:34:23.000Z
|
dedomena/apis/twitter.py
|
abhijithneilabraham/dedomena
|
26422e0ad8c7e9fd1ec6fdfab49c8943d89fda50
|
[
"MIT"
] | 10
|
2022-01-27T20:45:16.000Z
|
2022-01-30T14:40:59.000Z
|
dedomena/apis/twitter.py
|
abhijithneilabraham/dedomena
|
26422e0ad8c7e9fd1ec6fdfab49c8943d89fda50
|
[
"MIT"
] | 1
|
2022-01-27T18:41:02.000Z
|
2022-01-27T18:41:02.000Z
|
def twitter(search_string, n):
"""Search Twitter API for keywords"""
import twintel as tw
return tw.search(search_string, n)
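# Usage sketch (hypothetical): fetch n results for a keyword, assuming the
# twintel package is installed:
#   tweets = twitter('open data', 100)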
| 17.5
| 41
| 0.685714
| 20
| 140
| 4.7
| 0.65
| 0.255319
| 0.276596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 140
| 7
| 42
| 20
| 0.854545
| 0.221429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e03d64184201acf92b0fa7d0b974ac58cd30ba62
| 87,514
|
py
|
Python
|
examples/railways/grid_railway/railway_5vsc.py
|
pydae/pydae
|
8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d
|
[
"MIT"
] | 1
|
2020-12-20T03:45:26.000Z
|
2020-12-20T03:45:26.000Z
|
examples/railways/grid_railway/railway_5vsc.py
|
pydae/pydae
|
8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d
|
[
"MIT"
] | null | null | null |
examples/railways/grid_railway/railway_5vsc.py
|
pydae/pydae
|
8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d
|
[
"MIT"
] | null | null | null |
import numpy as np
import numba
import scipy.optimize as sopt
import json
sin = np.sin
cos = np.cos
atan2 = np.arctan2
sqrt = np.sqrt
sign = np.sign
exp = np.exp
class railway_5vsc_class:
def __init__(self):
self.t_end = 10.000000
self.Dt = 0.0010000
self.decimation = 10.000000
self.itol = 1e-6
self.Dt_max = 0.001000
self.Dt_min = 0.001000
self.solvern = 5
self.imax = 100
self.N_x = 5
self.N_y = 69
self.N_z = 10
self.N_store = 10000
self.params_list = ['R_1112', 'R_1213', 'R_1314', 'R_1415', 'R_1521', 'R_2122', 'R_2223', 'R_2324', 'R_2425', 'R_2531', 'R_3132', 'R_3233', 'R_3334', 'R_3435', 'R_3541', 'R_4142', 'R_4243', 'R_4344', 'R_4445', 'R_4551', 'R_5152', 'R_5253', 'R_5354', 'R_5455', 'p_11', 'p_12', 'p_14', 'p_15', 'p_21', 'p_22', 'p_24', 'p_25', 'p_31', 'p_32', 'p_34', 'p_35', 'p_41', 'p_42', 'p_44', 'p_45', 'p_51', 'p_52', 'p_54', 'p_55']
self.params_values_list = [0.06306666666666667, 0.06306666666666667, 0.07961686626133334, 0.008762450101333334, 0.008762450101333334, 0.008762450101333334, 0.008762450101333334, 0.018346666666666667, 0.018346666666666667, 0.018346666666666667, 0.018346666666666667, 0.018346666666666667, 0.029813333333333334, 0.029813333333333334, 0.029813333333333334, 0.029813333333333334, 0.029813333333333334, 0.07803063134933337, 0.02922567549599999, 0.02922567549599999, 0.02922567549599999, 0.02922567549599999, 0.0344, 0.0344, 0.0, 0.0, -1932995.075, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1438308.138, 0.0, 0.0, 0.0, 0.0, 0.0]
self.inputs_ini_list = ['Dv_r_13', 'Dv_r_23', 'Dv_r_33', 'Dv_r_43', 'Dv_r_53', 'v_nom', 'T_v', 'K_r']
self.inputs_ini_values_list = [0.0, 0.0, 0.0, 0.0, 0.0, 3000.0, 0.02, 0.0003]
self.inputs_run_list = ['Dv_r_13', 'Dv_r_23', 'Dv_r_33', 'Dv_r_43', 'Dv_r_53', 'v_nom', 'T_v', 'K_r']
self.inputs_run_values_list = [0.0, 0.0, 0.0, 0.0, 0.0, 3000.0, 0.02, 0.0003]
self.outputs_list = ['p_13', 'v_13', 'p_23', 'v_23', 'p_33', 'v_33', 'p_43', 'v_43', 'p_53', 'v_53']
self.x_list = ['v_13', 'v_23', 'v_33', 'v_43', 'v_53']
self.y_run_list = ['i_l_1112', 'i_l_1213', 'i_l_1314', 'i_l_1415', 'i_l_2122', 'i_l_2223', 'i_l_2324', 'i_l_2425', 'i_l_3132', 'i_l_3233', 'i_l_3334', 'i_l_3435', 'i_l_4142', 'i_l_4243', 'i_l_4344', 'i_l_4445', 'i_l_5152', 'i_l_5253', 'i_l_5354', 'i_l_5455', 'i_l_1521', 'i_l_2531', 'i_l_3541', 'i_l_4551', 'v_11', 'v_12', 'i_13', 'v_14', 'v_15', 'v_21', 'v_22', 'i_23', 'v_24', 'v_25', 'v_31', 'v_32', 'i_33', 'v_34', 'v_35', 'v_41', 'v_42', 'i_43', 'v_44', 'v_45', 'v_51', 'v_52', 'i_53', 'v_54', 'v_55', 'i_11', 'i_12', 'i_14', 'i_15', 'i_21', 'i_22', 'i_24', 'i_25', 'i_31', 'i_32', 'i_34', 'i_35', 'i_41', 'i_42', 'i_44', 'i_45', 'i_51', 'i_52', 'i_54', 'i_55']
self.xy_list = self.x_list + self.y_run_list
self.y_ini_list = ['i_l_1112', 'i_l_1213', 'i_l_1314', 'i_l_1415', 'i_l_2122', 'i_l_2223', 'i_l_2324', 'i_l_2425', 'i_l_3132', 'i_l_3233', 'i_l_3334', 'i_l_3435', 'i_l_4142', 'i_l_4243', 'i_l_4344', 'i_l_4445', 'i_l_5152', 'i_l_5253', 'i_l_5354', 'i_l_5455', 'i_l_1521', 'i_l_2531', 'i_l_3541', 'i_l_4551', 'v_11', 'v_12', 'i_13', 'v_14', 'v_15', 'v_21', 'v_22', 'i_23', 'v_24', 'v_25', 'v_31', 'v_32', 'i_33', 'v_34', 'v_35', 'v_41', 'v_42', 'i_43', 'v_44', 'v_45', 'v_51', 'v_52', 'i_53', 'v_54', 'v_55', 'i_11', 'i_12', 'i_14', 'i_15', 'i_21', 'i_22', 'i_24', 'i_25', 'i_31', 'i_32', 'i_34', 'i_35', 'i_41', 'i_42', 'i_44', 'i_45', 'i_51', 'i_52', 'i_54', 'i_55']
self.xy_ini_list = self.x_list + self.y_ini_list
self.t = 0.0
self.it = 0
self.it_store = 0
self.xy_prev = np.zeros((self.N_x+self.N_y,1))
self.initialization_tol = 1e-6
self.N_u = len(self.inputs_run_list)
self.sopt_root_method='hybr'
self.sopt_root_jac=True
self.u_ini_list = self.inputs_ini_list
self.u_ini_values_list = self.inputs_ini_values_list
self.u_run_list = self.inputs_run_list
self.u_run_values_list = self.inputs_run_values_list
self.N_u = len(self.u_run_list)
Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols = nonzeros()
self.Fx_ini_rows = np.array(Fx_ini_rows)
if len(Fx_ini_rows) == 1:
self.Fx_ini_rows = np.array([[Fx_ini_rows]]).reshape(1,)
self.Fx_ini_cols = np.array([[Fx_ini_cols]]).reshape(1,)
self.Fx_ini_cols = np.array(Fx_ini_cols)
self.Fy_ini_rows = np.array(Fy_ini_rows)
self.Fy_ini_cols = np.array(Fy_ini_cols)
self.Gx_ini_rows = np.array(Gx_ini_rows)
self.Gx_ini_cols = np.array(Gx_ini_cols)
self.Gy_ini_rows = np.array(Gy_ini_rows)
self.Gy_ini_cols = np.array(Gy_ini_cols)
self.yini2urun = list(set(self.inputs_run_list).intersection(set(self.y_ini_list)))
self.uini2yrun = list(set(self.y_run_list).intersection(set(self.inputs_ini_list)))
self.update()
def update(self):
self.N_steps = int(np.ceil(self.t_end/self.Dt))
dt = [
('t_end', np.float64),
('Dt', np.float64),
('decimation', np.float64),
('itol', np.float64),
('Dt_max', np.float64),
('Dt_min', np.float64),
('solvern', np.int64),
('imax', np.int64),
('N_steps', np.int64),
('N_store', np.int64),
('N_x', np.int64),
('N_y', np.int64),
('N_z', np.int64),
('t', np.float64),
('it', np.int64),
('it_store', np.int64),
('idx', np.int64),
('idy', np.int64),
('f', np.float64, (self.N_x,1)),
('x', np.float64, (self.N_x,1)),
('x_0', np.float64, (self.N_x,1)),
('g', np.float64, (self.N_y,1)),
('y_run', np.float64, (self.N_y,1)),
('y_ini', np.float64, (self.N_y,1)),
('u_run', np.float64, (self.N_u,1)),
('y_0', np.float64, (self.N_y,1)),
('h', np.float64, (self.N_z,1)),
('Fx', np.float64, (self.N_x,self.N_x)),
('Fy', np.float64, (self.N_x,self.N_y)),
('Gx', np.float64, (self.N_y,self.N_x)),
('Gy', np.float64, (self.N_y,self.N_y)),
('Fu', np.float64, (self.N_x,self.N_u)),
('Gu', np.float64, (self.N_y,self.N_u)),
('Hx', np.float64, (self.N_z,self.N_x)),
('Hy', np.float64, (self.N_z,self.N_y)),
('Hu', np.float64, (self.N_z,self.N_u)),
('Fx_ini', np.float64, (self.N_x,self.N_x)),
('Fy_ini', np.float64, (self.N_x,self.N_y)),
('Gx_ini', np.float64, (self.N_y,self.N_x)),
('Gy_ini', np.float64, (self.N_y,self.N_y)),
('T', np.float64, (self.N_store+1,1)),
('X', np.float64, (self.N_store+1,self.N_x)),
('Y', np.float64, (self.N_store+1,self.N_y)),
('Z', np.float64, (self.N_store+1,self.N_z)),
('iters', np.float64, (self.N_store+1,1)),
('store', np.int64),
('Fx_ini_rows', np.int64, self.Fx_ini_rows.shape),
('Fx_ini_cols', np.int64, self.Fx_ini_cols.shape),
('Fy_ini_rows', np.int64, self.Fy_ini_rows.shape),
('Fy_ini_cols', np.int64, self.Fy_ini_cols.shape),
('Gx_ini_rows', np.int64, self.Gx_ini_rows.shape),
('Gx_ini_cols', np.int64, self.Gx_ini_cols.shape),
('Gy_ini_rows', np.int64, self.Gy_ini_rows.shape),
('Gy_ini_cols', np.int64, self.Gy_ini_cols.shape),
('Ac_ini', np.float64, ((self.N_x+self.N_y,self.N_x+self.N_y))),
('fg', np.float64, ((self.N_x+self.N_y,1))),
]
values = [
self.t_end,
self.Dt,
self.decimation,
self.itol,
self.Dt_max,
self.Dt_min,
self.solvern,
self.imax,
self.N_steps,
self.N_store,
self.N_x,
self.N_y,
self.N_z,
self.t,
self.it,
self.it_store,
0, # idx
0, # idy
np.zeros((self.N_x,1)), # f
np.zeros((self.N_x,1)), # x
np.zeros((self.N_x,1)), # x_0
np.zeros((self.N_y,1)), # g
np.zeros((self.N_y,1)), # y_run
np.zeros((self.N_y,1)), # y_ini
np.zeros((self.N_u,1)), # u_run
np.zeros((self.N_y,1)), # y_0
np.zeros((self.N_z,1)), # h
np.zeros((self.N_x,self.N_x)), # Fx
np.zeros((self.N_x,self.N_y)), # Fy
np.zeros((self.N_y,self.N_x)), # Gx
np.zeros((self.N_y,self.N_y)), # Gy
np.zeros((self.N_x,self.N_u)), # Fu
np.zeros((self.N_y,self.N_u)), # Gu
np.zeros((self.N_z,self.N_x)), # Hx
np.zeros((self.N_z,self.N_y)), # Hy
np.zeros((self.N_z,self.N_u)), # Hu
np.zeros((self.N_x,self.N_x)), # Fx_ini
np.zeros((self.N_x,self.N_y)), # Fy_ini
np.zeros((self.N_y,self.N_x)), # Gx_ini
np.zeros((self.N_y,self.N_y)), # Gy_ini
np.zeros((self.N_store+1,1)), # T
np.zeros((self.N_store+1,self.N_x)), # X
np.zeros((self.N_store+1,self.N_y)), # Y
np.zeros((self.N_store+1,self.N_z)), # Z
np.zeros((self.N_store+1,1)), # iters
1,
self.Fx_ini_rows,
self.Fx_ini_cols,
self.Fy_ini_rows,
self.Fy_ini_cols,
self.Gx_ini_rows,
self.Gx_ini_cols,
self.Gy_ini_rows,
self.Gy_ini_cols,
np.zeros((self.N_x+self.N_y,self.N_x+self.N_y)),
np.zeros((self.N_x+self.N_y,1)),
]
dt += [(item,np.float64) for item in self.params_list]
values += [item for item in self.params_values_list]
for item_id,item_val in zip(self.inputs_ini_list,self.inputs_ini_values_list):
if item_id in self.inputs_run_list: continue
dt += [(item_id,np.float64)]
values += [item_val]
dt += [(item,np.float64) for item in self.inputs_run_list]
values += [item for item in self.inputs_run_values_list]
self.struct = np.rec.array([tuple(values)], dtype=np.dtype(dt))
xy0 = np.zeros((self.N_x+self.N_y,))
self.ini_dae_jacobian_nn(xy0)
self.run_dae_jacobian_nn(xy0)
def load_params(self,data_input):
if type(data_input) == str:
json_file = data_input
self.json_file = json_file
self.json_data = open(json_file).read().replace("'",'"')
data = json.loads(self.json_data)
elif type(data_input) == dict:
data = data_input
self.data = data
for item in self.data:
self.struct[0][item] = self.data[item]
if item in self.params_list:
self.params_values_list[self.params_list.index(item)] = self.data[item]
elif item in self.inputs_ini_list:
self.inputs_ini_values_list[self.inputs_ini_list.index(item)] = self.data[item]
elif item in self.inputs_run_list:
self.inputs_run_values_list[self.inputs_run_list.index(item)] = self.data[item]
else:
print(f'parameter or input {item} not found')
def ini_problem(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,2)
ini(self.struct,3)
else:
ini.py_func(self.struct,2)
ini.py_func(self.struct,3)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_problem(self,x):
t = self.struct[0].t
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
run(t,self.struct,2)
run(t,self.struct,3)
run(t,self.struct,10)
run(t,self.struct,11)
run(t,self.struct,12)
run(t,self.struct,13)
else:
run.py_func(t,self.struct,2)
run.py_func(t,self.struct,3)
run.py_func(t,self.struct,10)
run.py_func(t,self.struct,11)
run.py_func(t,self.struct,12)
run.py_func(t,self.struct,13)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,13)
A_c = np.block([[self.struct[0].Fx,self.struct[0].Fy],
[self.struct[0].Gx,self.struct[0].Gy]])
return A_c
def run_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run_nn(0.0,self.struct,10)
run_nn(0.0,self.struct,11)
run_nn(0.0,self.struct,12)
run_nn(0.0,self.struct,13)
def eval_jacobians(self):
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
return 1
def ini_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,10)
ini(self.struct,11)
else:
ini.py_func(self.struct,10)
ini.py_func(self.struct,11)
A_c = np.block([[self.struct[0].Fx_ini,self.struct[0].Fy_ini],
[self.struct[0].Gx_ini,self.struct[0].Gy_ini]])
return A_c
def ini_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
ini_nn(self.struct,10)
ini_nn(self.struct,11)
def f_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_odeint(self,x,t):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_ivp(self,t,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def Fx_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,10)
return self.struct[0].Fx
def eval_A(self):
Fx = self.struct[0].Fx
Fy = self.struct[0].Fy
Gx = self.struct[0].Gx
Gy = self.struct[0].Gy
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
self.A = A
return A
def eval_A_ini(self):
Fx = self.struct[0].Fx_ini
Fy = self.struct[0].Fy_ini
Gx = self.struct[0].Gx_ini
Gy = self.struct[0].Gy_ini
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
return A
def reset(self):
for param,param_value in zip(self.params_list,self.params_values_list):
self.struct[0][param] = param_value
for input_name,input_value in zip(self.inputs_ini_list,self.inputs_ini_values_list):
self.struct[0][input_name] = input_value
for input_name,input_value in zip(self.inputs_run_list,self.inputs_run_values_list):
self.struct[0][input_name] = input_value
def simulate(self,events,xy0=0):
# initialize both the ini and the run system
self.initialize(events,xy0=xy0)
# simulation run
for event in events:
# make all the desired changes
self.run([event])
# post process
T,X,Y,Z = self.post()
return T,X,Y,Z
def run(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
daesolver(self.struct) # run until next event
return 1
def rtrun(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
self.struct[0].it_store = self.struct[0].N_store-1
daesolver(self.struct) # run until next event
return 1
def post(self):
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return T,X,Y,Z
def save_0(self,file_name = 'xy_0.json'):
xy_0_dict = {}
for item in self.x_list:
xy_0_dict.update({item:self.get_value(item)})
for item in self.y_ini_list:
xy_0_dict.update({item:self.get_value(item)})
xy_0_str = json.dumps(xy_0_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(xy_0_str)
def load_0(self,file_name = 'xy_0.json'):
with open(file_name) as fobj:
xy_0_str = fobj.read()
xy_0_dict = json.loads(xy_0_str)
for item in xy_0_dict:
if item in self.x_list:
self.xy_prev[self.x_list.index(item)] = xy_0_dict[item]
if item in self.y_ini_list:
self.xy_prev[self.y_ini_list.index(item)+self.N_x] = xy_0_dict[item]
def initialize(self,events=[{}],xy0=0,compile=True):
'''
Parameters
----------
events : list of dict
List of events; each event is a dictionary with the inputs and parameters
that need to be changed (the first event is applied before initialization).
xy0 : float, str or dict, optional
0 means all states start from zero as the initial guess.
If another number is given, all states start from that value.
If 'prev', the last known initialization result is used as the initial guess.
A dict or a saved file name can also be given to load a stored operating point.
Returns
-------
initialization_ok : bool
True if the initialization problem converged, False otherwise.
'''
self.compile = compile
# simulation parameters
self.struct[0].it = 0 # set time step to zero
self.struct[0].it_store = 0 # set storage to zero
self.struct[0].t = 0.0 # set time to zero
# initialization
it_event = 0
event = events[it_event]
for item in event:
self.struct[0][item] = event[item]
## compute initial conditions using x and y_ini
if type(xy0) == str:
if xy0 == 'prev':
xy0 = self.xy_prev
else:
self.load_0(xy0)
xy0 = self.xy_prev
elif type(xy0) == dict:
with open('xy_0.json','w') as fobj:
fobj.write(json.dumps(xy0))
self.load_0('xy_0.json')
xy0 = self.xy_prev
else:
if xy0 == 0:
xy0 = np.zeros(self.N_x+self.N_y)
elif xy0 == 1:
xy0 = np.ones(self.N_x+self.N_y)
else:
xy0 = xy0*np.ones(self.N_x+self.N_y)
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.ini_problem, xy0,
jac=self.ini_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.ini_problem, xy0, method=self.sopt_root_method)
self.initialization_ok = True
if sol.success == False:
print('initialization not found!')
self.initialization_ok = False
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
if self.initialization_ok:
xy = sol.x
self.xy_prev = xy
self.struct[0].x[:,0] = xy[0:self.N_x]
self.struct[0].y_run[:,0] = xy[self.N_x:]
## y_ini to u_run
for item in self.inputs_run_list:
if item in self.y_ini_list:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.inputs_ini_list:
if item in self.y_run_list:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.run_problem, xy0,
jac=self.run_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.run_problem, xy0, method=self.sopt_root_method)
if self.compile:
# evaluate f and g
run(0.0,self.struct,2)
run(0.0,self.struct,3)
# evaluate run jacobians
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,14)
else:
# evaluate f and g
run.py_func(0.0,self.struct,2)
run.py_func(0.0,self.struct,3)
# evaluate run jacobians
run.py_func(0.0,self.struct,10)
run.py_func(0.0,self.struct,11)
run.py_func(0.0,self.struct,12)
run.py_func(0.0,self.struct,14)
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return self.initialization_ok
def get_value(self,name):
if name in self.inputs_run_list:
value = self.struct[0][name]
if name in self.x_list:
idx = self.x_list.index(name)
value = self.struct[0].x[idx,0]
if name in self.y_run_list:
idy = self.y_run_list.index(name)
value = self.struct[0].y_run[idy,0]
if name in self.params_list:
value = self.struct[0][name]
if name in self.outputs_list:
value = self.struct[0].h[self.outputs_list.index(name),0]
return value
def get_values(self,name):
if name in self.x_list:
values = self.X[:,self.x_list.index(name)]
if name in self.y_run_list:
values = self.Y[:,self.y_run_list.index(name)]
if name in self.outputs_list:
values = self.Z[:,self.outputs_list.index(name)]
return values
def get_mvalue(self,names):
'''
Parameters
----------
names : list
List of variable names whose values are returned.
Returns
-------
mvalue : list
List with the value of each requested variable, in the same order as names.
'''
mvalue = []
for name in names:
mvalue += [self.get_value(name)]
return mvalue
def set_value(self,name_,value):
if name_ in self.inputs_run_list:
self.struct[0][name_] = value
return
elif name_ in self.params_list:
self.struct[0][name_] = value
return
elif name_ in self.inputs_ini_list:
self.struct[0][name_] = value
return
else:
print(f'Input or parameter {name_} not found.')
def set_values(self,dictionary):
for item in dictionary:
self.set_value(item,dictionary[item])
def report_x(self,value_format='5.2f', decimals=2):
for item in self.x_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_y(self,value_format='5.2f', decimals=2):
for item in self.y_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_u(self,value_format='5.2f', decimals=2):
for item in self.inputs_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_z(self,value_format='5.2f', decimals=2):
for item in self.outputs_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_params(self,value_format='5.2f', decimals=2):
for item in self.params_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def get_x(self):
return self.struct[0].x
def ss(self):
ssate(self.struct,self.xy_prev.reshape(len(self.xy_prev),1))
## y_ini to y_run
self.struct[0].y_run = self.struct[0].y_ini
## y_ini to u_run
for item in self.yini2urun:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.uini2yrun:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
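# Usage sketch (hypothetical, relying on the module-level ini/run/daesolver helpers
# that the rest of this generated file is assumed to define):
#
#   model = railway_5vsc_class()
#   model.initialize([{'t_end': 1.0}], xy0=1)                   # solve the initialization problem
#   model.report_x()                                            # print the dynamic (voltage) states
#   T, X, Y, Z = model.simulate([{'t_end': 10.0}], xy0='prev')  # time-domain run from the found point
#
# ini() below is the numba-compiled initialization model: mode 2 evaluates f, mode 3
# evaluates g and h, and modes 10/11 fill the Fx_ini/Fy_ini/Gx_ini/Gy_ini Jacobians.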
@numba.njit(cache=True)
def ini(struct,mode):
# Parameters:
R_1112 = struct[0].R_1112
R_1213 = struct[0].R_1213
R_1314 = struct[0].R_1314
R_1415 = struct[0].R_1415
R_1521 = struct[0].R_1521
R_2122 = struct[0].R_2122
R_2223 = struct[0].R_2223
R_2324 = struct[0].R_2324
R_2425 = struct[0].R_2425
R_2531 = struct[0].R_2531
R_3132 = struct[0].R_3132
R_3233 = struct[0].R_3233
R_3334 = struct[0].R_3334
R_3435 = struct[0].R_3435
R_3541 = struct[0].R_3541
R_4142 = struct[0].R_4142
R_4243 = struct[0].R_4243
R_4344 = struct[0].R_4344
R_4445 = struct[0].R_4445
R_4551 = struct[0].R_4551
R_5152 = struct[0].R_5152
R_5253 = struct[0].R_5253
R_5354 = struct[0].R_5354
R_5455 = struct[0].R_5455
p_11 = struct[0].p_11
p_12 = struct[0].p_12
p_14 = struct[0].p_14
p_15 = struct[0].p_15
p_21 = struct[0].p_21
p_22 = struct[0].p_22
p_24 = struct[0].p_24
p_25 = struct[0].p_25
p_31 = struct[0].p_31
p_32 = struct[0].p_32
p_34 = struct[0].p_34
p_35 = struct[0].p_35
p_41 = struct[0].p_41
p_42 = struct[0].p_42
p_44 = struct[0].p_44
p_45 = struct[0].p_45
p_51 = struct[0].p_51
p_52 = struct[0].p_52
p_54 = struct[0].p_54
p_55 = struct[0].p_55
# Inputs:
Dv_r_13 = struct[0].Dv_r_13
Dv_r_23 = struct[0].Dv_r_23
Dv_r_33 = struct[0].Dv_r_33
Dv_r_43 = struct[0].Dv_r_43
Dv_r_53 = struct[0].Dv_r_53
v_nom = struct[0].v_nom
T_v = struct[0].T_v
K_r = struct[0].K_r
# Dynamical states:
v_13 = struct[0].x[0,0]
v_23 = struct[0].x[1,0]
v_33 = struct[0].x[2,0]
v_43 = struct[0].x[3,0]
v_53 = struct[0].x[4,0]
# Algebraic states:
i_l_1112 = struct[0].y_ini[0,0]
i_l_1213 = struct[0].y_ini[1,0]
i_l_1314 = struct[0].y_ini[2,0]
i_l_1415 = struct[0].y_ini[3,0]
i_l_2122 = struct[0].y_ini[4,0]
i_l_2223 = struct[0].y_ini[5,0]
i_l_2324 = struct[0].y_ini[6,0]
i_l_2425 = struct[0].y_ini[7,0]
i_l_3132 = struct[0].y_ini[8,0]
i_l_3233 = struct[0].y_ini[9,0]
i_l_3334 = struct[0].y_ini[10,0]
i_l_3435 = struct[0].y_ini[11,0]
i_l_4142 = struct[0].y_ini[12,0]
i_l_4243 = struct[0].y_ini[13,0]
i_l_4344 = struct[0].y_ini[14,0]
i_l_4445 = struct[0].y_ini[15,0]
i_l_5152 = struct[0].y_ini[16,0]
i_l_5253 = struct[0].y_ini[17,0]
i_l_5354 = struct[0].y_ini[18,0]
i_l_5455 = struct[0].y_ini[19,0]
i_l_1521 = struct[0].y_ini[20,0]
i_l_2531 = struct[0].y_ini[21,0]
i_l_3541 = struct[0].y_ini[22,0]
i_l_4551 = struct[0].y_ini[23,0]
v_11 = struct[0].y_ini[24,0]
v_12 = struct[0].y_ini[25,0]
i_13 = struct[0].y_ini[26,0]
v_14 = struct[0].y_ini[27,0]
v_15 = struct[0].y_ini[28,0]
v_21 = struct[0].y_ini[29,0]
v_22 = struct[0].y_ini[30,0]
i_23 = struct[0].y_ini[31,0]
v_24 = struct[0].y_ini[32,0]
v_25 = struct[0].y_ini[33,0]
v_31 = struct[0].y_ini[34,0]
v_32 = struct[0].y_ini[35,0]
i_33 = struct[0].y_ini[36,0]
v_34 = struct[0].y_ini[37,0]
v_35 = struct[0].y_ini[38,0]
v_41 = struct[0].y_ini[39,0]
v_42 = struct[0].y_ini[40,0]
i_43 = struct[0].y_ini[41,0]
v_44 = struct[0].y_ini[42,0]
v_45 = struct[0].y_ini[43,0]
v_51 = struct[0].y_ini[44,0]
v_52 = struct[0].y_ini[45,0]
i_53 = struct[0].y_ini[46,0]
v_54 = struct[0].y_ini[47,0]
v_55 = struct[0].y_ini[48,0]
i_11 = struct[0].y_ini[49,0]
i_12 = struct[0].y_ini[50,0]
i_14 = struct[0].y_ini[51,0]
i_15 = struct[0].y_ini[52,0]
i_21 = struct[0].y_ini[53,0]
i_22 = struct[0].y_ini[54,0]
i_24 = struct[0].y_ini[55,0]
i_25 = struct[0].y_ini[56,0]
i_31 = struct[0].y_ini[57,0]
i_32 = struct[0].y_ini[58,0]
i_34 = struct[0].y_ini[59,0]
i_35 = struct[0].y_ini[60,0]
i_41 = struct[0].y_ini[61,0]
i_42 = struct[0].y_ini[62,0]
i_44 = struct[0].y_ini[63,0]
i_45 = struct[0].y_ini[64,0]
i_51 = struct[0].y_ini[65,0]
i_52 = struct[0].y_ini[66,0]
i_54 = struct[0].y_ini[67,0]
i_55 = struct[0].y_ini[68,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (-Dv_r_13 - K_r*i_13*v_13 - v_13 + v_nom)/T_v
struct[0].f[1,0] = (-Dv_r_23 - K_r*i_23*v_23 - v_23 + v_nom)/T_v
struct[0].f[2,0] = (-Dv_r_33 - K_r*i_33*v_33 - v_33 + v_nom)/T_v
struct[0].f[3,0] = (-Dv_r_43 - K_r*i_43*v_43 - v_43 + v_nom)/T_v
struct[0].f[4,0] = (-Dv_r_53 - K_r*i_53*v_53 - v_53 + v_nom)/T_v
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy_ini) @ np.ascontiguousarray(struct[0].y_ini)
struct[0].g[1,0] = -R_1213*i_l_1213 + v_12 - v_13
struct[0].g[2,0] = -R_1314*i_l_1314 + v_13 - v_14
struct[0].g[5,0] = -R_2223*i_l_2223 + v_22 - v_23
struct[0].g[6,0] = -R_2324*i_l_2324 + v_23 - v_24
struct[0].g[9,0] = -R_3233*i_l_3233 + v_32 - v_33
struct[0].g[10,0] = -R_3334*i_l_3334 + v_33 - v_34
struct[0].g[13,0] = -R_4243*i_l_4243 + v_42 - v_43
struct[0].g[14,0] = -R_4344*i_l_4344 + v_43 - v_44
struct[0].g[17,0] = -R_5253*i_l_5253 + v_52 - v_53
struct[0].g[18,0] = -R_5354*i_l_5354 + v_53 - v_54
struct[0].g[49,0] = i_11*v_11 - p_11
struct[0].g[50,0] = i_12*v_12 - p_12
struct[0].g[51,0] = i_14*v_14 - p_14
struct[0].g[52,0] = i_15*v_15 - p_15
struct[0].g[53,0] = i_21*v_21 - p_21
struct[0].g[54,0] = i_22*v_22 - p_22
struct[0].g[55,0] = i_24*v_24 - p_24
struct[0].g[56,0] = i_25*v_25 - p_25
struct[0].g[57,0] = i_31*v_31 - p_31
struct[0].g[58,0] = i_32*v_32 - p_32
struct[0].g[59,0] = i_34*v_34 - p_34
struct[0].g[60,0] = i_35*v_35 - p_35
struct[0].g[61,0] = i_41*v_41 - p_41
struct[0].g[62,0] = i_42*v_42 - p_42
struct[0].g[63,0] = i_44*v_44 - p_44
struct[0].g[64,0] = i_45*v_45 - p_45
struct[0].g[65,0] = i_51*v_51 - p_51
struct[0].g[66,0] = i_52*v_52 - p_52
struct[0].g[67,0] = i_54*v_54 - p_54
struct[0].g[68,0] = i_55*v_55 - p_55
# Outputs:
if mode == 3:
struct[0].h[0,0] = i_13*v_13
struct[0].h[1,0] = v_13
struct[0].h[2,0] = i_23*v_23
struct[0].h[3,0] = v_23
struct[0].h[4,0] = i_33*v_33
struct[0].h[5,0] = v_33
struct[0].h[6,0] = i_43*v_43
struct[0].h[7,0] = v_43
struct[0].h[8,0] = i_53*v_53
struct[0].h[9,0] = v_53
if mode == 10:
struct[0].Fx_ini[0,0] = (-K_r*i_13 - 1)/T_v
struct[0].Fx_ini[1,1] = (-K_r*i_23 - 1)/T_v
struct[0].Fx_ini[2,2] = (-K_r*i_33 - 1)/T_v
struct[0].Fx_ini[3,3] = (-K_r*i_43 - 1)/T_v
struct[0].Fx_ini[4,4] = (-K_r*i_53 - 1)/T_v
if mode == 11:
struct[0].Fy_ini[0,26] = -K_r*v_13/T_v
struct[0].Fy_ini[1,31] = -K_r*v_23/T_v
struct[0].Fy_ini[2,36] = -K_r*v_33/T_v
struct[0].Fy_ini[3,41] = -K_r*v_43/T_v
struct[0].Fy_ini[4,46] = -K_r*v_53/T_v
struct[0].Gx_ini[1,0] = -1
struct[0].Gx_ini[2,0] = 1
struct[0].Gx_ini[5,1] = -1
struct[0].Gx_ini[6,1] = 1
struct[0].Gx_ini[9,2] = -1
struct[0].Gx_ini[10,2] = 1
struct[0].Gx_ini[13,3] = -1
struct[0].Gx_ini[14,3] = 1
struct[0].Gx_ini[17,4] = -1
struct[0].Gx_ini[18,4] = 1
struct[0].Gy_ini[0,0] = -R_1112
struct[0].Gy_ini[1,1] = -R_1213
struct[0].Gy_ini[2,2] = -R_1314
struct[0].Gy_ini[3,3] = -R_1415
struct[0].Gy_ini[4,4] = -R_2122
struct[0].Gy_ini[5,5] = -R_2223
struct[0].Gy_ini[6,6] = -R_2324
struct[0].Gy_ini[7,7] = -R_2425
struct[0].Gy_ini[8,8] = -R_3132
struct[0].Gy_ini[9,9] = -R_3233
struct[0].Gy_ini[10,10] = -R_3334
struct[0].Gy_ini[11,11] = -R_3435
struct[0].Gy_ini[12,12] = -R_4142
struct[0].Gy_ini[13,13] = -R_4243
struct[0].Gy_ini[14,14] = -R_4344
struct[0].Gy_ini[15,15] = -R_4445
struct[0].Gy_ini[16,16] = -R_5152
struct[0].Gy_ini[17,17] = -R_5253
struct[0].Gy_ini[18,18] = -R_5354
struct[0].Gy_ini[19,19] = -R_5455
struct[0].Gy_ini[20,20] = -R_1521
struct[0].Gy_ini[21,21] = -R_2531
struct[0].Gy_ini[22,22] = -R_3541
struct[0].Gy_ini[23,23] = -R_4551
struct[0].Gy_ini[49,24] = i_11
struct[0].Gy_ini[49,49] = v_11
struct[0].Gy_ini[50,25] = i_12
struct[0].Gy_ini[50,50] = v_12
struct[0].Gy_ini[51,27] = i_14
struct[0].Gy_ini[51,51] = v_14
struct[0].Gy_ini[52,28] = i_15
struct[0].Gy_ini[52,52] = v_15
struct[0].Gy_ini[53,29] = i_21
struct[0].Gy_ini[53,53] = v_21
struct[0].Gy_ini[54,30] = i_22
struct[0].Gy_ini[54,54] = v_22
struct[0].Gy_ini[55,32] = i_24
struct[0].Gy_ini[55,55] = v_24
struct[0].Gy_ini[56,33] = i_25
struct[0].Gy_ini[56,56] = v_25
struct[0].Gy_ini[57,34] = i_31
struct[0].Gy_ini[57,57] = v_31
struct[0].Gy_ini[58,35] = i_32
struct[0].Gy_ini[58,58] = v_32
struct[0].Gy_ini[59,37] = i_34
struct[0].Gy_ini[59,59] = v_34
struct[0].Gy_ini[60,38] = i_35
struct[0].Gy_ini[60,60] = v_35
struct[0].Gy_ini[61,39] = i_41
struct[0].Gy_ini[61,61] = v_41
struct[0].Gy_ini[62,40] = i_42
struct[0].Gy_ini[62,62] = v_42
struct[0].Gy_ini[63,42] = i_44
struct[0].Gy_ini[63,63] = v_44
struct[0].Gy_ini[64,43] = i_45
struct[0].Gy_ini[64,64] = v_45
struct[0].Gy_ini[65,44] = i_51
struct[0].Gy_ini[65,65] = v_51
struct[0].Gy_ini[66,45] = i_52
struct[0].Gy_ini[66,66] = v_52
struct[0].Gy_ini[67,47] = i_54
struct[0].Gy_ini[67,67] = v_54
struct[0].Gy_ini[68,48] = i_55
struct[0].Gy_ini[68,68] = v_55
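# run(): numba-compiled model evaluation for the running system; mode 2 fills the
# differential equations f, mode 3 fills the algebraic equations g and the outputs h,
# modes 10-11 fill the Fx/Fy/Gx/Gy Jacobians, and modes above 12 fill Fu/Hx/Hy for
# input and output sensitivities.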
@numba.njit(cache=True)
def run(t,struct,mode):
# Parameters:
R_1112 = struct[0].R_1112
R_1213 = struct[0].R_1213
R_1314 = struct[0].R_1314
R_1415 = struct[0].R_1415
R_1521 = struct[0].R_1521
R_2122 = struct[0].R_2122
R_2223 = struct[0].R_2223
R_2324 = struct[0].R_2324
R_2425 = struct[0].R_2425
R_2531 = struct[0].R_2531
R_3132 = struct[0].R_3132
R_3233 = struct[0].R_3233
R_3334 = struct[0].R_3334
R_3435 = struct[0].R_3435
R_3541 = struct[0].R_3541
R_4142 = struct[0].R_4142
R_4243 = struct[0].R_4243
R_4344 = struct[0].R_4344
R_4445 = struct[0].R_4445
R_4551 = struct[0].R_4551
R_5152 = struct[0].R_5152
R_5253 = struct[0].R_5253
R_5354 = struct[0].R_5354
R_5455 = struct[0].R_5455
p_11 = struct[0].p_11
p_12 = struct[0].p_12
p_14 = struct[0].p_14
p_15 = struct[0].p_15
p_21 = struct[0].p_21
p_22 = struct[0].p_22
p_24 = struct[0].p_24
p_25 = struct[0].p_25
p_31 = struct[0].p_31
p_32 = struct[0].p_32
p_34 = struct[0].p_34
p_35 = struct[0].p_35
p_41 = struct[0].p_41
p_42 = struct[0].p_42
p_44 = struct[0].p_44
p_45 = struct[0].p_45
p_51 = struct[0].p_51
p_52 = struct[0].p_52
p_54 = struct[0].p_54
p_55 = struct[0].p_55
# Inputs:
Dv_r_13 = struct[0].Dv_r_13
Dv_r_23 = struct[0].Dv_r_23
Dv_r_33 = struct[0].Dv_r_33
Dv_r_43 = struct[0].Dv_r_43
Dv_r_53 = struct[0].Dv_r_53
v_nom = struct[0].v_nom
T_v = struct[0].T_v
K_r = struct[0].K_r
# Dynamical states:
v_13 = struct[0].x[0,0]
v_23 = struct[0].x[1,0]
v_33 = struct[0].x[2,0]
v_43 = struct[0].x[3,0]
v_53 = struct[0].x[4,0]
# Algebraic states:
i_l_1112 = struct[0].y_run[0,0]
i_l_1213 = struct[0].y_run[1,0]
i_l_1314 = struct[0].y_run[2,0]
i_l_1415 = struct[0].y_run[3,0]
i_l_2122 = struct[0].y_run[4,0]
i_l_2223 = struct[0].y_run[5,0]
i_l_2324 = struct[0].y_run[6,0]
i_l_2425 = struct[0].y_run[7,0]
i_l_3132 = struct[0].y_run[8,0]
i_l_3233 = struct[0].y_run[9,0]
i_l_3334 = struct[0].y_run[10,0]
i_l_3435 = struct[0].y_run[11,0]
i_l_4142 = struct[0].y_run[12,0]
i_l_4243 = struct[0].y_run[13,0]
i_l_4344 = struct[0].y_run[14,0]
i_l_4445 = struct[0].y_run[15,0]
i_l_5152 = struct[0].y_run[16,0]
i_l_5253 = struct[0].y_run[17,0]
i_l_5354 = struct[0].y_run[18,0]
i_l_5455 = struct[0].y_run[19,0]
i_l_1521 = struct[0].y_run[20,0]
i_l_2531 = struct[0].y_run[21,0]
i_l_3541 = struct[0].y_run[22,0]
i_l_4551 = struct[0].y_run[23,0]
v_11 = struct[0].y_run[24,0]
v_12 = struct[0].y_run[25,0]
i_13 = struct[0].y_run[26,0]
v_14 = struct[0].y_run[27,0]
v_15 = struct[0].y_run[28,0]
v_21 = struct[0].y_run[29,0]
v_22 = struct[0].y_run[30,0]
i_23 = struct[0].y_run[31,0]
v_24 = struct[0].y_run[32,0]
v_25 = struct[0].y_run[33,0]
v_31 = struct[0].y_run[34,0]
v_32 = struct[0].y_run[35,0]
i_33 = struct[0].y_run[36,0]
v_34 = struct[0].y_run[37,0]
v_35 = struct[0].y_run[38,0]
v_41 = struct[0].y_run[39,0]
v_42 = struct[0].y_run[40,0]
i_43 = struct[0].y_run[41,0]
v_44 = struct[0].y_run[42,0]
v_45 = struct[0].y_run[43,0]
v_51 = struct[0].y_run[44,0]
v_52 = struct[0].y_run[45,0]
i_53 = struct[0].y_run[46,0]
v_54 = struct[0].y_run[47,0]
v_55 = struct[0].y_run[48,0]
i_11 = struct[0].y_run[49,0]
i_12 = struct[0].y_run[50,0]
i_14 = struct[0].y_run[51,0]
i_15 = struct[0].y_run[52,0]
i_21 = struct[0].y_run[53,0]
i_22 = struct[0].y_run[54,0]
i_24 = struct[0].y_run[55,0]
i_25 = struct[0].y_run[56,0]
i_31 = struct[0].y_run[57,0]
i_32 = struct[0].y_run[58,0]
i_34 = struct[0].y_run[59,0]
i_35 = struct[0].y_run[60,0]
i_41 = struct[0].y_run[61,0]
i_42 = struct[0].y_run[62,0]
i_44 = struct[0].y_run[63,0]
i_45 = struct[0].y_run[64,0]
i_51 = struct[0].y_run[65,0]
i_52 = struct[0].y_run[66,0]
i_54 = struct[0].y_run[67,0]
i_55 = struct[0].y_run[68,0]
struct[0].u_run[0,0] = Dv_r_13
struct[0].u_run[1,0] = Dv_r_23
struct[0].u_run[2,0] = Dv_r_33
struct[0].u_run[3,0] = Dv_r_43
struct[0].u_run[4,0] = Dv_r_53
struct[0].u_run[5,0] = v_nom
struct[0].u_run[6,0] = T_v
struct[0].u_run[7,0] = K_r
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (-Dv_r_13 - K_r*i_13*v_13 - v_13 + v_nom)/T_v
struct[0].f[1,0] = (-Dv_r_23 - K_r*i_23*v_23 - v_23 + v_nom)/T_v
struct[0].f[2,0] = (-Dv_r_33 - K_r*i_33*v_33 - v_33 + v_nom)/T_v
struct[0].f[3,0] = (-Dv_r_43 - K_r*i_43*v_43 - v_43 + v_nom)/T_v
struct[0].f[4,0] = (-Dv_r_53 - K_r*i_53*v_53 - v_53 + v_nom)/T_v
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy) @ np.ascontiguousarray(struct[0].y_run) + np.ascontiguousarray(struct[0].Gu) @ np.ascontiguousarray(struct[0].u_run)
struct[0].g[1,0] = -R_1213*i_l_1213 + v_12 - v_13
struct[0].g[2,0] = -R_1314*i_l_1314 + v_13 - v_14
struct[0].g[5,0] = -R_2223*i_l_2223 + v_22 - v_23
struct[0].g[6,0] = -R_2324*i_l_2324 + v_23 - v_24
struct[0].g[9,0] = -R_3233*i_l_3233 + v_32 - v_33
struct[0].g[10,0] = -R_3334*i_l_3334 + v_33 - v_34
struct[0].g[13,0] = -R_4243*i_l_4243 + v_42 - v_43
struct[0].g[14,0] = -R_4344*i_l_4344 + v_43 - v_44
struct[0].g[17,0] = -R_5253*i_l_5253 + v_52 - v_53
struct[0].g[18,0] = -R_5354*i_l_5354 + v_53 - v_54
struct[0].g[49,0] = i_11*v_11 - p_11
struct[0].g[50,0] = i_12*v_12 - p_12
struct[0].g[51,0] = i_14*v_14 - p_14
struct[0].g[52,0] = i_15*v_15 - p_15
struct[0].g[53,0] = i_21*v_21 - p_21
struct[0].g[54,0] = i_22*v_22 - p_22
struct[0].g[55,0] = i_24*v_24 - p_24
struct[0].g[56,0] = i_25*v_25 - p_25
struct[0].g[57,0] = i_31*v_31 - p_31
struct[0].g[58,0] = i_32*v_32 - p_32
struct[0].g[59,0] = i_34*v_34 - p_34
struct[0].g[60,0] = i_35*v_35 - p_35
struct[0].g[61,0] = i_41*v_41 - p_41
struct[0].g[62,0] = i_42*v_42 - p_42
struct[0].g[63,0] = i_44*v_44 - p_44
struct[0].g[64,0] = i_45*v_45 - p_45
struct[0].g[65,0] = i_51*v_51 - p_51
struct[0].g[66,0] = i_52*v_52 - p_52
struct[0].g[67,0] = i_54*v_54 - p_54
struct[0].g[68,0] = i_55*v_55 - p_55
# Outputs:
if mode == 3:
struct[0].h[0,0] = i_13*v_13
struct[0].h[1,0] = v_13
struct[0].h[2,0] = i_23*v_23
struct[0].h[3,0] = v_23
struct[0].h[4,0] = i_33*v_33
struct[0].h[5,0] = v_33
struct[0].h[6,0] = i_43*v_43
struct[0].h[7,0] = v_43
struct[0].h[8,0] = i_53*v_53
struct[0].h[9,0] = v_53
if mode == 10:
struct[0].Fx[0,0] = (-K_r*i_13 - 1)/T_v
struct[0].Fx[1,1] = (-K_r*i_23 - 1)/T_v
struct[0].Fx[2,2] = (-K_r*i_33 - 1)/T_v
struct[0].Fx[3,3] = (-K_r*i_43 - 1)/T_v
struct[0].Fx[4,4] = (-K_r*i_53 - 1)/T_v
if mode == 11:
struct[0].Fy[0,26] = -K_r*v_13/T_v
struct[0].Fy[1,31] = -K_r*v_23/T_v
struct[0].Fy[2,36] = -K_r*v_33/T_v
struct[0].Fy[3,41] = -K_r*v_43/T_v
struct[0].Fy[4,46] = -K_r*v_53/T_v
struct[0].Gx[1,0] = -1
struct[0].Gx[2,0] = 1
struct[0].Gx[5,1] = -1
struct[0].Gx[6,1] = 1
struct[0].Gx[9,2] = -1
struct[0].Gx[10,2] = 1
struct[0].Gx[13,3] = -1
struct[0].Gx[14,3] = 1
struct[0].Gx[17,4] = -1
struct[0].Gx[18,4] = 1
struct[0].Gy[0,0] = -R_1112
struct[0].Gy[1,1] = -R_1213
struct[0].Gy[2,2] = -R_1314
struct[0].Gy[3,3] = -R_1415
struct[0].Gy[4,4] = -R_2122
struct[0].Gy[5,5] = -R_2223
struct[0].Gy[6,6] = -R_2324
struct[0].Gy[7,7] = -R_2425
struct[0].Gy[8,8] = -R_3132
struct[0].Gy[9,9] = -R_3233
struct[0].Gy[10,10] = -R_3334
struct[0].Gy[11,11] = -R_3435
struct[0].Gy[12,12] = -R_4142
struct[0].Gy[13,13] = -R_4243
struct[0].Gy[14,14] = -R_4344
struct[0].Gy[15,15] = -R_4445
struct[0].Gy[16,16] = -R_5152
struct[0].Gy[17,17] = -R_5253
struct[0].Gy[18,18] = -R_5354
struct[0].Gy[19,19] = -R_5455
struct[0].Gy[20,20] = -R_1521
struct[0].Gy[21,21] = -R_2531
struct[0].Gy[22,22] = -R_3541
struct[0].Gy[23,23] = -R_4551
struct[0].Gy[49,24] = i_11
struct[0].Gy[49,49] = v_11
struct[0].Gy[50,25] = i_12
struct[0].Gy[50,50] = v_12
struct[0].Gy[51,27] = i_14
struct[0].Gy[51,51] = v_14
struct[0].Gy[52,28] = i_15
struct[0].Gy[52,52] = v_15
struct[0].Gy[53,29] = i_21
struct[0].Gy[53,53] = v_21
struct[0].Gy[54,30] = i_22
struct[0].Gy[54,54] = v_22
struct[0].Gy[55,32] = i_24
struct[0].Gy[55,55] = v_24
struct[0].Gy[56,33] = i_25
struct[0].Gy[56,56] = v_25
struct[0].Gy[57,34] = i_31
struct[0].Gy[57,57] = v_31
struct[0].Gy[58,35] = i_32
struct[0].Gy[58,58] = v_32
struct[0].Gy[59,37] = i_34
struct[0].Gy[59,59] = v_34
struct[0].Gy[60,38] = i_35
struct[0].Gy[60,60] = v_35
struct[0].Gy[61,39] = i_41
struct[0].Gy[61,61] = v_41
struct[0].Gy[62,40] = i_42
struct[0].Gy[62,62] = v_42
struct[0].Gy[63,42] = i_44
struct[0].Gy[63,63] = v_44
struct[0].Gy[64,43] = i_45
struct[0].Gy[64,64] = v_45
struct[0].Gy[65,44] = i_51
struct[0].Gy[65,65] = v_51
struct[0].Gy[66,45] = i_52
struct[0].Gy[66,66] = v_52
struct[0].Gy[67,47] = i_54
struct[0].Gy[67,67] = v_54
struct[0].Gy[68,48] = i_55
struct[0].Gy[68,68] = v_55
if mode > 12:
struct[0].Fu[0,0] = -1/T_v
struct[0].Fu[0,5] = 1/T_v
struct[0].Fu[0,6] = -(-Dv_r_13 - K_r*i_13*v_13 - v_13 + v_nom)/T_v**2
struct[0].Fu[0,7] = -i_13*v_13/T_v
struct[0].Fu[1,1] = -1/T_v
struct[0].Fu[1,5] = 1/T_v
struct[0].Fu[1,6] = -(-Dv_r_23 - K_r*i_23*v_23 - v_23 + v_nom)/T_v**2
struct[0].Fu[1,7] = -i_23*v_23/T_v
struct[0].Fu[2,2] = -1/T_v
struct[0].Fu[2,5] = 1/T_v
struct[0].Fu[2,6] = -(-Dv_r_33 - K_r*i_33*v_33 - v_33 + v_nom)/T_v**2
struct[0].Fu[2,7] = -i_33*v_33/T_v
struct[0].Fu[3,3] = -1/T_v
struct[0].Fu[3,5] = 1/T_v
struct[0].Fu[3,6] = -(-Dv_r_43 - K_r*i_43*v_43 - v_43 + v_nom)/T_v**2
struct[0].Fu[3,7] = -i_43*v_43/T_v
struct[0].Fu[4,4] = -1/T_v
struct[0].Fu[4,5] = 1/T_v
struct[0].Fu[4,6] = -(-Dv_r_53 - K_r*i_53*v_53 - v_53 + v_nom)/T_v**2
struct[0].Fu[4,7] = -i_53*v_53/T_v
struct[0].Hx[0,0] = i_13
struct[0].Hx[1,0] = 1
struct[0].Hx[2,1] = i_23
struct[0].Hx[3,1] = 1
struct[0].Hx[4,2] = i_33
struct[0].Hx[5,2] = 1
struct[0].Hx[6,3] = i_43
struct[0].Hx[7,3] = 1
struct[0].Hx[8,4] = i_53
struct[0].Hx[9,4] = 1
struct[0].Hy[0,26] = v_13
struct[0].Hy[2,31] = v_23
struct[0].Hy[4,36] = v_33
struct[0].Hy[6,41] = v_43
struct[0].Hy[8,46] = v_53
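# ini_nn(): non-jitted counterpart of ini(); update() evaluates it once (through
# ini_dae_jacobian_nn) so the initialization equations and Jacobians can be
# evaluated without numba compilation.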
def ini_nn(struct,mode):
# Parameters:
R_1112 = struct[0].R_1112
R_1213 = struct[0].R_1213
R_1314 = struct[0].R_1314
R_1415 = struct[0].R_1415
R_1521 = struct[0].R_1521
R_2122 = struct[0].R_2122
R_2223 = struct[0].R_2223
R_2324 = struct[0].R_2324
R_2425 = struct[0].R_2425
R_2531 = struct[0].R_2531
R_3132 = struct[0].R_3132
R_3233 = struct[0].R_3233
R_3334 = struct[0].R_3334
R_3435 = struct[0].R_3435
R_3541 = struct[0].R_3541
R_4142 = struct[0].R_4142
R_4243 = struct[0].R_4243
R_4344 = struct[0].R_4344
R_4445 = struct[0].R_4445
R_4551 = struct[0].R_4551
R_5152 = struct[0].R_5152
R_5253 = struct[0].R_5253
R_5354 = struct[0].R_5354
R_5455 = struct[0].R_5455
p_11 = struct[0].p_11
p_12 = struct[0].p_12
p_14 = struct[0].p_14
p_15 = struct[0].p_15
p_21 = struct[0].p_21
p_22 = struct[0].p_22
p_24 = struct[0].p_24
p_25 = struct[0].p_25
p_31 = struct[0].p_31
p_32 = struct[0].p_32
p_34 = struct[0].p_34
p_35 = struct[0].p_35
p_41 = struct[0].p_41
p_42 = struct[0].p_42
p_44 = struct[0].p_44
p_45 = struct[0].p_45
p_51 = struct[0].p_51
p_52 = struct[0].p_52
p_54 = struct[0].p_54
p_55 = struct[0].p_55
# Inputs:
Dv_r_13 = struct[0].Dv_r_13
Dv_r_23 = struct[0].Dv_r_23
Dv_r_33 = struct[0].Dv_r_33
Dv_r_43 = struct[0].Dv_r_43
Dv_r_53 = struct[0].Dv_r_53
v_nom = struct[0].v_nom
T_v = struct[0].T_v
K_r = struct[0].K_r
# Dynamical states:
v_13 = struct[0].x[0,0]
v_23 = struct[0].x[1,0]
v_33 = struct[0].x[2,0]
v_43 = struct[0].x[3,0]
v_53 = struct[0].x[4,0]
# Algebraic states:
i_l_1112 = struct[0].y_ini[0,0]
i_l_1213 = struct[0].y_ini[1,0]
i_l_1314 = struct[0].y_ini[2,0]
i_l_1415 = struct[0].y_ini[3,0]
i_l_2122 = struct[0].y_ini[4,0]
i_l_2223 = struct[0].y_ini[5,0]
i_l_2324 = struct[0].y_ini[6,0]
i_l_2425 = struct[0].y_ini[7,0]
i_l_3132 = struct[0].y_ini[8,0]
i_l_3233 = struct[0].y_ini[9,0]
i_l_3334 = struct[0].y_ini[10,0]
i_l_3435 = struct[0].y_ini[11,0]
i_l_4142 = struct[0].y_ini[12,0]
i_l_4243 = struct[0].y_ini[13,0]
i_l_4344 = struct[0].y_ini[14,0]
i_l_4445 = struct[0].y_ini[15,0]
i_l_5152 = struct[0].y_ini[16,0]
i_l_5253 = struct[0].y_ini[17,0]
i_l_5354 = struct[0].y_ini[18,0]
i_l_5455 = struct[0].y_ini[19,0]
i_l_1521 = struct[0].y_ini[20,0]
i_l_2531 = struct[0].y_ini[21,0]
i_l_3541 = struct[0].y_ini[22,0]
i_l_4551 = struct[0].y_ini[23,0]
v_11 = struct[0].y_ini[24,0]
v_12 = struct[0].y_ini[25,0]
i_13 = struct[0].y_ini[26,0]
v_14 = struct[0].y_ini[27,0]
v_15 = struct[0].y_ini[28,0]
v_21 = struct[0].y_ini[29,0]
v_22 = struct[0].y_ini[30,0]
i_23 = struct[0].y_ini[31,0]
v_24 = struct[0].y_ini[32,0]
v_25 = struct[0].y_ini[33,0]
v_31 = struct[0].y_ini[34,0]
v_32 = struct[0].y_ini[35,0]
i_33 = struct[0].y_ini[36,0]
v_34 = struct[0].y_ini[37,0]
v_35 = struct[0].y_ini[38,0]
v_41 = struct[0].y_ini[39,0]
v_42 = struct[0].y_ini[40,0]
i_43 = struct[0].y_ini[41,0]
v_44 = struct[0].y_ini[42,0]
v_45 = struct[0].y_ini[43,0]
v_51 = struct[0].y_ini[44,0]
v_52 = struct[0].y_ini[45,0]
i_53 = struct[0].y_ini[46,0]
v_54 = struct[0].y_ini[47,0]
v_55 = struct[0].y_ini[48,0]
i_11 = struct[0].y_ini[49,0]
i_12 = struct[0].y_ini[50,0]
i_14 = struct[0].y_ini[51,0]
i_15 = struct[0].y_ini[52,0]
i_21 = struct[0].y_ini[53,0]
i_22 = struct[0].y_ini[54,0]
i_24 = struct[0].y_ini[55,0]
i_25 = struct[0].y_ini[56,0]
i_31 = struct[0].y_ini[57,0]
i_32 = struct[0].y_ini[58,0]
i_34 = struct[0].y_ini[59,0]
i_35 = struct[0].y_ini[60,0]
i_41 = struct[0].y_ini[61,0]
i_42 = struct[0].y_ini[62,0]
i_44 = struct[0].y_ini[63,0]
i_45 = struct[0].y_ini[64,0]
i_51 = struct[0].y_ini[65,0]
i_52 = struct[0].y_ini[66,0]
i_54 = struct[0].y_ini[67,0]
i_55 = struct[0].y_ini[68,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (-Dv_r_13 - K_r*i_13*v_13 - v_13 + v_nom)/T_v
struct[0].f[1,0] = (-Dv_r_23 - K_r*i_23*v_23 - v_23 + v_nom)/T_v
struct[0].f[2,0] = (-Dv_r_33 - K_r*i_33*v_33 - v_33 + v_nom)/T_v
struct[0].f[3,0] = (-Dv_r_43 - K_r*i_43*v_43 - v_43 + v_nom)/T_v
struct[0].f[4,0] = (-Dv_r_53 - K_r*i_53*v_53 - v_53 + v_nom)/T_v
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = -R_1112*i_l_1112 + v_11 - v_12
struct[0].g[1,0] = -R_1213*i_l_1213 + v_12 - v_13
struct[0].g[2,0] = -R_1314*i_l_1314 + v_13 - v_14
struct[0].g[3,0] = -R_1415*i_l_1415 + v_14 - v_15
struct[0].g[4,0] = -R_2122*i_l_2122 + v_21 - v_22
struct[0].g[5,0] = -R_2223*i_l_2223 + v_22 - v_23
struct[0].g[6,0] = -R_2324*i_l_2324 + v_23 - v_24
struct[0].g[7,0] = -R_2425*i_l_2425 + v_24 - v_25
struct[0].g[8,0] = -R_3132*i_l_3132 + v_31 - v_32
struct[0].g[9,0] = -R_3233*i_l_3233 + v_32 - v_33
struct[0].g[10,0] = -R_3334*i_l_3334 + v_33 - v_34
struct[0].g[11,0] = -R_3435*i_l_3435 + v_34 - v_35
struct[0].g[12,0] = -R_4142*i_l_4142 + v_41 - v_42
struct[0].g[13,0] = -R_4243*i_l_4243 + v_42 - v_43
struct[0].g[14,0] = -R_4344*i_l_4344 + v_43 - v_44
struct[0].g[15,0] = -R_4445*i_l_4445 + v_44 - v_45
struct[0].g[16,0] = -R_5152*i_l_5152 + v_51 - v_52
struct[0].g[17,0] = -R_5253*i_l_5253 + v_52 - v_53
struct[0].g[18,0] = -R_5354*i_l_5354 + v_53 - v_54
struct[0].g[19,0] = -R_5455*i_l_5455 + v_54 - v_55
struct[0].g[20,0] = -R_1521*i_l_1521 + v_15 - v_21
struct[0].g[21,0] = -R_2531*i_l_2531 + v_25 - v_31
struct[0].g[22,0] = -R_3541*i_l_3541 + v_35 - v_41
struct[0].g[23,0] = -R_4551*i_l_4551 + v_45 - v_51
struct[0].g[24,0] = i_11 - i_l_1112
struct[0].g[25,0] = i_12 + i_l_1112 - i_l_1213
struct[0].g[26,0] = i_13 + i_l_1213 - i_l_1314
struct[0].g[27,0] = i_14 + i_l_1314 - i_l_1415
struct[0].g[28,0] = i_15 + i_l_1415 - i_l_1521
struct[0].g[29,0] = i_21 + i_l_1521 - i_l_2122
struct[0].g[30,0] = i_22 + i_l_2122 - i_l_2223
struct[0].g[31,0] = i_23 + i_l_2223 - i_l_2324
struct[0].g[32,0] = i_24 + i_l_2324 - i_l_2425
struct[0].g[33,0] = i_25 + i_l_2425 - i_l_2531
struct[0].g[34,0] = i_31 + i_l_2531 - i_l_3132
struct[0].g[35,0] = i_32 + i_l_3132 - i_l_3233
struct[0].g[36,0] = i_33 + i_l_3233 - i_l_3334
struct[0].g[37,0] = i_34 + i_l_3334 - i_l_3435
struct[0].g[38,0] = i_35 + i_l_3435 - i_l_3541
struct[0].g[39,0] = i_41 + i_l_3541 - i_l_4142
struct[0].g[40,0] = i_42 + i_l_4142 - i_l_4243
struct[0].g[41,0] = i_43 + i_l_4243 - i_l_4344
struct[0].g[42,0] = i_44 + i_l_4344 - i_l_4445
struct[0].g[43,0] = i_45 + i_l_4445 - i_l_4551
struct[0].g[44,0] = i_51 + i_l_4551 - i_l_5152
struct[0].g[45,0] = i_52 + i_l_5152 - i_l_5253
struct[0].g[46,0] = i_53 + i_l_5253 - i_l_5354
struct[0].g[47,0] = i_54 + i_l_5354 - i_l_5455
struct[0].g[48,0] = i_55 + i_l_5455
struct[0].g[49,0] = i_11*v_11 - p_11
struct[0].g[50,0] = i_12*v_12 - p_12
struct[0].g[51,0] = i_14*v_14 - p_14
struct[0].g[52,0] = i_15*v_15 - p_15
struct[0].g[53,0] = i_21*v_21 - p_21
struct[0].g[54,0] = i_22*v_22 - p_22
struct[0].g[55,0] = i_24*v_24 - p_24
struct[0].g[56,0] = i_25*v_25 - p_25
struct[0].g[57,0] = i_31*v_31 - p_31
struct[0].g[58,0] = i_32*v_32 - p_32
struct[0].g[59,0] = i_34*v_34 - p_34
struct[0].g[60,0] = i_35*v_35 - p_35
struct[0].g[61,0] = i_41*v_41 - p_41
struct[0].g[62,0] = i_42*v_42 - p_42
struct[0].g[63,0] = i_44*v_44 - p_44
struct[0].g[64,0] = i_45*v_45 - p_45
struct[0].g[65,0] = i_51*v_51 - p_51
struct[0].g[66,0] = i_52*v_52 - p_52
struct[0].g[67,0] = i_54*v_54 - p_54
struct[0].g[68,0] = i_55*v_55 - p_55
# Outputs:
if mode == 3:
struct[0].h[0,0] = i_13*v_13
struct[0].h[1,0] = v_13
struct[0].h[2,0] = i_23*v_23
struct[0].h[3,0] = v_23
struct[0].h[4,0] = i_33*v_33
struct[0].h[5,0] = v_33
struct[0].h[6,0] = i_43*v_43
struct[0].h[7,0] = v_43
struct[0].h[8,0] = i_53*v_53
struct[0].h[9,0] = v_53
if mode == 10:
struct[0].Fx_ini[0,0] = (-K_r*i_13 - 1)/T_v
struct[0].Fx_ini[1,1] = (-K_r*i_23 - 1)/T_v
struct[0].Fx_ini[2,2] = (-K_r*i_33 - 1)/T_v
struct[0].Fx_ini[3,3] = (-K_r*i_43 - 1)/T_v
struct[0].Fx_ini[4,4] = (-K_r*i_53 - 1)/T_v
if mode == 11:
struct[0].Fy_ini[0,26] = -K_r*v_13/T_v
struct[0].Fy_ini[1,31] = -K_r*v_23/T_v
struct[0].Fy_ini[2,36] = -K_r*v_33/T_v
struct[0].Fy_ini[3,41] = -K_r*v_43/T_v
struct[0].Fy_ini[4,46] = -K_r*v_53/T_v
struct[0].Gy_ini[0,0] = -R_1112
struct[0].Gy_ini[0,24] = 1
struct[0].Gy_ini[0,25] = -1
struct[0].Gy_ini[1,1] = -R_1213
struct[0].Gy_ini[1,25] = 1
struct[0].Gy_ini[2,2] = -R_1314
struct[0].Gy_ini[2,27] = -1
struct[0].Gy_ini[3,3] = -R_1415
struct[0].Gy_ini[3,27] = 1
struct[0].Gy_ini[3,28] = -1
struct[0].Gy_ini[4,4] = -R_2122
struct[0].Gy_ini[4,29] = 1
struct[0].Gy_ini[4,30] = -1
struct[0].Gy_ini[5,5] = -R_2223
struct[0].Gy_ini[5,30] = 1
struct[0].Gy_ini[6,6] = -R_2324
struct[0].Gy_ini[6,32] = -1
struct[0].Gy_ini[7,7] = -R_2425
struct[0].Gy_ini[7,32] = 1
struct[0].Gy_ini[7,33] = -1
struct[0].Gy_ini[8,8] = -R_3132
struct[0].Gy_ini[8,34] = 1
struct[0].Gy_ini[8,35] = -1
struct[0].Gy_ini[9,9] = -R_3233
struct[0].Gy_ini[9,35] = 1
struct[0].Gy_ini[10,10] = -R_3334
struct[0].Gy_ini[10,37] = -1
struct[0].Gy_ini[11,11] = -R_3435
struct[0].Gy_ini[11,37] = 1
struct[0].Gy_ini[11,38] = -1
struct[0].Gy_ini[12,12] = -R_4142
struct[0].Gy_ini[12,39] = 1
struct[0].Gy_ini[12,40] = -1
struct[0].Gy_ini[13,13] = -R_4243
struct[0].Gy_ini[13,40] = 1
struct[0].Gy_ini[14,14] = -R_4344
struct[0].Gy_ini[14,42] = -1
struct[0].Gy_ini[15,15] = -R_4445
struct[0].Gy_ini[15,42] = 1
struct[0].Gy_ini[15,43] = -1
struct[0].Gy_ini[16,16] = -R_5152
struct[0].Gy_ini[16,44] = 1
struct[0].Gy_ini[16,45] = -1
struct[0].Gy_ini[17,17] = -R_5253
struct[0].Gy_ini[17,45] = 1
struct[0].Gy_ini[18,18] = -R_5354
struct[0].Gy_ini[18,47] = -1
struct[0].Gy_ini[19,19] = -R_5455
struct[0].Gy_ini[19,47] = 1
struct[0].Gy_ini[19,48] = -1
struct[0].Gy_ini[20,20] = -R_1521
struct[0].Gy_ini[20,28] = 1
struct[0].Gy_ini[20,29] = -1
struct[0].Gy_ini[21,21] = -R_2531
struct[0].Gy_ini[21,33] = 1
struct[0].Gy_ini[21,34] = -1
struct[0].Gy_ini[22,22] = -R_3541
struct[0].Gy_ini[22,38] = 1
struct[0].Gy_ini[22,39] = -1
struct[0].Gy_ini[23,23] = -R_4551
struct[0].Gy_ini[23,43] = 1
struct[0].Gy_ini[23,44] = -1
struct[0].Gy_ini[24,0] = -1
struct[0].Gy_ini[24,49] = 1
struct[0].Gy_ini[25,0] = 1
struct[0].Gy_ini[25,1] = -1
struct[0].Gy_ini[25,50] = 1
struct[0].Gy_ini[26,1] = 1
struct[0].Gy_ini[26,2] = -1
struct[0].Gy_ini[26,26] = 1
struct[0].Gy_ini[27,2] = 1
struct[0].Gy_ini[27,3] = -1
struct[0].Gy_ini[27,51] = 1
struct[0].Gy_ini[28,3] = 1
struct[0].Gy_ini[28,20] = -1
struct[0].Gy_ini[28,52] = 1
struct[0].Gy_ini[29,4] = -1
struct[0].Gy_ini[29,20] = 1
struct[0].Gy_ini[29,53] = 1
struct[0].Gy_ini[30,4] = 1
struct[0].Gy_ini[30,5] = -1
struct[0].Gy_ini[30,54] = 1
struct[0].Gy_ini[31,5] = 1
struct[0].Gy_ini[31,6] = -1
struct[0].Gy_ini[31,31] = 1
struct[0].Gy_ini[32,6] = 1
struct[0].Gy_ini[32,7] = -1
struct[0].Gy_ini[32,55] = 1
struct[0].Gy_ini[33,7] = 1
struct[0].Gy_ini[33,21] = -1
struct[0].Gy_ini[33,56] = 1
struct[0].Gy_ini[34,8] = -1
struct[0].Gy_ini[34,21] = 1
struct[0].Gy_ini[34,57] = 1
struct[0].Gy_ini[35,8] = 1
struct[0].Gy_ini[35,9] = -1
struct[0].Gy_ini[35,58] = 1
struct[0].Gy_ini[36,9] = 1
struct[0].Gy_ini[36,10] = -1
struct[0].Gy_ini[36,36] = 1
struct[0].Gy_ini[37,10] = 1
struct[0].Gy_ini[37,11] = -1
struct[0].Gy_ini[37,59] = 1
struct[0].Gy_ini[38,11] = 1
struct[0].Gy_ini[38,22] = -1
struct[0].Gy_ini[38,60] = 1
struct[0].Gy_ini[39,12] = -1
struct[0].Gy_ini[39,22] = 1
struct[0].Gy_ini[39,61] = 1
struct[0].Gy_ini[40,12] = 1
struct[0].Gy_ini[40,13] = -1
struct[0].Gy_ini[40,62] = 1
struct[0].Gy_ini[41,13] = 1
struct[0].Gy_ini[41,14] = -1
struct[0].Gy_ini[41,41] = 1
struct[0].Gy_ini[42,14] = 1
struct[0].Gy_ini[42,15] = -1
struct[0].Gy_ini[42,63] = 1
struct[0].Gy_ini[43,15] = 1
struct[0].Gy_ini[43,23] = -1
struct[0].Gy_ini[43,64] = 1
struct[0].Gy_ini[44,16] = -1
struct[0].Gy_ini[44,23] = 1
struct[0].Gy_ini[44,65] = 1
struct[0].Gy_ini[45,16] = 1
struct[0].Gy_ini[45,17] = -1
struct[0].Gy_ini[45,66] = 1
struct[0].Gy_ini[46,17] = 1
struct[0].Gy_ini[46,18] = -1
struct[0].Gy_ini[46,46] = 1
struct[0].Gy_ini[47,18] = 1
struct[0].Gy_ini[47,19] = -1
struct[0].Gy_ini[47,67] = 1
struct[0].Gy_ini[48,19] = 1
struct[0].Gy_ini[48,68] = 1
struct[0].Gy_ini[49,24] = i_11
struct[0].Gy_ini[49,49] = v_11
struct[0].Gy_ini[50,25] = i_12
struct[0].Gy_ini[50,50] = v_12
struct[0].Gy_ini[51,27] = i_14
struct[0].Gy_ini[51,51] = v_14
struct[0].Gy_ini[52,28] = i_15
struct[0].Gy_ini[52,52] = v_15
struct[0].Gy_ini[53,29] = i_21
struct[0].Gy_ini[53,53] = v_21
struct[0].Gy_ini[54,30] = i_22
struct[0].Gy_ini[54,54] = v_22
struct[0].Gy_ini[55,32] = i_24
struct[0].Gy_ini[55,55] = v_24
struct[0].Gy_ini[56,33] = i_25
struct[0].Gy_ini[56,56] = v_25
struct[0].Gy_ini[57,34] = i_31
struct[0].Gy_ini[57,57] = v_31
struct[0].Gy_ini[58,35] = i_32
struct[0].Gy_ini[58,58] = v_32
struct[0].Gy_ini[59,37] = i_34
struct[0].Gy_ini[59,59] = v_34
struct[0].Gy_ini[60,38] = i_35
struct[0].Gy_ini[60,60] = v_35
struct[0].Gy_ini[61,39] = i_41
struct[0].Gy_ini[61,61] = v_41
struct[0].Gy_ini[62,40] = i_42
struct[0].Gy_ini[62,62] = v_42
struct[0].Gy_ini[63,42] = i_44
struct[0].Gy_ini[63,63] = v_44
struct[0].Gy_ini[64,43] = i_45
struct[0].Gy_ini[64,64] = v_45
struct[0].Gy_ini[65,44] = i_51
struct[0].Gy_ini[65,65] = v_51
struct[0].Gy_ini[66,45] = i_52
struct[0].Gy_ini[66,66] = v_52
struct[0].Gy_ini[67,47] = i_54
struct[0].Gy_ini[67,67] = v_54
struct[0].Gy_ini[68,48] = i_55
struct[0].Gy_ini[68,68] = v_55
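# Note (added comment, interpretation of the generated code): run_nn mirrors the
# initialization code above, but reads the algebraic states from y_run instead of
# y_ini and fills the run-time Jacobians Fx, Fy and Gy (modes 10 and 11); the
# equations themselves are identical.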
def run_nn(t,struct,mode):
# Parameters:
R_1112 = struct[0].R_1112
R_1213 = struct[0].R_1213
R_1314 = struct[0].R_1314
R_1415 = struct[0].R_1415
R_1521 = struct[0].R_1521
R_2122 = struct[0].R_2122
R_2223 = struct[0].R_2223
R_2324 = struct[0].R_2324
R_2425 = struct[0].R_2425
R_2531 = struct[0].R_2531
R_3132 = struct[0].R_3132
R_3233 = struct[0].R_3233
R_3334 = struct[0].R_3334
R_3435 = struct[0].R_3435
R_3541 = struct[0].R_3541
R_4142 = struct[0].R_4142
R_4243 = struct[0].R_4243
R_4344 = struct[0].R_4344
R_4445 = struct[0].R_4445
R_4551 = struct[0].R_4551
R_5152 = struct[0].R_5152
R_5253 = struct[0].R_5253
R_5354 = struct[0].R_5354
R_5455 = struct[0].R_5455
p_11 = struct[0].p_11
p_12 = struct[0].p_12
p_14 = struct[0].p_14
p_15 = struct[0].p_15
p_21 = struct[0].p_21
p_22 = struct[0].p_22
p_24 = struct[0].p_24
p_25 = struct[0].p_25
p_31 = struct[0].p_31
p_32 = struct[0].p_32
p_34 = struct[0].p_34
p_35 = struct[0].p_35
p_41 = struct[0].p_41
p_42 = struct[0].p_42
p_44 = struct[0].p_44
p_45 = struct[0].p_45
p_51 = struct[0].p_51
p_52 = struct[0].p_52
p_54 = struct[0].p_54
p_55 = struct[0].p_55
# Inputs:
Dv_r_13 = struct[0].Dv_r_13
Dv_r_23 = struct[0].Dv_r_23
Dv_r_33 = struct[0].Dv_r_33
Dv_r_43 = struct[0].Dv_r_43
Dv_r_53 = struct[0].Dv_r_53
v_nom = struct[0].v_nom
T_v = struct[0].T_v
K_r = struct[0].K_r
# Dynamical states:
v_13 = struct[0].x[0,0]
v_23 = struct[0].x[1,0]
v_33 = struct[0].x[2,0]
v_43 = struct[0].x[3,0]
v_53 = struct[0].x[4,0]
# Algebraic states:
i_l_1112 = struct[0].y_run[0,0]
i_l_1213 = struct[0].y_run[1,0]
i_l_1314 = struct[0].y_run[2,0]
i_l_1415 = struct[0].y_run[3,0]
i_l_2122 = struct[0].y_run[4,0]
i_l_2223 = struct[0].y_run[5,0]
i_l_2324 = struct[0].y_run[6,0]
i_l_2425 = struct[0].y_run[7,0]
i_l_3132 = struct[0].y_run[8,0]
i_l_3233 = struct[0].y_run[9,0]
i_l_3334 = struct[0].y_run[10,0]
i_l_3435 = struct[0].y_run[11,0]
i_l_4142 = struct[0].y_run[12,0]
i_l_4243 = struct[0].y_run[13,0]
i_l_4344 = struct[0].y_run[14,0]
i_l_4445 = struct[0].y_run[15,0]
i_l_5152 = struct[0].y_run[16,0]
i_l_5253 = struct[0].y_run[17,0]
i_l_5354 = struct[0].y_run[18,0]
i_l_5455 = struct[0].y_run[19,0]
i_l_1521 = struct[0].y_run[20,0]
i_l_2531 = struct[0].y_run[21,0]
i_l_3541 = struct[0].y_run[22,0]
i_l_4551 = struct[0].y_run[23,0]
v_11 = struct[0].y_run[24,0]
v_12 = struct[0].y_run[25,0]
i_13 = struct[0].y_run[26,0]
v_14 = struct[0].y_run[27,0]
v_15 = struct[0].y_run[28,0]
v_21 = struct[0].y_run[29,0]
v_22 = struct[0].y_run[30,0]
i_23 = struct[0].y_run[31,0]
v_24 = struct[0].y_run[32,0]
v_25 = struct[0].y_run[33,0]
v_31 = struct[0].y_run[34,0]
v_32 = struct[0].y_run[35,0]
i_33 = struct[0].y_run[36,0]
v_34 = struct[0].y_run[37,0]
v_35 = struct[0].y_run[38,0]
v_41 = struct[0].y_run[39,0]
v_42 = struct[0].y_run[40,0]
i_43 = struct[0].y_run[41,0]
v_44 = struct[0].y_run[42,0]
v_45 = struct[0].y_run[43,0]
v_51 = struct[0].y_run[44,0]
v_52 = struct[0].y_run[45,0]
i_53 = struct[0].y_run[46,0]
v_54 = struct[0].y_run[47,0]
v_55 = struct[0].y_run[48,0]
i_11 = struct[0].y_run[49,0]
i_12 = struct[0].y_run[50,0]
i_14 = struct[0].y_run[51,0]
i_15 = struct[0].y_run[52,0]
i_21 = struct[0].y_run[53,0]
i_22 = struct[0].y_run[54,0]
i_24 = struct[0].y_run[55,0]
i_25 = struct[0].y_run[56,0]
i_31 = struct[0].y_run[57,0]
i_32 = struct[0].y_run[58,0]
i_34 = struct[0].y_run[59,0]
i_35 = struct[0].y_run[60,0]
i_41 = struct[0].y_run[61,0]
i_42 = struct[0].y_run[62,0]
i_44 = struct[0].y_run[63,0]
i_45 = struct[0].y_run[64,0]
i_51 = struct[0].y_run[65,0]
i_52 = struct[0].y_run[66,0]
i_54 = struct[0].y_run[67,0]
i_55 = struct[0].y_run[68,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (-Dv_r_13 - K_r*i_13*v_13 - v_13 + v_nom)/T_v
struct[0].f[1,0] = (-Dv_r_23 - K_r*i_23*v_23 - v_23 + v_nom)/T_v
struct[0].f[2,0] = (-Dv_r_33 - K_r*i_33*v_33 - v_33 + v_nom)/T_v
struct[0].f[3,0] = (-Dv_r_43 - K_r*i_43*v_43 - v_43 + v_nom)/T_v
struct[0].f[4,0] = (-Dv_r_53 - K_r*i_53*v_53 - v_53 + v_nom)/T_v
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = -R_1112*i_l_1112 + v_11 - v_12
struct[0].g[1,0] = -R_1213*i_l_1213 + v_12 - v_13
struct[0].g[2,0] = -R_1314*i_l_1314 + v_13 - v_14
struct[0].g[3,0] = -R_1415*i_l_1415 + v_14 - v_15
struct[0].g[4,0] = -R_2122*i_l_2122 + v_21 - v_22
struct[0].g[5,0] = -R_2223*i_l_2223 + v_22 - v_23
struct[0].g[6,0] = -R_2324*i_l_2324 + v_23 - v_24
struct[0].g[7,0] = -R_2425*i_l_2425 + v_24 - v_25
struct[0].g[8,0] = -R_3132*i_l_3132 + v_31 - v_32
struct[0].g[9,0] = -R_3233*i_l_3233 + v_32 - v_33
struct[0].g[10,0] = -R_3334*i_l_3334 + v_33 - v_34
struct[0].g[11,0] = -R_3435*i_l_3435 + v_34 - v_35
struct[0].g[12,0] = -R_4142*i_l_4142 + v_41 - v_42
struct[0].g[13,0] = -R_4243*i_l_4243 + v_42 - v_43
struct[0].g[14,0] = -R_4344*i_l_4344 + v_43 - v_44
struct[0].g[15,0] = -R_4445*i_l_4445 + v_44 - v_45
struct[0].g[16,0] = -R_5152*i_l_5152 + v_51 - v_52
struct[0].g[17,0] = -R_5253*i_l_5253 + v_52 - v_53
struct[0].g[18,0] = -R_5354*i_l_5354 + v_53 - v_54
struct[0].g[19,0] = -R_5455*i_l_5455 + v_54 - v_55
struct[0].g[20,0] = -R_1521*i_l_1521 + v_15 - v_21
struct[0].g[21,0] = -R_2531*i_l_2531 + v_25 - v_31
struct[0].g[22,0] = -R_3541*i_l_3541 + v_35 - v_41
struct[0].g[23,0] = -R_4551*i_l_4551 + v_45 - v_51
struct[0].g[24,0] = i_11 - i_l_1112
struct[0].g[25,0] = i_12 + i_l_1112 - i_l_1213
struct[0].g[26,0] = i_13 + i_l_1213 - i_l_1314
struct[0].g[27,0] = i_14 + i_l_1314 - i_l_1415
struct[0].g[28,0] = i_15 + i_l_1415 - i_l_1521
struct[0].g[29,0] = i_21 + i_l_1521 - i_l_2122
struct[0].g[30,0] = i_22 + i_l_2122 - i_l_2223
struct[0].g[31,0] = i_23 + i_l_2223 - i_l_2324
struct[0].g[32,0] = i_24 + i_l_2324 - i_l_2425
struct[0].g[33,0] = i_25 + i_l_2425 - i_l_2531
struct[0].g[34,0] = i_31 + i_l_2531 - i_l_3132
struct[0].g[35,0] = i_32 + i_l_3132 - i_l_3233
struct[0].g[36,0] = i_33 + i_l_3233 - i_l_3334
struct[0].g[37,0] = i_34 + i_l_3334 - i_l_3435
struct[0].g[38,0] = i_35 + i_l_3435 - i_l_3541
struct[0].g[39,0] = i_41 + i_l_3541 - i_l_4142
struct[0].g[40,0] = i_42 + i_l_4142 - i_l_4243
struct[0].g[41,0] = i_43 + i_l_4243 - i_l_4344
struct[0].g[42,0] = i_44 + i_l_4344 - i_l_4445
struct[0].g[43,0] = i_45 + i_l_4445 - i_l_4551
struct[0].g[44,0] = i_51 + i_l_4551 - i_l_5152
struct[0].g[45,0] = i_52 + i_l_5152 - i_l_5253
struct[0].g[46,0] = i_53 + i_l_5253 - i_l_5354
struct[0].g[47,0] = i_54 + i_l_5354 - i_l_5455
struct[0].g[48,0] = i_55 + i_l_5455
struct[0].g[49,0] = i_11*v_11 - p_11
struct[0].g[50,0] = i_12*v_12 - p_12
struct[0].g[51,0] = i_14*v_14 - p_14
struct[0].g[52,0] = i_15*v_15 - p_15
struct[0].g[53,0] = i_21*v_21 - p_21
struct[0].g[54,0] = i_22*v_22 - p_22
struct[0].g[55,0] = i_24*v_24 - p_24
struct[0].g[56,0] = i_25*v_25 - p_25
struct[0].g[57,0] = i_31*v_31 - p_31
struct[0].g[58,0] = i_32*v_32 - p_32
struct[0].g[59,0] = i_34*v_34 - p_34
struct[0].g[60,0] = i_35*v_35 - p_35
struct[0].g[61,0] = i_41*v_41 - p_41
struct[0].g[62,0] = i_42*v_42 - p_42
struct[0].g[63,0] = i_44*v_44 - p_44
struct[0].g[64,0] = i_45*v_45 - p_45
struct[0].g[65,0] = i_51*v_51 - p_51
struct[0].g[66,0] = i_52*v_52 - p_52
struct[0].g[67,0] = i_54*v_54 - p_54
struct[0].g[68,0] = i_55*v_55 - p_55
# Outputs:
if mode == 3:
struct[0].h[0,0] = i_13*v_13
struct[0].h[1,0] = v_13
struct[0].h[2,0] = i_23*v_23
struct[0].h[3,0] = v_23
struct[0].h[4,0] = i_33*v_33
struct[0].h[5,0] = v_33
struct[0].h[6,0] = i_43*v_43
struct[0].h[7,0] = v_43
struct[0].h[8,0] = i_53*v_53
struct[0].h[9,0] = v_53
if mode == 10:
struct[0].Fx[0,0] = (-K_r*i_13 - 1)/T_v
struct[0].Fx[1,1] = (-K_r*i_23 - 1)/T_v
struct[0].Fx[2,2] = (-K_r*i_33 - 1)/T_v
struct[0].Fx[3,3] = (-K_r*i_43 - 1)/T_v
struct[0].Fx[4,4] = (-K_r*i_53 - 1)/T_v
if mode == 11:
struct[0].Fy[0,26] = -K_r*v_13/T_v
struct[0].Fy[1,31] = -K_r*v_23/T_v
struct[0].Fy[2,36] = -K_r*v_33/T_v
struct[0].Fy[3,41] = -K_r*v_43/T_v
struct[0].Fy[4,46] = -K_r*v_53/T_v
struct[0].Gy[0,0] = -R_1112
struct[0].Gy[0,24] = 1
struct[0].Gy[0,25] = -1
struct[0].Gy[1,1] = -R_1213
struct[0].Gy[1,25] = 1
struct[0].Gy[2,2] = -R_1314
struct[0].Gy[2,27] = -1
struct[0].Gy[3,3] = -R_1415
struct[0].Gy[3,27] = 1
struct[0].Gy[3,28] = -1
struct[0].Gy[4,4] = -R_2122
struct[0].Gy[4,29] = 1
struct[0].Gy[4,30] = -1
struct[0].Gy[5,5] = -R_2223
struct[0].Gy[5,30] = 1
struct[0].Gy[6,6] = -R_2324
struct[0].Gy[6,32] = -1
struct[0].Gy[7,7] = -R_2425
struct[0].Gy[7,32] = 1
struct[0].Gy[7,33] = -1
struct[0].Gy[8,8] = -R_3132
struct[0].Gy[8,34] = 1
struct[0].Gy[8,35] = -1
struct[0].Gy[9,9] = -R_3233
struct[0].Gy[9,35] = 1
struct[0].Gy[10,10] = -R_3334
struct[0].Gy[10,37] = -1
struct[0].Gy[11,11] = -R_3435
struct[0].Gy[11,37] = 1
struct[0].Gy[11,38] = -1
struct[0].Gy[12,12] = -R_4142
struct[0].Gy[12,39] = 1
struct[0].Gy[12,40] = -1
struct[0].Gy[13,13] = -R_4243
struct[0].Gy[13,40] = 1
struct[0].Gy[14,14] = -R_4344
struct[0].Gy[14,42] = -1
struct[0].Gy[15,15] = -R_4445
struct[0].Gy[15,42] = 1
struct[0].Gy[15,43] = -1
struct[0].Gy[16,16] = -R_5152
struct[0].Gy[16,44] = 1
struct[0].Gy[16,45] = -1
struct[0].Gy[17,17] = -R_5253
struct[0].Gy[17,45] = 1
struct[0].Gy[18,18] = -R_5354
struct[0].Gy[18,47] = -1
struct[0].Gy[19,19] = -R_5455
struct[0].Gy[19,47] = 1
struct[0].Gy[19,48] = -1
struct[0].Gy[20,20] = -R_1521
struct[0].Gy[20,28] = 1
struct[0].Gy[20,29] = -1
struct[0].Gy[21,21] = -R_2531
struct[0].Gy[21,33] = 1
struct[0].Gy[21,34] = -1
struct[0].Gy[22,22] = -R_3541
struct[0].Gy[22,38] = 1
struct[0].Gy[22,39] = -1
struct[0].Gy[23,23] = -R_4551
struct[0].Gy[23,43] = 1
struct[0].Gy[23,44] = -1
struct[0].Gy[24,0] = -1
struct[0].Gy[24,49] = 1
struct[0].Gy[25,0] = 1
struct[0].Gy[25,1] = -1
struct[0].Gy[25,50] = 1
struct[0].Gy[26,1] = 1
struct[0].Gy[26,2] = -1
struct[0].Gy[26,26] = 1
struct[0].Gy[27,2] = 1
struct[0].Gy[27,3] = -1
struct[0].Gy[27,51] = 1
struct[0].Gy[28,3] = 1
struct[0].Gy[28,20] = -1
struct[0].Gy[28,52] = 1
struct[0].Gy[29,4] = -1
struct[0].Gy[29,20] = 1
struct[0].Gy[29,53] = 1
struct[0].Gy[30,4] = 1
struct[0].Gy[30,5] = -1
struct[0].Gy[30,54] = 1
struct[0].Gy[31,5] = 1
struct[0].Gy[31,6] = -1
struct[0].Gy[31,31] = 1
struct[0].Gy[32,6] = 1
struct[0].Gy[32,7] = -1
struct[0].Gy[32,55] = 1
struct[0].Gy[33,7] = 1
struct[0].Gy[33,21] = -1
struct[0].Gy[33,56] = 1
struct[0].Gy[34,8] = -1
struct[0].Gy[34,21] = 1
struct[0].Gy[34,57] = 1
struct[0].Gy[35,8] = 1
struct[0].Gy[35,9] = -1
struct[0].Gy[35,58] = 1
struct[0].Gy[36,9] = 1
struct[0].Gy[36,10] = -1
struct[0].Gy[36,36] = 1
struct[0].Gy[37,10] = 1
struct[0].Gy[37,11] = -1
struct[0].Gy[37,59] = 1
struct[0].Gy[38,11] = 1
struct[0].Gy[38,22] = -1
struct[0].Gy[38,60] = 1
struct[0].Gy[39,12] = -1
struct[0].Gy[39,22] = 1
struct[0].Gy[39,61] = 1
struct[0].Gy[40,12] = 1
struct[0].Gy[40,13] = -1
struct[0].Gy[40,62] = 1
struct[0].Gy[41,13] = 1
struct[0].Gy[41,14] = -1
struct[0].Gy[41,41] = 1
struct[0].Gy[42,14] = 1
struct[0].Gy[42,15] = -1
struct[0].Gy[42,63] = 1
struct[0].Gy[43,15] = 1
struct[0].Gy[43,23] = -1
struct[0].Gy[43,64] = 1
struct[0].Gy[44,16] = -1
struct[0].Gy[44,23] = 1
struct[0].Gy[44,65] = 1
struct[0].Gy[45,16] = 1
struct[0].Gy[45,17] = -1
struct[0].Gy[45,66] = 1
struct[0].Gy[46,17] = 1
struct[0].Gy[46,18] = -1
struct[0].Gy[46,46] = 1
struct[0].Gy[47,18] = 1
struct[0].Gy[47,19] = -1
struct[0].Gy[47,67] = 1
struct[0].Gy[48,19] = 1
struct[0].Gy[48,68] = 1
struct[0].Gy[49,24] = i_11
struct[0].Gy[49,49] = v_11
struct[0].Gy[50,25] = i_12
struct[0].Gy[50,50] = v_12
struct[0].Gy[51,27] = i_14
struct[0].Gy[51,51] = v_14
struct[0].Gy[52,28] = i_15
struct[0].Gy[52,52] = v_15
struct[0].Gy[53,29] = i_21
struct[0].Gy[53,53] = v_21
struct[0].Gy[54,30] = i_22
struct[0].Gy[54,54] = v_22
struct[0].Gy[55,32] = i_24
struct[0].Gy[55,55] = v_24
struct[0].Gy[56,33] = i_25
struct[0].Gy[56,56] = v_25
struct[0].Gy[57,34] = i_31
struct[0].Gy[57,57] = v_31
struct[0].Gy[58,35] = i_32
struct[0].Gy[58,58] = v_32
struct[0].Gy[59,37] = i_34
struct[0].Gy[59,59] = v_34
struct[0].Gy[60,38] = i_35
struct[0].Gy[60,60] = v_35
struct[0].Gy[61,39] = i_41
struct[0].Gy[61,61] = v_41
struct[0].Gy[62,40] = i_42
struct[0].Gy[62,62] = v_42
struct[0].Gy[63,42] = i_44
struct[0].Gy[63,63] = v_44
struct[0].Gy[64,43] = i_45
struct[0].Gy[64,64] = v_45
struct[0].Gy[65,44] = i_51
struct[0].Gy[65,65] = v_51
struct[0].Gy[66,45] = i_52
struct[0].Gy[66,66] = v_52
struct[0].Gy[67,47] = i_54
struct[0].Gy[67,67] = v_54
struct[0].Gy[68,48] = i_55
struct[0].Gy[68,68] = v_55
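# numba-compiled helpers used by the generated expressions. Piecewise and ITE take a
# sequence of (value, condition) pairs and scan them from last to first, so the value
# of the earliest pair whose condition is true wins (apparently mirroring SymPy's
# Piecewise semantics); Abs simply wraps np.abs.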
@numba.njit(cache=True)
def Piecewise(arg):
out = arg[0][1]
N = len(arg)
for it in range(N-1,-1,-1):
if arg[it][1]: out = arg[it][0]
return out
@numba.njit(cache=True)
def ITE(arg):
out = arg[0][1]
N = len(arg)
for it in range(N-1,-1,-1):
if arg[it][1]: out = arg[it][0]
return out
@numba.njit(cache=True)
def Abs(x):
return np.abs(x)
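# ini_dae_jacobian_numba (below) unpacks the flat vector x into the dynamic states x
# and the initialization algebraic states y_ini, evaluates the initialization
# Jacobians (modes 10 and 11) and scatters their nonzero entries into the dense
# matrix Ac_ini = [[Fx, Fy], [Gx, Gy]].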
@numba.njit(cache=True)
def ini_dae_jacobian_numba(struct,x):
N_x = struct[0].N_x
N_y = struct[0].N_y
struct[0].x[:,0] = x[0:N_x]
struct[0].y_ini[:,0] = x[N_x:(N_x+N_y)]
ini(struct,10)
ini(struct,11)
for row,col in zip(struct[0].Fx_ini_rows,struct[0].Fx_ini_cols):
struct[0].Ac_ini[row,col] = struct[0].Fx_ini[row,col]
for row,col in zip(struct[0].Fy_ini_rows,struct[0].Fy_ini_cols):
struct[0].Ac_ini[row,col+N_x] = struct[0].Fy_ini[row,col]
for row,col in zip(struct[0].Gx_ini_rows,struct[0].Gx_ini_cols):
struct[0].Ac_ini[row+N_x,col] = struct[0].Gx_ini[row,col]
for row,col in zip(struct[0].Gy_ini_rows,struct[0].Gy_ini_cols):
struct[0].Ac_ini[row+N_x,col+N_x] = struct[0].Gy_ini[row,col]
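# ini_dae_problem (below) evaluates the initialization residuals: f (mode 2) and
# g (mode 3) are stacked into fg = [f; g] using the same x / y_ini unpacking as above.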
@numba.njit(cache=True)
def ini_dae_problem(struct,x):
N_x = struct[0].N_x
N_y = struct[0].N_y
struct[0].x[:,0] = x[0:N_x]
struct[0].y_ini[:,0] = x[N_x:(N_x+N_y)]
ini(struct,2)
ini(struct,3)
struct[0].fg[:N_x,:] = struct[0].f[:]
struct[0].fg[N_x:,:] = struct[0].g[:]
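# ssate (below) solves the initialization problem f = 0, g = 0 with a full Newton
# iteration on the stacked vector xy, stopping when the residual infinity norm drops
# below 1e-8 or after 100 iterations.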
@numba.njit(cache=True)
def ssate(struct,xy):
for it in range(100):
ini_dae_jacobian_numba(struct,xy[:,0])
ini_dae_problem(struct,xy[:,0])
xy[:] += np.linalg.solve(struct[0].Ac_ini,-struct[0].fg)
if np.max(np.abs(struct[0].fg[:,0]))<1e-8: break
N_x = struct[0].N_x
struct[0].x[:,0] = xy[:N_x,0]
struct[0].y_ini[:,0] = xy[N_x:,0]
return xy,it
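# daesolver (below) integrates the DAE with a fixed step Dt from t to t_end; only
# solver == 5 (implicit trapezoidal rule) is implemented in this generated code, and
# results are stored every `decimation` steps in the T, X, Y and Z channels when
# store == 1.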
@numba.njit(cache=True)
def daesolver(struct):
sin = np.sin
cos = np.cos
sqrt = np.sqrt
i = 0
Dt = struct[i].Dt
N_x = struct[i].N_x
N_y = struct[i].N_y
N_z = struct[i].N_z
decimation = struct[i].decimation
eye = np.eye(N_x)
t = struct[i].t
t_end = struct[i].t_end
if struct[i].it == 0:
run(t,struct, 1)
struct[i].it_store = 0
struct[i]['T'][0] = t
struct[i].X[0,:] = struct[i].x[:,0]
struct[i].Y[0,:] = struct[i].y_run[:,0]
struct[i].Z[0,:] = struct[i].h[:,0]
solver = struct[i].solvern
while t<t_end:
struct[i].it += 1
struct[i].t += Dt
t = struct[i].t
if solver == 5: # Trapezoidal DAE as in Milano's book
run(t,struct, 2)
run(t,struct, 3)
x = np.copy(struct[i].x[:])
y = np.copy(struct[i].y_run[:])
f = np.copy(struct[i].f[:])
g = np.copy(struct[i].g[:])
for iter in range(struct[i].imax):
run(t,struct, 2)
run(t,struct, 3)
run(t,struct,10)
run(t,struct,11)
x_i = struct[i].x[:]
y_i = struct[i].y_run[:]
f_i = struct[i].f[:]
g_i = struct[i].g[:]
F_x_i = struct[i].Fx[:,:]
F_y_i = struct[i].Fy[:,:]
G_x_i = struct[i].Gx[:,:]
G_y_i = struct[i].Gy[:,:]
A_c_i = np.vstack((np.hstack((eye-0.5*Dt*F_x_i, -0.5*Dt*F_y_i)),
np.hstack((G_x_i, G_y_i))))
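# Newton update for the implicit trapezoidal step: solve A_c * [Dx; Dy] = -[f_n; g],
# where f_n = x_i - x - 0.5*Dt*(f_i + f) is the trapezoidal residual of the
# differential equations and g is the algebraic residual.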
f_n_i = x_i - x - 0.5*Dt*(f_i+f)
# print(t,iter,g_i)
Dxy_i = np.linalg.solve(-A_c_i,np.vstack((f_n_i,g_i)))
x_i = x_i + Dxy_i[0:N_x]
y_i = y_i + Dxy_i[N_x:(N_x+N_y)]
struct[i].x[:] = x_i
struct[i].y_run[:] = y_i
# [f_i,g_i,F_x_i,F_y_i,G_x_i,G_y_i] = smib_transient(x_i,y_i,u);
# A_c_i = [[eye(N_x)-0.5*Dt*F_x_i, -0.5*Dt*F_y_i],
# [ G_x_i, G_y_i]];
# f_n_i = x_i - x - 0.5*Dt*(f_i+f);
# Dxy_i = -A_c_i\[f_n_i.',g_i.'].';
# x_i = x_i + Dxy_i(1:N_x);
# y_i = y_i + Dxy_i(N_x+1:N_x+N_y);
xy = np.vstack((x_i,y_i))
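# Convergence check: the largest update relative to each variable's magnitude
# (floored at 0.001 to avoid dividing by near-zero values) must fall below itol.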
max_relative = 0.0
for it_var in range(N_x+N_y):
abs_value = np.abs(xy[it_var,0])
if abs_value < 0.001:
abs_value = 0.001
relative_error = np.abs(Dxy_i[it_var,0])/abs_value
if relative_error > max_relative: max_relative = relative_error
if max_relative<struct[i].itol:
break
# if iter>struct[i].imax-2:
# print('Convergence problem')
struct[i].x[:] = x_i
struct[i].y_run[:] = y_i
# channels
if struct[i].store == 1:
it_store = struct[i].it_store
if struct[i].it >= it_store*decimation:
struct[i]['T'][it_store+1] = t
struct[i].X[it_store+1,:] = struct[i].x[:,0]
struct[i].Y[it_store+1,:] = struct[i].y_run[:,0]
struct[i].Z[it_store+1,:] = struct[i].h[:,0]
struct[i].iters[it_store+1,0] = iter
struct[i].it_store += 1
struct[i].t = t
return t
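# nonzeros() lists the row/column indices of the structurally nonzero Jacobian
# entries (Fx, Fy, Gx, Gy) for this model; these are presumably the index arrays the
# scattering loops in ini_dae_jacobian_numba iterate over.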
def nonzeros():
Fx_ini_rows = [0, 1, 2, 3, 4]
Fx_ini_cols = [0, 1, 2, 3, 4]
Fy_ini_rows = [0, 1, 2, 3, 4]
Fy_ini_cols = [26, 31, 36, 41, 46]
Gx_ini_rows = [1, 2, 5, 6, 9, 10, 13, 14, 17, 18]
Gx_ini_cols = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
Gy_ini_rows = [0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18, 19, 19, 19, 20, 20, 20, 21, 21, 21, 22, 22, 22, 23, 23, 23, 24, 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44, 44, 45, 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68]
Gy_ini_cols = [0, 24, 25, 1, 25, 2, 27, 3, 27, 28, 4, 29, 30, 5, 30, 6, 32, 7, 32, 33, 8, 34, 35, 9, 35, 10, 37, 11, 37, 38, 12, 39, 40, 13, 40, 14, 42, 15, 42, 43, 16, 44, 45, 17, 45, 18, 47, 19, 47, 48, 20, 28, 29, 21, 33, 34, 22, 38, 39, 23, 43, 44, 0, 49, 0, 1, 50, 1, 2, 26, 2, 3, 51, 3, 20, 52, 4, 20, 53, 4, 5, 54, 5, 6, 31, 6, 7, 55, 7, 21, 56, 8, 21, 57, 8, 9, 58, 9, 10, 36, 10, 11, 59, 11, 22, 60, 12, 22, 61, 12, 13, 62, 13, 14, 41, 14, 15, 63, 15, 23, 64, 16, 23, 65, 16, 17, 66, 17, 18, 46, 18, 19, 67, 19, 68, 24, 49, 25, 50, 27, 51, 28, 52, 29, 53, 30, 54, 32, 55, 33, 56, 34, 57, 35, 58, 37, 59, 38, 60, 39, 61, 40, 62, 42, 63, 43, 64, 44, 65, 45, 66, 47, 67, 48, 68]
return Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols
| 36.103135
| 693
| 0.521014
| 17,051
| 87,514
| 2.43235
| 0.022403
| 0.251314
| 0.105681
| 0.070888
| 0.849231
| 0.812075
| 0.718354
| 0.675387
| 0.655929
| 0.632758
| 0
| 0.204855
| 0.299632
| 87,514
| 2,424
| 694
| 36.103135
| 0.471807
| 0.026659
| 0
| 0.628628
| 0
| 0
| 0.021399
| 0.002122
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02361
| false
| 0
| 0.001968
| 0.000984
| 0.040334
| 0.003935
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e06240a4f1be8493b22a993237277f93ea5d533b
| 249
|
py
|
Python
|
src/qtt/measurements/acquisition/__init__.py
|
codecrap/qtt
|
39a8bf21f7bcab94940a66f4d553a14bf34f82b0
|
[
"MIT"
] | 39
|
2018-09-13T14:14:56.000Z
|
2022-03-28T22:02:29.000Z
|
src/qtt/measurements/acquisition/__init__.py
|
codecrap/qtt
|
39a8bf21f7bcab94940a66f4d553a14bf34f82b0
|
[
"MIT"
] | 136
|
2018-08-30T19:38:22.000Z
|
2022-03-31T13:05:29.000Z
|
src/qtt/measurements/acquisition/__init__.py
|
codecrap/qtt
|
39a8bf21f7bcab94940a66f4d553a14bf34f82b0
|
[
"MIT"
] | 21
|
2018-11-04T09:00:02.000Z
|
2022-01-20T01:40:08.000Z
|
from qtt.measurements.acquisition.uhfli_scope_reader import UHFLIScopeReader
from qtt.measurements.acquisition.configuration_storage import load_configuration, save_configuration
from qtt.measurements.acquisition.uhfli_stimulus import UHFLIStimulus
| 62.25
| 101
| 0.907631
| 28
| 249
| 7.857143
| 0.535714
| 0.095455
| 0.259091
| 0.409091
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052209
| 249
| 3
| 102
| 83
| 0.932203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e072c0407eeed356bd3fa55107d2d087f6ef4a6a
| 4,472
|
py
|
Python
|
playgroud.py
|
BrancoLab/FC_analysis
|
7124a7d998275bce6f7a18c264399c7dabfd430b
|
[
"MIT"
] | 1
|
2018-08-20T14:47:09.000Z
|
2018-08-20T14:47:09.000Z
|
playgroud.py
|
BrancoLab/FC_analysis
|
7124a7d998275bce6f7a18c264399c7dabfd430b
|
[
"MIT"
] | null | null | null |
playgroud.py
|
BrancoLab/FC_analysis
|
7124a7d998275bce6f7a18c264399c7dabfd430b
|
[
"MIT"
] | 1
|
2018-09-24T15:58:57.000Z
|
2018-09-24T15:58:57.000Z
|
# %%
import os
import pandas as pd
import numpy as np
from fcutils.plotting.colors import colorMap
from analysis.misc.paths import cellfinder_cells_folder, cellfinder_out_dir, injections_folder
from analysis.anatomy.utils import *
# %%
import matplotlib.pyplot as plt
for i in range(100):
color = colorMap(i, name='YlOrBr', vmin=0, vmax=100)
# plt.scatter(0, i, c=color, s=20)
print(color)
# %%
# Merge highest projecting regions in a summary dataframe
cell_files = dict(
# cc_136_0 = ('GRN', 'right', 'CC_136_0_ch0_cells.h5'),
# cc_136_1 = ('GRN', 'right', 'CC_136_1_ch0_cells.h5'),
cc_134_1 = ('SCm', 'left', 'CC_134_1_ch1_cells.h5'),
cc_134_2 = ('SCm', 'left', 'CC_134_2_ch1_cells.h5'),
)
data = {}
df = pd.DataFrame()
ipsidf, contradf = pd.DataFrame(), pd.DataFrame()
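# For each mouse: load the detected cells, drop those inside the injection region,
# and express per-region cell counts as a percentage of the remaining cells, split
# into ipsi- and contralateral hemispheres and thresholded at `threshold` percent.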
for mouse, (inj, hemi, path) in cell_files.items():
all_cells = pd.read_hdf(os.path.join(cellfinder_cells_folder, path), key='hdf')
all_cells = all_cells.loc[all_cells.region != inj]
n_cells = len(all_cells)
threshold = 2
ipsi = all_cells.loc[all_cells.hemisphere == hemi]
ipsi = (ipsi.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
ipsi = ipsi.loc[ipsi.x > threshold].x.rename(f'{mouse}_{inj}_ipsi').round(2)
contra = all_cells.loc[all_cells.hemisphere != hemi]
contra = (contra.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
contra = contra.loc[contra.x > threshold].x.rename(f'{mouse}_{inj}_contra').round(2)
df = pd.concat([df, ipsi, contra], axis=1).sort_index()
ipsidf = pd.concat([ipsidf, ipsi], axis=1).sort_index()
contradf = pd.concat([contradf, contra], axis=1).sort_index()
# print(df.to_markdown())
# %%
import networkx as nx
ipsi = ipsidf.sum(axis=1)/2
contra = contradf.sum(axis=1)/2
edges = []
regions = list(df.index)
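# Build an undirected graph linking each source region (per hemisphere) to SC_l,
# weighted by the averaged ipsi/contra projection percentages.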
for reg in regions:
# try:
# edges.append((f'{reg}_r', 'SC_r', {'weight':ipsi[reg]}))
# except:
# pass
try:
edges.append((f'{reg}_r', 'SC_l', {'weight':contra[reg]}))
except:
pass
# try:
# edges.append((f'{reg}_l', 'SC_r', {'weight':contra[reg]}))
# except:
# pass
try:
edges.append((f'{reg}_l', 'SC_l', {'weight':ipsi[reg]}))
except:
pass
# edges.append((f'{reg}_l', f'{reg}_r', {'weight':1}))
G=nx.Graph()
G.add_edges_from(edges)
nx.draw(G, with_labels=True, pos=nx.spring_layout(G))
# %%
cell_files = dict(
cc_136_0 = ('GRN', 'right', 'CC_136_0_ch0_cells.h5'),
cc_136_1 = ('GRN', 'right', 'CC_136_1_ch0_cells.h5'),
# cc_134_1 = ('SCm', 'left', 'CC_134_1_ch1_cells.h5'),
# cc_134_2 = ('SCm', 'left', 'CC_134_2_ch1_cells.h5'),
)
data = {}
df = pd.DataFrame()
ipsidf, contradf = pd.DataFrame(), pd.DataFrame()
for mouse, (inj, hemi, path) in cell_files.items():
all_cells = pd.read_hdf(os.path.join(cellfinder_cells_folder, path), key='hdf')
all_cells = all_cells.loc[all_cells.region != inj]
n_cells = len(all_cells)
ipsi = all_cells.loc[all_cells.hemisphere == hemi]
ipsi = (ipsi.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
ipsi = ipsi.loc[ipsi.x > threshold].x.rename(f'{mouse}_{inj}_ipsi').round(2)
contra = all_cells.loc[all_cells.hemisphere != hemi]
contra = (contra.groupby('region').count().sort_values('region_name')[::-1]/ n_cells) * 100
contra = contra.loc[contra.x > threshold].x.rename(f'{mouse}_{inj}_contra').round(2)
df = pd.concat([df, ipsi, contra], axis=1).sort_index()
ipsidf = pd.concat([ipsidf, ipsi], axis=1).sort_index()
contradf = pd.concat([contradf, contra], axis=1).sort_index()
# %%
ipsi = ipsidf.sum(axis=1)/2
contra = contradf.sum(axis=1)/2
edges = []
regions = list(df.index)
for reg in regions:
try:
edges.append((f'{reg}_r', 'GRN_r', {'weight':ipsi[reg]}))
except:
pass
# try:
# edges.append((f'{reg}_r', 'GRN_l', {'weight':contra[reg]}))
# except:
# pass
try:
edges.append((f'{reg}_l', 'GRN_r', {'weight':contra[reg]}))
except:
pass
# try:
# edges.append((f'{reg}_l', 'GRN_l', {'weight':ipsi[reg]}))
# except:
# pass
# edges.append(('SC_r', 'SC_l', {'weight':1}))
# edges.append(('SC_l', 'GRN_r', {'weight':1}))
G.add_edges_from(edges)
# %%
nx.draw(G, with_labels=True, pos=nx.spring_layout(G))
# %%
| 25.849711
| 95
| 0.61695
| 672
| 4,472
| 3.897321
| 0.184524
| 0.054983
| 0.041237
| 0.051546
| 0.829706
| 0.827797
| 0.827797
| 0.823979
| 0.797251
| 0.797251
| 0
| 0.03533
| 0.189848
| 4,472
| 172
| 96
| 26
| 0.687552
| 0.185823
| 0
| 0.722892
| 0
| 0
| 0.094287
| 0.023295
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.048193
| 0.096386
| 0
| 0.096386
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|