hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fa6a8b00955b29c10ab5047333eefd37107d0882
| 20,933
|
py
|
Python
|
tests/test_makestr.py
|
wsmorgan/phonon-enumeration
|
5d7a8d8e3403cc387bdd58cf98a23e4751ea34dd
|
[
"MIT-0"
] | 5
|
2016-06-17T05:39:27.000Z
|
2021-05-30T21:02:08.000Z
|
tests/test_makestr.py
|
wsmorgan/phonon-enumeration
|
5d7a8d8e3403cc387bdd58cf98a23e4751ea34dd
|
[
"MIT-0"
] | 66
|
2016-04-02T05:02:08.000Z
|
2018-07-05T19:43:09.000Z
|
tests/test_makestr.py
|
wsmorgan/phonon-enumeration
|
5d7a8d8e3403cc387bdd58cf98a23e4751ea34dd
|
[
"MIT-0"
] | 5
|
2017-03-15T21:28:44.000Z
|
2020-01-09T14:44:45.000Z
|
"""Tests of the makeStr.py module."""
import unittest as ut
import os
import pytest
def get_sargs(args):
    """Return the options parsed from *args* as if given on the command line.

    Temporarily replaces ``sys.argv`` so that phenum's ``_parser_options``
    sees the supplied argument list, then restores the original value so
    the mutation cannot leak into subsequent tests (the original version
    left ``sys.argv`` permanently overwritten).

    :param args: full argument vector, including the program name in
        ``args[0]`` (argparse convention).
    :returns: whatever ``_parser_options`` returns (``None`` for the
        ``-examples`` flag, per ``test_examples`` below).
    """
    import sys
    saved_argv = sys.argv
    sys.argv = args
    try:
        from phenum.phenumStr import _parser_options
        return _parser_options()
    finally:
        # Always restore, even if parsing raises (e.g. SystemExit).
        sys.argv = saved_argv
def test_examples():
    """Makes sure the script examples work properly.

    Requesting the examples via ``-examples`` should short-circuit the
    parser and yield ``None`` instead of a parsed-options object.
    """
    assert get_sargs(["py.test", "-examples"]) is None
class TestMakeStructures(ut.TestCase):
    """Tests of the _make_structures subroutine.

    Each test builds an argument dictionary for ``phenum.phenumStr``,
    generates structure (POSCAR-style) files, and compares them
    line-by-line against reference output stored under
    ``tests/enumeration/``.  The 24 tests previously duplicated a
    14-key dictionary each; the shared ``_args``/``_check_*`` helpers
    below keep them DRY while preserving every test's exact inputs.
    """

    def _compare_files(self, file1, file2):
        """Assert that two files have identical whitespace-split contents."""
        out1 = []
        out2 = []
        with open(file1, "r") as o1:
            for line in o1:
                out1.append(line.strip().split())
        with open(file2, "r") as o2:
            for line in o2:
                out2.append(line.strip().split())
        self.assertEqual(out1, out2)

    def _args(self, **overrides):
        """Return the default argument dict shared by every test, with the
        given keyword overrides applied.

        Defaults mirror the literal dictionaries of the original tests;
        ``structures`` and ``input`` must always be overridden.
        """
        args = {"structures": None,
                "debug": False,
                "examples": False,
                "displace": 0.0,
                "input": None,
                "mink": True,
                "species": None,
                "verbose": None,
                "outfile": "vasp.{}",
                "rattle": 0.0,
                "species_mapping": None,
                "config": "f",
                "remove_zeros": True}
        args.update(overrides)
        return args

    def _cleanup(self):
        """Remove generated vasp.* files.

        Portable replacement for ``os.system("rm vasp*")``; also called
        on failure (via ``finally``) so stale output cannot poison
        later tests.
        """
        import glob
        for path in glob.glob("vasp*"):
            os.remove(path)

    def _check_make(self, expected, **overrides):
        """Run ``_make_structures`` with the given overrides and compare
        the file generated for the first requested structure against
        *expected*."""
        from phenum.phenumStr import _make_structures
        args = self._args(**overrides)
        try:
            _make_structures(args)
            self._compare_files("vasp.{}".format(args["structures"][0]), expected)
        finally:
            self._cleanup()

    def _check_run(self, expected=None, **overrides):
        """Call ``run`` with the given overrides; when *expected* is given,
        compare the output for the first requested structure against it."""
        from phenum.phenumStr import run
        args = self._args(**overrides)
        try:
            run(args)
            if expected is not None:
                self._compare_files("vasp.{}".format(args["structures"][0]), expected)
        finally:
            self._cleanup()

    def _check_raises(self, **overrides):
        """Assert that ``run`` rejects the given arguments with ValueError."""
        from phenum.phenumStr import run
        with pytest.raises(ValueError):
            run(self._args(**overrides))

    # --- _make_structures on the simple-cubic enumeration ---

    def test_str1(self):
        self._check_make("tests/enumeration/sc_1/vasp.000010",
                         structures=[10],
                         input="tests/enumeration/sc_1/enum.out_100")

    def test_str2(self):
        self._check_make("tests/enumeration/sc_1/vasp.000020",
                         structures=[20],
                         input="tests/enumeration/sc_1/enum.out_100")

    def test_str3(self):
        self._check_make("tests/enumeration/sc_1/vasp.000033",
                         structures=[33],
                         input="tests/enumeration/sc_1/enum.out_100")

    # --- _make_structures on the fcc enumerations ---

    def test_str4(self):
        self._check_make("tests/enumeration/fcc_1/vasp.000001",
                         structures=[1],
                         input="tests/enumeration/fcc_1/enum.out_2_6")

    def test_str5(self):
        self._check_make("tests/enumeration/fcc_1/vasp.000055",
                         structures=[55],
                         input="tests/enumeration/fcc_1/enum.out_2_6")

    def test_str6(self):
        self._check_make("tests/enumeration/fcc_1/vasp.000050",
                         structures=[50],
                         input="tests/enumeration/fcc_1/enum.out_2_6")

    def test_str7(self):
        self._check_make("tests/enumeration/fcc_1/vasp.000088",
                         structures=[88],
                         input="tests/enumeration/fcc_1/enum.out_100_p2")

    def test_str8(self):
        self._check_make("tests/enumeration/fcc_2/vasp.000001",
                         structures=[1],
                         input="tests/enumeration/fcc_2/enum.out_3_4")

    def test_str9(self):
        self._check_make("tests/enumeration/fcc_2/vasp.000002",
                         structures=[2],
                         input="tests/enumeration/fcc_2/enum.out_3_4")

    def test_str10(self):
        # Non-zero displacement must still reproduce the reference file.
        self._check_make("tests/enumeration/fcc_2/vasp.000003",
                         structures=[3],
                         displace=0.1,
                         input="tests/enumeration/fcc_2/enum.out_3_4")

    # --- explicit species lists change the output file naming ---

    def test_str11(self):
        self._check_make("tests/enumeration/fcc_2/vasp.3.NiAlCu",
                         structures=[3],
                         input="tests/enumeration/fcc_2/enum.out_3_4",
                         species=['Ni', 'Al', 'Cu'])

    def test_str12(self):
        self._check_make("tests/enumeration/fcc_2/vasp.3.CoWV",
                         structures=[3],
                         input="tests/enumeration/fcc_2/enum.out_3_4",
                         species=['Co', 'W', 'V'])

    def test_str13(self):
        self._check_make("tests/enumeration/fcc_1/vasp.3.TiS",
                         structures=[3],
                         input="tests/enumeration/fcc_1/enum.out_2_6",
                         species=['Ti', 'S'])

    def test_str14(self):
        self._check_make("tests/enumeration/fcc_1/vasp.3.HPt",
                         structures=[3],
                         input="tests/enumeration/fcc_1/enum.out_2_6",
                         species=['H', 'Pt'])

    # --- the run() entry point ---

    def test_str15(self):
        self._check_run("tests/enumeration/fcc_1/vasp.3.HPt",
                        structures=[3],
                        input="tests/enumeration/fcc_1/enum.out_2_6",
                        species=['H', 'Pt'])

    def test_str16(self):
        # run() must reject a missing structures list.
        self._check_raises(structures=None,
                           input="tests/enumeration/fcc_1/enum.out_2_6",
                           species=['H', 'Pt'])

    def test_str17(self):
        # run() must reject a non-numeric, non-'all' structures entry.
        self._check_raises(structures=['bite'],
                           input="tests/enumeration/fcc_1/enum.out_2_6",
                           species=['H', 'Pt'])

    def test_str18(self):
        # Same rejection as test_str16 (kept to preserve the original suite).
        self._check_raises(structures=None,
                           input="tests/enumeration/fcc_1/enum.out_2_6",
                           species=['H', 'Pt'])

    def test_str19(self):
        # 'all' expands to every enumerated structure; only check it runs.
        self._check_run(structures=['all'],
                        input="tests/enumeration/fcc_1/enum.out_2_6",
                        species=['H', 'Pt'])

    def test_str20(self):
        # A string range of structures; only check it runs.
        self._check_run(structures=['1', '3'],
                        input="tests/enumeration/fcc_1/enum.out_2_6",
                        species=['H', 'Pt'])

    def test_str21(self):
        # A range over the hcp enumeration produces multiple files; compare
        # a sample of them against the .fin references.
        from phenum.phenumStr import run
        args = self._args(structures=['1', '175'],
                          input="tests/enumeration/hcp_1/enum.out_1_4",
                          remove_zeros="t")
        try:
            run(args)
            for i in [1, 45, 90, 175]:
                self._compare_files("vasp.{}".format(i),
                                    "tests/enumeration/hcp_1/vasp.{}.fin".format(i))
        finally:
            self._cleanup()

    def test_str22(self):
        # remove_zeros=False keeps zero-count species in the output.
        self._check_run("tests/enumeration/sc_1/vasp.keep_zeros",
                        structures=[1],
                        input="tests/enumeration/sc_1/enum.out_100",
                        species=['H', 'Pt'],
                        remove_zeros=False)

    def test_str23(self):
        # remove_zeros="t" strips zero-count species from the output.
        self._check_run("tests/enumeration/sc_1/vasp.rm_zeros",
                        structures=[1],
                        input="tests/enumeration/sc_1/enum.out_100",
                        species=['H', 'Pt'],
                        remove_zeros="t")

    def test_str24(self):
        # run() must reject a completely missing args dictionary.
        from phenum.phenumStr import run
        with pytest.raises(ValueError):
            run(None)
class TestMakeConfig(ut.TestCase):
    """Tests of the _make_structures subroutine make Config files.

    Each test generates a ``to-relax.cfg`` file and compares it
    line-by-line against a reference under ``tests/enumeration/sc_1/``.
    """

    def _compare_files(self, file1, file2):
        """Assert that two files have identical whitespace-split contents."""
        out1 = []
        out2 = []
        with open(file1, "r") as o1:
            for line in o1:
                out1.append(line.strip().split())
        with open(file2, "r") as o2:
            for line in o2:
                out2.append(line.strip().split())
        self.assertEqual(out1, out2)

    def _remove_outfile(self):
        """Delete a stale to-relax.cfg so each test starts (and ends) clean.

        Portable replacement for ``os.system("rm to-relax.cfg")``.
        """
        if os.path.isfile("to-relax.cfg"):
            os.remove("to-relax.cfg")

    def _base_args(self, **overrides):
        """Default argument dict for config-file generation, with overrides."""
        args = {"structures": [1, 2],
                "debug": False,
                "examples": False,
                "displace": 0.0,
                "input": "tests/enumeration/sc_1/enum.out_100",
                "mink": True,
                "species": None,
                "verbose": None,
                "outfile": "to-relax.cfg",
                "rattle": 0.0,
                "config": "t",
                "remove_zeros": True}
        args.update(overrides)
        return args

    def test_str1(self):
        from phenum.phenumStr import _make_structures
        self._remove_outfile()
        # NOTE(review): this test's original dict used the key "mapping"
        # where the others use "species_mapping" — preserved as-is; confirm
        # which key _make_structures actually reads.
        args = self._base_args(mapping=None)
        try:
            _make_structures(args)
            self._compare_files("to-relax.cfg",
                                "tests/enumeration/sc_1/to-relax.cfg_1")
        finally:
            self._remove_outfile()

    def test_str2(self):
        from phenum.phenumStr import run
        self._remove_outfile()
        args = self._base_args(species=["Al", "Cu", "Ni"],
                               species_mapping={0: 1, 1: 2})
        try:
            run(args)
            self._compare_files("to-relax.cfg",
                                "tests/enumeration/sc_1/to-relax.cfg_2")
        finally:
            self._remove_outfile()

    def test_str3(self):
        from phenum.phenumStr import run
        self._remove_outfile()
        args = self._base_args(input="tests/enumeration/sc_1/enum.out_sc_config",
                               species=["Al", "Cu", "Ni"],
                               species_mapping={0: 1, 1: 2})
        try:
            run(args)
            self._compare_files("to-relax.cfg",
                                "tests/enumeration/sc_1/to-relax.cfg_3")
        finally:
            self._remove_outfile()
| 34.657285
| 109
| 0.46324
| 2,055
| 20,933
| 4.576642
| 0.081752
| 0.010845
| 0.058586
| 0.074429
| 0.9311
| 0.902924
| 0.894312
| 0.885167
| 0.871132
| 0.866135
| 0
| 0.032836
| 0.394783
| 20,933
| 603
| 110
| 34.71476
| 0.709527
| 0.011513
| 0
| 0.819964
| 0
| 0
| 0.251524
| 0.081196
| 0
| 0
| 0
| 0
| 0.005348
| 1
| 0.055258
| false
| 0
| 0.105169
| 0
| 0.165775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d70f77d99f9ea37f52c256bba45f483483af1338
| 134
|
py
|
Python
|
geminidr/niri/__init__.py
|
DBerke/DRAGONS
|
cecf9a03970af95126bd17a227bd5214a5d6c64b
|
[
"BSD-3-Clause"
] | 19
|
2017-10-23T14:52:51.000Z
|
2022-03-28T04:49:00.000Z
|
geminidr/niri/__init__.py
|
DBerke/DRAGONS
|
cecf9a03970af95126bd17a227bd5214a5d6c64b
|
[
"BSD-3-Clause"
] | 194
|
2017-11-01T17:32:45.000Z
|
2022-03-31T21:32:59.000Z
|
geminidr/niri/__init__.py
|
DBerke/DRAGONS
|
cecf9a03970af95126bd17a227bd5214a5d6c64b
|
[
"BSD-3-Clause"
] | 16
|
2017-11-01T05:18:04.000Z
|
2021-12-14T23:08:57.000Z
|
from . import parameters_niri
from . import parameters_niri_image
from . import primitives_niri
from . import primitives_niri_image
| 19.142857
| 35
| 0.835821
| 18
| 134
| 5.888889
| 0.333333
| 0.377358
| 0.377358
| 0.45283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134328
| 134
| 6
| 36
| 22.333333
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
d757ac048d12bef53f909df921bc7b86db76147b
| 78,136
|
py
|
Python
|
ofspy/operations_lps.py
|
ehsanfar/ofspy_v2
|
6eedfec4bb36c48473abfd473941c5d3b34590b6
|
[
"Apache-2.0"
] | null | null | null |
ofspy/operations_lps.py
|
ehsanfar/ofspy_v2
|
6eedfec4bb36c48473abfd473941c5d3b34590b6
|
[
"Apache-2.0"
] | null | null | null |
ofspy/operations_lps.py
|
ehsanfar/ofspy_v2
|
6eedfec4bb36c48473abfd473941c5d3b34590b6
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Operations implementation for lpsolve module.
"""
import random
import logging
from .operations import Operations
from .lp_solve import LinearProgram
from .lp_solve import Row
class DynamicOperations(Operations):
def __init__(self, planningHorizon=6, storagePenalty=-100, islPenalty=-10):
    """
    Initialize the dynamic operations model.

    The two penalties are negative by default: they are later added as
    coefficients to the LP objective function (scaled by data size), so
    storing data or using an ISL reduces the objective value.

    @param planningHorizon: the planning horizon (number of time steps
        the linear program optimizes over)
    @type planningHorizon: L{int}
    @param storagePenalty: the storage opportunity cost; if set to
        C{None}, a penalty is computed per satellite at execution time
    @type storagePenalty: L{float}
    @param islPenalty: the inter-satellite link (ISL) opportunity cost
    @type islPenalty: L{float}
    """
    super(DynamicOperations, self).__init__()
    self.planningHorizon = planningHorizon
    self.storagePenalty = storagePenalty
    self.islPenalty = islPenalty
def execute(self, controller, context):
"""
Executes this operations model.
@param controller: the controller for this operations model
@type controller: L{Entity}
@param context: the context of operations
@type context: L{Context}
"""
minTime = context.time
maxTime = (context.time + self.planningHorizon
if context.maxTime is None else
min(context.maxTime, context.time + self.planningHorizon))
with LinearProgram(name='OFS LP for {}'.format(controller.name)) as lp:
S = [] # S[i][j]: satellite i senses demand j
E_d = [] # E_d[t][i][j]: at time t satellite i holds data for demand j
E_c0 = [] # E_c0[i][j]: satellite i initially holds data for contract j
E_c = [] # E_c[t][i][j]: at time t satellite i holds data for contract j
T_d = [] # T_d[t][i][j][k][l]: at time t transmit data from satellite i to ground station j using protocol k for demand l
T_c = [] # T_c[t][i][j][k][l]: at time t transmit data from satellite i to ground station j using protocol k for contract l
L_d = [] # L_d[t][i][j][k][l]: at time t transmit data from isl satellite i to isl satellite j using protocol k for demand l
L_c = [] # L_c[t][i][j][k][l]: at time t transmit data from isl satellite i to isl satellite j using protocol k for contract l
R_d = [] # R_d[t][i][j]: at time t resolve data in system i for demand j
R_c = [] # R_c[t][i][j]: at time t resolve data in system i for contract j
J = Row() # objective function
demands = [e for e in context.currentEvents if e.isDemand()]
elements = controller.getElements()
federates = controller.getFederates()
satellites = [e for e in elements if e.isSpace()]
satellitesISL = [e for e in satellites
if any(m.isTransceiver() and m.isISL() for m in e.modules)]
stations = [e for e in elements if e.isGround()]
contracts = controller.getContracts()
protocolsSGL = list(set([m.protocol for e in elements
for m in e.modules if m.isTransceiver() and m.isSGL()]))
protocolsISL = list(set([m.protocol for e in elements
for m in e.modules if m.isTransceiver() and m.isISL()]))
phenomena = ['VIS','SAR',None]
for i, satellite in enumerate(satellites):
S.insert(i, [])
for j, demand in enumerate(demands):
# satellite i senses data for demand j
S[i].insert(j, lp.addColumn('{}-S-{}'.format(
satellite.name, demand.name), isBinary=True))
# constrain sensing per satellite
lp.addConstraint(Row().add(S[i][j], 1), 'LE',
1 if satellite.canSense(demand) else 0,
'{} can sense {}'.format(satellite.name,
demand.name))
for phenomenon in phenomena:
r = Row()
for j, demand in enumerate(demands):
if phenomenon is None or demand.phenomenon == phenomenon:
r.add(S[i][j], demand.size)
# constrain maximum data sensed by satellite
lp.addConstraint(r, 'LE',
min(satellite.getMaxSensed(phenomenon)
- satellite.getSensed(phenomenon),
satellite.getMaxStored(phenomenon)
- satellite.getStored(phenomenon)),
'{} max sense {}'.format(satellite.name,
phenomenon))
# set initial data stored
E_c0.insert(i, [])
for j, contract in enumerate(contracts):
E_c0[i].insert(j, 1 if any(d.contract is contract
for m in satellite.modules
for d in m.data) else 0)
for j, demand in enumerate(demands):
r = Row()
for i, satellite in enumerate(satellites):
r.add(S[i][j], 1)
lp.addConstraint(r, 'LE', 1, '{} max sensed'.format(demand.name))
for t, time in enumerate(range(minTime, maxTime+1)):
E_d.insert(t, [])
E_c.insert(t, [])
for i, satellite in enumerate(satellites):
E_d[t].insert(i, [])
E_c[t].insert(i, [])
for j, demand in enumerate(demands):
# satellite i stores data for new contract j
E_d[t][i].insert(j, lp.addColumn('{}-E-{}@{}'.format(
satellite.name, demand.name, time), isBinary=True))
# penalty for opportunity cost
J.add(E_d[t][i][j], demand.size*(self.storagePenalty
if self.storagePenalty is not None
else self.getStoragePenalty(satellite, context)))
for j, contract in enumerate(contracts):
# satellite i stores data for contract j
E_c[t][i].insert(j, lp.addColumn('{}-E-{}@{}'.format(
satellite.name, contract.name, time), isBinary=True))
# penalty for opportunity cost
J.add(E_c[t][i][j], contract.demand.size*(self.storagePenalty
if self.storagePenalty is not None
else self.getStoragePenalty(satellite, context)))
for phenomenon in phenomena:
r = Row()
for j, demand in enumerate(demands):
if phenomenon is None or demand.phenomenon == phenomenon:
r.add(E_d[t][i][j], demand.size)
for j, contract in enumerate(contracts):
if phenomenon is None or contract.demand.phenomenon == phenomenon:
r.add(E_c[t][i][j], contract.demand.size)
# constrain data stored in satellite
lp.addConstraint(r, 'LE', satellite.getMaxStored(phenomenon),
'{} max store {} at {}'.format(
satellite.name, phenomenon, time))
T_d.insert(t, [])
T_c.insert(t, [])
for i, satellite in enumerate(satellites):
T_d[t].insert(i, [])
T_c[t].insert(i, [])
txLocation = context.propagate(satellite.location, time-context.time)
for j, station in enumerate(stations):
T_d[t][i].insert(j, [])
T_c[t][i].insert(j, [])
rxLocation = context.propagate(station.location, time-context.time)
for k, protocol in enumerate(protocolsSGL):
T_d[t][i][j].insert(k, [])
T_c[t][i][j].insert(k, [])
r = Row()
maxSize = 0
for l, demand in enumerate(demands):
T_d[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
satellite.name, demand.name,
protocol, station.name, time),
isBinary=True))
r.add(T_d[t][i][j][k][l], demand.size)
maxSize = max(maxSize, demand.size
if controller.couldTransport(
protocol, demand.generateData(),
satellite, station,
txLocation, rxLocation, context)
and not demand.isDefaultedAt(
time-context.time)
else 0)
for l, contract in enumerate(contracts):
T_c[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
satellite.name, contract.name,
protocol, station.name, time),
isBinary=True))
r.add(T_c[t][i][j][k][l], contract.demand.size)
maxSize = max(maxSize, contract.demand.size
if controller.couldTransport(
protocol, contract.demand.generateData(),
satellite, station,
txLocation, rxLocation, context)
and not contract.demand.isDefaultedAt(
contract.elapsedTime+time-context.time)
else 0)
# constrain transmission by visibility
lp.addConstraint(r, 'LE', maxSize, '{}-{} visibility {} at {}'
.format(satellite.name, station.name, protocol, time))
for i, satellite in enumerate(satellites):
for k, protocol in enumerate(protocolsSGL):
r = Row()
for j, station in enumerate(stations):
for l, demand in enumerate(demands):
r.add(T_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(contracts):
r.add(T_c[t][i][j][k][l], contract.demand.size)
# constrain data transmitted by satellite
lp.addConstraint(r, 'LE', satellite.getMaxTransmitted(protocol)
- (satellite.getTransmitted(protocol)
if time == minTime else 0),
'{} max transmit {} at {}'
.format(satellite.name, protocol, time))
for j, station in enumerate(stations):
for k, protocol in enumerate(protocolsSGL):
r = Row()
for i, satellite in enumerate(satellites):
for l, demand in enumerate(demands):
r.add(T_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(contracts):
r.add(T_c[t][i][j][k][l], contract.demand.size)
# constrain data received by station
lp.addConstraint(r, 'LE', station.getMaxReceived(protocol)
- (station.getReceived(protocol)
if time == minTime else 0),
'{} max receive {} at {}'
.format(station.name, protocol, time))
L_d.insert(t, [])
L_c.insert(t, [])
for i, txSatellite in enumerate(satellitesISL):
L_d[t].insert(i, [])
L_c[t].insert(i, [])
txLocation = context.propagate(txSatellite.location, time-context.time)
for j, rxSatellite in enumerate(satellitesISL):
L_d[t][i].insert(j, [])
L_c[t][i].insert(j, [])
rxLocation = context.propagate(rxSatellite.location, time-context.time)
for k, protocol in enumerate(protocolsISL):
L_d[t][i][j].insert(k, [])
L_c[t][i][j].insert(k, [])
r = Row()
maxSize = 0
for l, demand in enumerate(demands):
L_d[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
txSatellite.name, demand.name,
protocol, rxSatellite.name, time),
isBinary=True))
# small penalty for opportunity cost
J.add(L_d[t][i][j][k][l], self.islPenalty*demand.size)
r.add(L_d[t][i][j][k][l], demand.size)
maxSize = max(maxSize, demand.size
if controller.couldTransport(
protocol, demand.generateData(),
txSatellite, rxSatellite,
txLocation, rxLocation, context)
and not demand.isDefaultedAt(
time-context.time)
else 0)
for l, contract in enumerate(contracts):
L_c[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
txSatellite.name, contract.name,
protocol, rxSatellite.name, time),
isBinary=True))
# small penalty for opportunity cost
J.add(L_c[t][i][j][k][l], self.islPenalty*contract.demand.size)
r.add(L_c[t][i][j][k][l], contract.demand.size)
maxSize = max(maxSize, contract.demand.size
if controller.couldTransport(
protocol, contract.demand.generateData(),
txSatellite, rxSatellite,
txLocation, rxLocation, context)
and not contract.demand.isDefaultedAt(
contract.elapsedTime+time-context.time)
else 0)
# constrain transmission by visibility
lp.addConstraint(r, 'LE', maxSize, '{}-{} visibility {} at {}'
.format(txSatellite.name, rxSatellite.name,
protocol, time))
for i, txSatellite in enumerate(satellitesISL):
for k, protocol in enumerate(protocolsISL):
r = Row()
for j, rxSatellite in enumerate(satellitesISL):
for l, demand in enumerate(demands):
r.add(L_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(contracts):
r.add(L_c[t][i][j][k][l], contract.demand.size)
# constrain data transmitted by satellite
lp.addConstraint(r, 'LE', txSatellite.getMaxTransmitted(protocol)
- (txSatellite.getTransmitted(protocol)
if time == minTime else 0),
'{} max transmit {} at {}'
.format(txSatellite.name, protocol, time))
for j, rxSatellite in enumerate(satellitesISL):
for k, protocol in enumerate(protocolsISL):
r = Row()
for i, txSatellite in enumerate(satellitesISL):
for l, demand in enumerate(demands):
r.add(L_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(contracts):
r.add(L_c[t][i][j][k][l], contract.demand.size)
# constrain data received by station
lp.addConstraint(r, 'LE', rxSatellite.getMaxReceived(protocol)
- (rxSatellite.getReceived(protocol)
if time == minTime else 0),
'{} max receive {} at {}'
.format(rxSatellite.name, protocol, time))
R_d.insert(t, [])
R_c.insert(t, [])
for i, element in enumerate(elements):
location = context.propagate(element.location, time-context.time)
R_d[t].insert(i, [])
R_c[t].insert(i, [])
for j, demand in enumerate(demands):
R_d[t][i].insert(j, lp.addColumn('{}-R-{}@{}'.format(
element.name, demand.name, time), isBinary=True))
J.add(R_d[t][i][j], demand.getValueAt(time-context.time)
if demand.isCompletedAt(location)
else demand.getDefaultValue())
for j, contract in enumerate(contracts):
R_c[t][i].insert(j, lp.addColumn('{}-R-{}@{}'.format(
element.name, contract.name, time), isBinary=True))
J.add(R_c[t][i][j], contract.demand.getValueAt(
contract.elapsedTime + time-context.time)
if contract.demand.isCompletedAt(location)
else contract.demand.getDefaultValue())
for i, satellite in enumerate(satellites):
R_i = elements.index(satellite)
for j, demand in enumerate(demands):
r = Row()
if time==minTime:
r.add(S[i][j], 1)
else:
r.add(E_d[t-1][i][j],1)
r.add(E_d[t][i][j],-1)
r.add(R_d[t][R_i][j],-1)
for k, station in enumerate(stations):
for l, protocol in enumerate(protocolsSGL):
r.add(T_d[t][i][k][l][j],-1)
if satellite in satellitesISL:
isl_i = satellitesISL.index(satellite)
for k, rxSatellite in enumerate(satellitesISL):
for l, protocol in enumerate(protocolsISL):
r.add(L_d[t][isl_i][k][l][j],-1)
r.add(L_d[t][k][isl_i][l][j],1)
# constrain net flow of new contracts at each satellite
lp.addConstraint(r, 'EQ', 0, '{} net flow {} at {}'
.format(satellite.name, demand.name, time))
for j, contract in enumerate(contracts):
r = Row()
if time==minTime:
# existing contracts are initial conditions
pass
else:
r.add(E_c[t-1][i][j],1)
r.add(E_c[t][i][j],-1)
r.add(R_c[t][R_i][j],-1)
for k, station in enumerate(stations):
for l, protocol in enumerate(protocolsSGL):
r.add(T_c[t][i][k][l][j],-1)
if satellite in satellitesISL:
isl_i = satellitesISL.index(satellite)
for k, rxSatellite in enumerate(satellitesISL):
for l, protocol in enumerate(protocolsISL):
r.add(L_c[t][isl_i][k][l][j],-1)
r.add(L_c[t][k][isl_i][l][j],1)
# constrain net flow of contracts at each satellite
lp.addConstraint(r, 'EQ', -1*(E_c0[i][j] if time == minTime else 0),
'{} net flow {} at {}'
.format(satellite.name, contract.name, time))
if time+1 > maxTime and self.planningHorizon > 0:
for i, satellite in enumerate(satellites):
r = Row()
for j, demand in enumerate(demands):
r.add(E_d[t][i][j],1)
for j, contract in enumerate(contracts):
r.add(E_c[t][i][j],1)
# constrain boundary flow of each satellite
lp.addConstraint(r, 'EQ', 0, '{} boundary flow'
.format(satellite.name))
for k, station in enumerate(stations):
R_k = elements.index(station)
for j, demand in enumerate(demands):
r = Row()
r.add(R_d[t][R_k][j],-1)
for i, satellite in enumerate(satellites):
for l, protocol in enumerate(protocolsSGL):
r.add(T_d[t][i][k][l][j],1)
# constrain net flow of new contracts at each station
lp.addConstraint(r, 'EQ', 0, '{} net flow {} at {}'
.format(station.name, demand.name, time))
for j, contract in enumerate(contracts):
r = Row()
r.add(R_c[t][R_k][j],-1)
for i, satellite in enumerate(satellites):
for l, protocol in enumerate(protocolsSGL):
r.add(T_c[t][i][k][l][j],1)
# constrain net flow of contracts at each station
lp.addConstraint(r, 'EQ', 0, '{} net flow {} at {}'
.format(station.name, contract.name, time))
for federate in federates:
r = Row()
for j, demand in enumerate(demands):
if federate.canContract(demand, context): # TODO does not consider priority
for i, element in enumerate(elements):
location = context.propagate(element.location, time-context.time)
r.add(R_d[0][i][j], (demand.getValueAt(0)
if demand.isCompletedAt(location)
else demand.getDefaultValue()))
for j, contract in enumerate(contracts):
if contract in federate.contracts:
for i, element in enumerate(elements):
location = context.propagate(element.location, time-context.time)
r.add(R_c[0][i][j], (contract.demand.getValueAt(contract.elapsedTime)
if contract.demand.isCompletedAt(location)
else contract.demand.getDefaultValue()))
lp.addConstraint(r, 'GE', -1 - federate.cash,
'{} net cash must be positive'
.format(federate.name))
lp.setObjective(J, False)
code, description = lp.solve()
if code > 1:
logging.warning(description)
with open('lp_debug.txt', 'w+') as f:
f.write(lp.dumpProgram())
else:
""" debug code
with open('lp_solution_{}_{}.txt'.format(
controller.name, context.time), 'w+') as f:
f.write(lp.dumpSolution())
with open('lp_program_{}_{}.txt'.format(
controller.name, context.time), 'w+') as f:
f.write(lp.dumpProgram())
"""
def _transportContract(operations, satellite, contract, context):
i = satellites.index(satellite)
R_i = elements.index(satellite)
j = contracts.index(contract)
data = context.getData(contract)
if data is not None:
if lp.get(R_c[0][R_i][j]) > 0:
controller.resolve(contract, context)
elif lp.get(E_c[0][i][j]) > 0:
satellite.store(data)
elif any(any(lp.get(T_c[0][i][k][l][j])
for k, station in enumerate(stations))
for l, protocol in enumerate(protocolsSGL)):
for k, station in enumerate(stations):
for l, protocol in enumerate(protocolsSGL):
if(lp.get(T_c[0][i][k][l][j])):
controller.transport(protocol, data,
satellite, station, context)
controller.resolve(contract, context)
elif satellite in satellitesISL:
isl_i = satellitesISL.index(satellite)
for k, rxSatellite in enumerate(satellitesISL):
for l, protocol in enumerate(protocolsISL):
if(lp.get(L_c[0][isl_i][k][l][j])):
controller.transport(protocol, data,
satellite, rxSatellite, context)
_transportContract(operations, rxSatellite,
contract, context)
def _transportDemand(operations, satellite, demand, context):
i = satellites.index(satellite)
R_i = elements.index(satellite)
j = demands.index(demand)
contract = context.getContract(demand)
data = context.getData(contract)
if contract is not None and data is not None:
if lp.get(R_d[0][R_i][j]) > 0:
controller.resolve(contract, context)
elif lp.get(E_d[0][i][j]) > 0:
satellite.store(data)
elif any(any(lp.get(T_d[0][i][k][l][j])
for k, station in enumerate(stations))
for l, protocol in enumerate(protocolsSGL)):
for k, station in enumerate(stations):
for l, protocol in enumerate(protocolsSGL):
if(lp.get(T_d[0][i][k][l][j])):
controller.transport(protocol, data,
satellite, station, context)
controller.resolve(contract, context)
elif satellite in satellitesISL:
isl_i = satellitesISL.index(satellite)
for k, rxSatellite in enumerate(satellitesISL):
for l, protocol in enumerate(protocolsISL):
if(lp.get(L_d[0][isl_i][k][l][j])):
controller.transport(protocol, data,
satellite, rxSatellite, context)
_transportDemand(operations, rxSatellite,
demand, context)
# first, transport contracts to resolution
for j, contract in enumerate(contracts):
if any(lp.get(R_c[0][i][j]) > 0
for i, element in enumerate(elements)):
logging.debug('Transporting contract {} for resolution...'
.format(contract.name))
satellite = context.getDataElement(contract)
_transportContract(self, satellite, contract, context)
# second, sense and transport demands to resolution
for j, demand in enumerate(demands):
if any(lp.get(R_d[0][i][j]) > 0
for i, element in enumerate(elements)):
logging.debug('Sensing and transporting demand {} for resolution...'
.format(demand.name))
satellite = next(e for i, e in enumerate(satellites)
if lp.get(S[i][j]) > 0)
contract = controller.contract(demand, context)
controller.senseAndStore(contract, satellite, context)
_transportDemand(self, satellite, demand, context)
# third, sense all demands to be stored
for j, demand in enumerate(demands):
if (all(lp.get(R_d[0][i][j]) < 1
for i, element in enumerate(elements))
and any(lp.get(S[i][j]) > 0
for i, element in enumerate(satellites))):
logging.debug('Sensing demand {} for storage...'
.format(demand.name))
satellite = next(e for i, e in enumerate(satellites)
if lp.get(S[i][j]) > 0)
contract = controller.contract(demand, context)
controller.senseAndStore(contract, satellite, context)
# fourth, transport demands to storage
for j, demand in enumerate(demands):
if (all(lp.get(R_d[0][i][j]) < 1
for i, element in enumerate(elements))
and any(lp.get(S[i][j]) > 0
for i, element in enumerate(satellites))):
logging.debug('Transporting demand {} for storage...'
.format(demand.name))
satellite = next(e for i, e in enumerate(satellites)
if lp.get(S[i][j]) > 0)
_transportDemand(self, satellite, demand, context)
# finally, transport contracts to storage
for j, contract in enumerate(contracts):
if all(lp.get(R_c[0][i][j]) < 1
for i, element in enumerate(elements)):
logging.debug('Transporting contract {} for storage...'
.format(contract.name))
satellite = context.getDataElement(contract)
_transportContract(self, satellite, contract, context)
class FixedCostDynamicOperations(DynamicOperations):
def __init__(self, planningHorizon=6, storagePenalty=-100,
islPenalty=-10, costSGL=50, costISL=20):
"""
@param planningHorizon: the planning horizon
@type planningHorizon: L{int}
@param storagePenalty: the storage opportuntiy cost
@type storagePenalty: L{float}
@param islPenalty: the ISL opportuntiy cost
@type islPenalty: L{float}
@param costSGL: the cost to use SGL
@type costSGL: L{float}
@param costISL: the cost to use ISL
@type costISL: L{float}
"""
super(FixedCostDynamicOperations, self).__init__(
planningHorizon, storagePenalty, islPenalty)
self.costSGL = costSGL
self.costISL = costISL
def execute(self, controller, context):
"""
Executes this operations model.
@param controller: the controller for this operations model
@type controller: L{Entity}
@param context: the context of operations
@type context: L{Context}
"""
minTime = context.time
maxTime = (context.time + self.planningHorizon
if context.maxTime is None else
min(context.maxTime, context.time + self.planningHorizon))
allElements = controller.getElements()
allSatellites = [e for e in allElements if e.isSpace()]
allSatellitesISL = [e for e in allSatellites
if any(m.isTransceiver() and m.isISL()
for m in e.modules)]
allStations = [e for e in allElements if e.isGround()]
allContracts = controller.getContracts()
protocolsSGL = list(set([m.protocol for e in allElements
for m in e.modules
if m.isTransceiver()
and m.isSGL()]))
protocolsISL = list(set([m.protocol for e in allElements
for m in e.modules
if m.isTransceiver()
and m.isISL()]))
phenomena = ['VIS','SAR',None]
federates = controller.getFederates()
random.shuffle(federates, context.orderStream.random)
for federate in federates:
with LinearProgram(name='OFS LP for {}'.format(controller.name)) as lp:
S = [] # S[i][j]: own satellite i senses demand j
E_d = [] # E_d[t][i][j]: at time t own satellite i holds data for demand j
E_c0 = [] # E_c0[i][j]: own satellite i initially holds data for own contract j
E_c = [] # E_c[t][i][j]: at time t own satellite i holds data for own contract j
T_d = [] # T_d[t][i][j][k][l]: at time t transmit data from satellite i to ground station j using protocol k for demand l
T_c = [] # T_c[t][i][j][k][l]: at time t transmit data from satellite i to ground station j using protocol k for contract l
L_d = [] # L_d[t][i][j][k][l]: at time t transmit data from isl satellite i to isl satellite j using protocol k for demand l
L_c = [] # L_c[t][i][j][k][l]: at time t transmit data from isl satellite i to isl satellite j using protocol k for contract l
R_d = [] # R_d[t][i][j]: at time t resolve data in system i for demand j
R_c = [] # R_c[t][i][j]: at time t resolve data in system i for contract j
J = Row() # objective function
demands = [e for e in context.currentEvents if e.isDemand()]
ownElements = [e for e in controller.getElements()
if e in federate.elements]
ownSatellites = [e for e in ownElements if e.isSpace()]
ownSatellitesISL = [e for e in ownSatellites
if any(m.isTransceiver()
and m.isISL()
for m in e.modules)]
ownStations = [e for e in ownElements if e.isGround()]
ownContracts = [c for c in controller.getContracts()
if c in federate.contracts]
for i, satellite in enumerate(ownSatellites):
S.insert(i, [])
for j, demand in enumerate(demands):
# satellite i senses data for demand j
S[i].insert(j, lp.addColumn('{}-S-{}'.format(
satellite.name, demand.name), isBinary=True))
# constrain sensing per satellite
lp.addConstraint(Row().add(S[i][j], 1), 'LE',
1 if satellite.canSense(demand) else 0,
'{} can sense {}'.format(satellite.name,
demand.name))
for phenomenon in phenomena:
r = Row()
for j, demand in enumerate(demands):
if phenomenon is None or demand.phenomenon == phenomenon:
r.add(S[i][j], demand.size)
# constrain maximum data sensed by satellite
lp.addConstraint(r, 'LE',
min(satellite.getMaxSensed(phenomenon)
- satellite.getSensed(phenomenon),
satellite.getMaxStored(phenomenon)
- satellite.getStored(phenomenon)),
'{} max sense {}'.format(satellite.name,
phenomenon))
# set initial data stored
E_c0.insert(i, [])
for j, contract in enumerate(ownContracts):
E_c0[i].insert(j, 1 if any(d.contract is contract
for m in satellite.modules
for d in m.data) else 0)
for j, demand in enumerate(demands):
r = Row()
for i, satellite in enumerate(ownSatellites):
r.add(S[i][j], 1)
lp.addConstraint(r, 'LE', 1, '{} max sensed'.format(demand.name))
for t, time in enumerate(range(minTime, maxTime+1)):
E_d.insert(t, [])
E_c.insert(t, [])
for i, satellite in enumerate(ownSatellites):
E_d[t].insert(i, [])
E_c[t].insert(i, [])
for j, demand in enumerate(demands):
# satellite i stores data for new contract j
E_d[t][i].insert(j, lp.addColumn('{}-E-{}@{}'.format(
satellite.name, demand.name, time), isBinary=True))
# penalty for opportunity cost
J.add(E_d[t][i][j], demand.size*(self.storagePenalty
if self.storagePenalty is not None
else self.getStoragePenalty(satellite, context)))
for j, contract in enumerate(ownContracts):
# satellite i stores data for contract j
E_c[t][i].insert(j, lp.addColumn('{}-E-{}@{}'.format(
satellite.name, contract.name, time), isBinary=True))
# penalty for opportunity cost
J.add(E_c[t][i][j], contract.demand.size*(self.storagePenalty
if self.storagePenalty is not None
else self.getStoragePenalty(satellite, context)))
for phenomenon in phenomena:
r = Row()
for j, demand in enumerate(demands):
if phenomenon is None or demand.phenomenon == phenomenon:
r.add(E_d[t][i][j], demand.size)
for j, contract in enumerate(ownContracts):
if phenomenon is None or contract.demand.phenomenon == phenomenon:
r.add(E_c[t][i][j], contract.demand.size)
# constrain data stored in satellite
lp.addConstraint(r, 'LE', satellite.getMaxStored(phenomenon),
'{} max store {} at {}'.format(
satellite.name, phenomenon, time))
T_d.insert(t, [])
T_c.insert(t, [])
for i, satellite in enumerate(allSatellites):
T_d[t].insert(i, [])
T_c[t].insert(i, [])
txLocation = context.propagate(satellite.location, time-context.time)
for j, station in enumerate(allStations):
T_d[t][i].insert(j, [])
T_c[t][i].insert(j, [])
rxLocation = context.propagate(station.location, time-context.time)
for k, protocol in enumerate(protocolsSGL):
T_d[t][i][j].insert(k, [])
T_c[t][i][j].insert(k, [])
r = Row()
maxSize = 0
for l, demand in enumerate(demands):
T_d[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
satellite.name, demand.name,
protocol, station.name, time),
isBinary=True))
if station not in ownStations:
J.add(T_d[t][i][j][k][l],
-1*self.costSGL*demand.size)
r.add(T_d[t][i][j][k][l], demand.size)
maxSize = max(maxSize, demand.size
if controller.couldTransport(
protocol, demand.generateData(),
satellite, station,
txLocation, rxLocation, context)
and not demand.isDefaultedAt(
time-context.time)
else 0)
for l, contract in enumerate(ownContracts):
T_c[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
satellite.name, contract.name,
protocol, station.name, time),
isBinary=True))
if station not in ownStations:
J.add(T_c[t][i][j][k][l],
-1*self.costSGL*contract.demand.size)
r.add(T_c[t][i][j][k][l], contract.demand.size)
maxSize = max(maxSize, contract.demand.size
if controller.couldTransport(
protocol, contract.demand.generateData(),
satellite, station,
txLocation, rxLocation, context)
and not contract.demand.isDefaultedAt(
contract.elapsedTime + time-context.time)
else 0)
# constrain transmission by visibility
lp.addConstraint(r, 'LE', maxSize, '{}-{} visibility {} at {}'
.format(satellite.name, station.name, protocol, time))
for i, satellite in enumerate(allSatellites):
for k, protocol in enumerate(protocolsSGL):
r = Row()
for j, station in enumerate(allStations):
for l, demand in enumerate(demands):
r.add(T_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(ownContracts):
r.add(T_c[t][i][j][k][l], contract.demand.size)
# constrain data transmitted by satellite
lp.addConstraint(r, 'LE', satellite.getMaxTransmitted(protocol)
- (satellite.getTransmitted(protocol)
if time == minTime else 0),
'{} max transmit {} at {}'
.format(satellite.name, protocol, time))
for j, station in enumerate(allStations):
for k, protocol in enumerate(protocolsSGL):
r = Row()
for i, satellite in enumerate(allSatellites):
for l, demand in enumerate(demands):
r.add(T_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(ownContracts):
r.add(T_c[t][i][j][k][l], contract.demand.size)
# constrain data received by station
if station in ownStations:
lp.addConstraint(r, 'LE', station.getMaxReceived(protocol)
- (station.getReceived(protocol)
if time == minTime else 0),
'{} max receive {} at {}'
.format(station.name, protocol, time))
else:
# do not assume future availability
lp.addConstraint(r, 'LE', (station.getMaxReceived(protocol)
- station.getReceived(protocol))
if time == minTime else 0,
'{} max receive {} at {}'
.format(station.name, protocol, time))
L_d.insert(t, [])
L_c.insert(t, [])
for i, txSatellite in enumerate(allSatellitesISL):
L_d[t].insert(i, [])
L_c[t].insert(i, [])
txLocation = context.propagate(txSatellite.location, time-context.time)
for j, rxSatellite in enumerate(allSatellitesISL):
L_d[t][i].insert(j, [])
L_c[t][i].insert(j, [])
rxLocation = context.propagate(rxSatellite.location, time-context.time)
for k, protocol in enumerate(protocolsISL):
L_d[t][i][j].insert(k, [])
L_c[t][i][j].insert(k, [])
r = Row()
maxSize = 0
for l, demand in enumerate(demands):
L_d[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
txSatellite.name, demand.name,
protocol, rxSatellite.name, time),
isBinary=True))
if (txSatellite not in ownSatellites
or rxSatellite not in ownSatellites):
J.add(L_d[t][i][j][k][l],
self.costISL*demand.size)
else:
# small penalty for opportunity cost
J.add(L_d[t][i][j][k][l], self.islPenalty*demand.size)
r.add(L_d[t][i][j][k][l], demand.size)
maxSize = max(maxSize, demand.size
if controller.couldTransport(
protocol, demand.generateData(),
txSatellite, rxSatellite,
txLocation, rxLocation, context)
and not demand.isDefaultedAt(
time-context.time)
else 0)
for l, contract in enumerate(ownContracts):
L_c[t][i][j][k].insert(l, lp.addColumn(
'{}-T({}/{})-{}@{}'.format(
txSatellite.name, contract.name,
protocol, rxSatellite.name, time),
isBinary=True))
if (txSatellite not in ownSatellites
or rxSatellite not in ownSatellites):
J.add(L_c[t][i][j][k][l],
self.costISL*contract.demand.size)
else:
# small penalty for opportunity cost
J.add(L_c[t][i][j][k][l],
self.islPenalty*contract.demand.size)
r.add(L_c[t][i][j][k][l], contract.demand.size)
maxSize = max(maxSize, contract.demand.size
if controller.couldTransport(
protocol, contract.demand.generateData(),
txSatellite, rxSatellite,
txLocation, rxLocation, context)
and not contract.demand.isDefaultedAt(
contract.elapsedTime + time-context.time)
else 0)
# constrain transmission by visibility
lp.addConstraint(r, 'LE', maxSize, '{}-{} visibility {} at {}'
.format(txSatellite.name, rxSatellite.name,
protocol, time))
for i, txSatellite in enumerate(allSatellitesISL):
for k, protocol in enumerate(protocolsISL):
r = Row()
for j, rxSatellite in enumerate(allSatellitesISL):
for l, demand in enumerate(demands):
r.add(L_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(ownContracts):
r.add(L_c[t][i][j][k][l], contract.demand.size)
# constrain data transmitted by satellite
lp.addConstraint(r, 'LE', txSatellite.getMaxTransmitted(protocol)
- (txSatellite.getTransmitted(protocol)
if time == minTime else 0),
'{} max transmit {} at {}'
.format(txSatellite.name, protocol, time))
for j, rxSatellite in enumerate(allSatellitesISL):
for k, protocol in enumerate(protocolsISL):
r = Row()
for i, txSatellite in enumerate(allSatellitesISL):
for l, demand in enumerate(demands):
r.add(L_d[t][i][j][k][l], demand.size)
for l, contract in enumerate(ownContracts):
r.add(L_c[t][i][j][k][l], contract.demand.size)
if rxSatellite in ownSatellites:
# constrain data received by station
lp.addConstraint(r, 'LE', rxSatellite.getMaxReceived(protocol)
- (rxSatellite.getReceived(protocol)
if time == minTime else 0),
'{} max receive {} at {}'
.format(rxSatellite.name, protocol, time))
else:
# do not assume future availability
lp.addConstraint(r, 'LE', (rxSatellite.getMaxReceived(protocol)
- rxSatellite.getReceived(protocol))
if time == minTime else 0,
'{} max receive {} at {}'
.format(rxSatellite.name, protocol, time))
R_d.insert(t, [])
R_c.insert(t, [])
for i, element in enumerate(allElements):
location = context.propagate(element.location, time-context.time)
R_d[t].insert(i, [])
R_c[t].insert(i, [])
for j, demand in enumerate(demands):
R_d[t][i].insert(j, lp.addColumn('{}-R-{}@{}'.format(
element.name, demand.name, time), isBinary=True))
J.add(R_d[t][i][j], demand.getValueAt(time-context.time)
if demand.isCompletedAt(location)
else demand.getDefaultValue())
for j, contract in enumerate(ownContracts):
R_c[t][i].insert(j, lp.addColumn('{}-R-{}@{}'.format(
element.name, contract.name, time), isBinary=True))
J.add(R_c[t][i][j], contract.demand.getValueAt(
contract.elapsedTime + time-context.time)
if contract.demand.isCompletedAt(location)
else contract.demand.getDefaultValue())
for i, satellite in enumerate(allSatellites):
R_i = allElements.index(satellite)
for j, demand in enumerate(demands):
r = Row()
if satellite in ownSatellites:
SE_i = ownSatellites.index(satellite)
if time==minTime:
r.add(S[SE_i][j], 1)
else:
r.add(E_d[t-1][SE_i][j],1)
r.add(E_d[t][SE_i][j],-1)
r.add(R_d[t][R_i][j],-1)
for k, station in enumerate(allStations):
for l, protocol in enumerate(protocolsSGL):
r.add(T_d[t][i][k][l][j],-1)
if satellite in allSatellitesISL:
isl_i = allSatellitesISL.index(satellite)
for k, rxSatellite in enumerate(allSatellitesISL):
for l, protocol in enumerate(protocolsISL):
r.add(L_d[t][isl_i][k][l][j],-1)
r.add(L_d[t][k][isl_i][l][j],1)
# constrain net flow of new contracts at each satellite
lp.addConstraint(r, 'EQ', 0, '{} net flow {} at {}'
.format(satellite.name, demand.name, time))
for j, contract in enumerate(ownContracts):
r = Row()
if satellite in ownSatellites:
SE_i = ownSatellites.index(satellite)
if time==minTime:
# existing contracts are initial conditions
pass
else:
r.add(E_c[t-1][SE_i][j],1)
r.add(E_c[t][SE_i][j],-1)
r.add(R_c[t][R_i][j],-1)
for k, station in enumerate(allStations):
for l, protocol in enumerate(protocolsSGL):
r.add(T_c[t][i][k][l][j],-1)
if satellite in allSatellitesISL:
isl_i = allSatellitesISL.index(satellite)
for k, rxSatellite in enumerate(allSatellitesISL):
for l, protocol in enumerate(protocolsISL):
r.add(L_c[t][isl_i][k][l][j],-1)
r.add(L_c[t][k][isl_i][l][j],1)
# constrain net flow of contracts at each satellite
if satellite in ownSatellites:
SE_i = ownSatellites.index(satellite)
lp.addConstraint(r, 'EQ', -1*(E_c0[SE_i][j] if time == minTime else 0),
'{} net flow {} at {}'
.format(satellite.name, contract.name, time))
else:
lp.addConstraint(r, 'EQ', 0,
'{} net flow {} at {}'
.format(satellite.name, contract.name, time))
if time+1 > maxTime and self.planningHorizon > 0:
for i, satellite in enumerate(ownSatellites):
r = Row()
for j, demand in enumerate(demands):
r.add(E_d[t][i][j],1)
for j, contract in enumerate(ownContracts):
r.add(E_c[t][i][j],1)
# constrain boundary flow of each satellite
lp.addConstraint(r, 'EQ', 0, '{} boundary flow'
.format(satellite.name))
for k, station in enumerate(allStations):
R_k = allElements.index(station)
for j, demand in enumerate(demands):
r = Row()
r.add(R_d[t][R_k][j],-1)
for i, satellite in enumerate(allSatellites):
for l, protocol in enumerate(protocolsSGL):
r.add(T_d[t][i][k][l][j],1)
# constrain net flow of new contracts at each station
lp.addConstraint(r, 'EQ', 0, '{} net flow {} at {}'
.format(station.name, demand.name, time))
for j, contract in enumerate(ownContracts):
r = Row()
r.add(R_c[t][R_k][j],-1)
for i, satellite in enumerate(allSatellites):
for l, protocol in enumerate(protocolsSGL):
r.add(T_c[t][i][k][l][j],1)
# constrain net flow of contracts at each station
lp.addConstraint(r, 'EQ', 0, '{} net flow {} at {}'
.format(station.name, contract.name, time))
r = Row()
for l, demand in enumerate(demands):
for i, element in enumerate(allElements):
location = context.propagate(element.location, time-context.time)
r.add(R_d[0][i][l], (demand.getValueAt(0)
if demand.isCompletedAt(location)
else demand.getDefaultValue()))
for i, satellite in enumerate(allSatellites):
for j, station in enumerate(allStations):
for k, protocol in enumerate(protocolsSGL):
if station not in ownStations:
r.add(T_d[0][i][j][k][l],
-1*self.costSGL*demand.size)
for i, txSatellite in enumerate(allSatellitesISL):
for j, rxSatellite in enumerate(allSatellitesISL):
for k, protocol in enumerate(protocolsISL):
if rxSatellite not in ownSatellites:
r.add(L_d[0][i][j][k][l],
-1*self.costISL*demand.size)
for l, contract in enumerate(ownContracts):
for i, element in enumerate(allElements):
location = context.propagate(element.location, time-context.time)
r.add(R_c[0][i][l], (contract.demand.getValueAt(contract.elapsedTime)
if contract.demand.isCompletedAt(location)
else contract.demand.getDefaultValue()))
for i, satellite in enumerate(allSatellites):
for j, station in enumerate(allStations):
for k, protocol in enumerate(protocolsSGL):
if station not in ownStations:
r.add(T_c[0][i][j][k][l],
-1*self.costSGL*contract.demand.size)
for i, txSatellite in enumerate(allSatellitesISL):
for j, rxSatellite in enumerate(allSatellitesISL):
for k, protocol in enumerate(protocolsISL):
if rxSatellite not in ownSatellites:
r.add(L_c[0][i][j][k][l],
-1*self.costISL*contract.demand.size)
lp.addConstraint(r, 'GE', -1 - federate.cash,
'{} net cash must be positive'
.format(federate.name))
lp.setObjective(J, False)
code, description = lp.solve()
if code > 1:
logging.warning(description)
with open('lp_debug.txt', 'w+') as f:
f.write(lp.dumpProgram())
else:
""" debug code
with open('lp_solution_{}_{}.txt'.format(
federate.name, context.time), 'w+') as f:
f.write(lp.dumpSolution())
with open('lp_program_{}_{}.txt'.format(
federate.name, context.time), 'w+') as f:
f.write(lp.dumpProgram())
"""
            def _transportContract(operations, satellite, contract, context):
                """
                Recursively carries out the solved program's plan for an
                existing contract: resolve it, store it, downlink it over
                SGL, or forward it over an ISL hop, exchanging a fixed
                payment with the supplying federate whenever another
                federate's asset is used.
                @param operations: this operations model (supplies costSGL/costISL)
                @param satellite: the satellite currently holding the data
                @param contract: the contract being transported
                @param context: the context of operations
                """
                i = allSatellites.index(satellite)
                R_i = allElements.index(satellite)
                j = ownContracts.index(contract)
                data = context.getData(contract)
                if data is not None:
                    if lp.get(R_c[0][R_i][j]) > 0:
                        # the program resolves the contract at this element
                        controller.resolve(contract, context)
                    elif (satellite in ownSatellites
                          and lp.get(E_c[0][ownSatellites.index(satellite)][j]) > 0):
                        # storage is only modeled for the federate's own
                        # satellites, hence the membership guard
                        satellite.store(data)
                    elif any(any(lp.get(T_c[0][i][k][l][j])
                                 for k, station in enumerate(allStations))
                             for l, protocol in enumerate(protocolsSGL)):
                        for k, station in enumerate(allStations):
                            for l, protocol in enumerate(protocolsSGL):
                                if(lp.get(T_c[0][i][k][l][j])):
                                    # downlink and resolve, then pay the
                                    # station's owner if it is not ours
                                    controller.transport(protocol, data,
                                                         satellite, station, context)
                                    controller.resolve(contract, context)
                                    if station not in ownStations:
                                        supplier = context.getElementOwner(station)
                                        controller.exchange(operations.costSGL,
                                                            federate, supplier)
                                        logging.debug('{} paid {} to {} for SGL'
                                                      .format(federate.name,
                                                              operations.costSGL,
                                                              supplier.name))
                    elif satellite in allSatellitesISL:
                        isl_i = allSatellitesISL.index(satellite)
                        for k, rxSatellite in enumerate(allSatellitesISL):
                            for l, protocol in enumerate(protocolsISL):
                                if(lp.get(L_c[0][isl_i][k][l][j])):
                                    # forward over ISL, recurse from the
                                    # receiver, then settle payment
                                    controller.transport(protocol, data,
                                                         satellite, rxSatellite, context)
                                    _transportContract(operations, rxSatellite,
                                                       contract, context)
                                    # NOTE(review): the LP objective charges
                                    # ISL cost when either endpoint is foreign,
                                    # but payment here occurs only when the
                                    # receiver is foreign — confirm intended
                                    if rxSatellite not in ownSatellites:
                                        supplier = context.getElementOwner(rxSatellite)
                                        controller.exchange(operations.costISL,
                                                            federate, supplier)
                                        logging.debug('{} paid {} to {} for ISL'
                                                      .format(federate.name,
                                                              operations.costISL,
                                                              supplier.name))
        def _transportDemand(operations, satellite, demand, context):
            """Carry out the solved LP's transport plan for one sensed demand.

            Mirror of `_transportContract`, but keyed on the demand's decision
            variables (R_d/E_d/T_d/L_d) and looking up the contract that was
            issued for the demand. Recurses across ISL hops and pays
            non-federate element owners for links used.
            """
            i = allSatellites.index(satellite)
            R_i = allElements.index(satellite)
            j = demands.index(demand)
            contract = context.getContract(demand)
            data = context.getData(contract)
            if contract is not None and data is not None:
                # R_d positive: plan says resolve the demand's contract here.
                if lp.get(R_d[0][R_i][j]) > 0:
                    controller.resolve(contract, context)
                # E_d positive: keep the data stored on this owned satellite.
                elif (satellite in ownSatellites
                      and lp.get(E_d[0][ownSatellites.index(satellite)][j]) > 0):
                    satellite.store(data)
                # T_d set for any station/protocol: downlink via SGL and
                # resolve; pay the station owner if it is not ours.
                elif any(any(lp.get(T_d[0][i][k][l][j])
                             for k, station in enumerate(allStations))
                         for l, protocol in enumerate(protocolsSGL)):
                    for k, station in enumerate(allStations):
                        for l, protocol in enumerate(protocolsSGL):
                            if(lp.get(T_d[0][i][k][l][j])):
                                controller.transport(protocol, data,
                                                     satellite, station, context)
                                controller.resolve(contract, context)
                                if station not in ownStations:
                                    supplier = context.getElementOwner(station)
                                    controller.exchange(operations.costSGL,
                                                        federate, supplier)
                                    logging.debug('{} paid {} to {} for SGL'
                                                  .format(federate.name,
                                                          operations.costSGL,
                                                          supplier.name))
                # Otherwise crosslink (L_d) to the receiving satellite and
                # recurse from there; pay non-owned receivers for the ISL.
                elif satellite in allSatellitesISL:
                    isl_i = allSatellitesISL.index(satellite)
                    for k, rxSatellite in enumerate(allSatellitesISL):
                        for l, protocol in enumerate(protocolsISL):
                            if(lp.get(L_d[0][isl_i][k][l][j])):
                                controller.transport(protocol, data,
                                                     satellite, rxSatellite, context)
                                _transportDemand(operations, rxSatellite,
                                                 demand, context)
                                if rxSatellite not in ownSatellites:
                                    supplier = context.getElementOwner(rxSatellite)
                                    controller.exchange(operations.costISL,
                                                        federate, supplier)
                                    logging.debug('{} paid {} to {} for ISL'
                                                  .format(federate.name,
                                                          operations.costISL,
                                                          supplier.name))
# first, transport contracts to resolution
for j, contract in enumerate(ownContracts):
if any(lp.get(R_c[0][i][j]) > 0
for i, element in enumerate(allElements)):
logging.debug('Transporting contract {} for resolution...'
.format(contract.name))
satellite = context.getDataElement(contract)
_transportContract(self, satellite, contract, context)
# second, sense and transport demands to resolution
for j, demand in enumerate(demands):
if any(lp.get(R_d[0][i][j]) > 0
for i, element in enumerate(allElements)):
logging.debug('Sensing and transporting demand {} for resolution...'
.format(demand.name))
satellite = next(e for i, e in enumerate(ownSatellites)
if lp.get(S[i][j]) > 0)
contract = federate.contract(demand, context)
federate.senseAndStore(contract, satellite, context)
_transportDemand(self, satellite, demand, context)
# third, sense all demands to be stored
for j, demand in enumerate(demands):
if (all(lp.get(R_d[0][i][j]) < 1
for i, element in enumerate(allElements))
and any(lp.get(S[i][j]) > 0
for i, element in enumerate(ownSatellites))):
logging.debug('Sensing demand {} for storage...'
.format(demand.name))
satellite = next(e for i, e in enumerate(ownSatellites)
if lp.get(S[i][j]) > 0)
contract = federate.contract(demand, context)
federate.senseAndStore(contract, satellite, context)
# fourth, transport demands to storage
for j, demand in enumerate(demands):
if (all(lp.get(R_d[0][i][j]) < 1
for i, element in enumerate(allElements))
and any(lp.get(S[i][j]) > 0
for i, element in enumerate(ownSatellites))):
logging.debug('Transporting demand {} for storage...'
.format(demand.name))
satellite = next(e for i, e in enumerate(ownSatellites)
if lp.get(S[i][j]) > 0)
_transportDemand(self, satellite, demand, context)
# finally, transport contracts to storage
for j, contract in enumerate(ownContracts):
if all(lp.get(R_c[0][i][j]) < 1
for i, element in enumerate(allElements)):
logging.debug('Transporting contract {} for storage...'
.format(contract.name))
satellite = context.getDataElement(contract)
_transportContract(self, satellite, contract, context)
| 65.059117
| 145
| 0.407891
| 6,821
| 78,136
| 4.625421
| 0.045155
| 0.071823
| 0.007797
| 0.006086
| 0.944057
| 0.940983
| 0.931347
| 0.91664
| 0.910491
| 0.90149
| 0
| 0.005389
| 0.503635
| 78,136
| 1,201
| 146
| 65.059117
| 0.808091
| 0.070467
| 0
| 0.910491
| 0
| 0
| 0.022335
| 0
| 0
| 0
| 0
| 0.000833
| 0
| 1
| 0.0077
| false
| 0.001925
| 0.004812
| 0
| 0.014437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ad069b10db0ea801c59940f8e97de467c4106b5b
| 146
|
py
|
Python
|
examples/__init__.py
|
longw010/PyTorch-F1
|
13be169bddc41316e498c4e4cd20575bc8b51f9a
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
longw010/PyTorch-F1
|
13be169bddc41316e498c4e4cd20575bc8b51f9a
|
[
"MIT"
] | 3
|
2020-04-04T19:50:28.000Z
|
2021-05-01T10:37:01.000Z
|
examples/__init__.py
|
longw010/PyTorch-F1
|
13be169bddc41316e498c4e4cd20575bc8b51f9a
|
[
"MIT"
] | null | null | null |
import os
# TODO: remove this line during delopyment
os.environ['PYTHONPATH'] = '${PYTHONPATH}:/Users/longw/Documents/github/PyTorch-F1/torchf1'
| 29.2
| 91
| 0.767123
| 19
| 146
| 5.894737
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015038
| 0.089041
| 146
| 4
| 92
| 36.5
| 0.827068
| 0.273973
| 0
| 0
| 0
| 0
| 0.692308
| 0.596154
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
ad757b95a83e1b4b7228302e86634081fb0deb74
| 25,041
|
py
|
Python
|
plot_utils.py
|
bekaiser/tumor_dynamics
|
062c2c7c674ba18acc67712f780095b3db3e6902
|
[
"MIT"
] | null | null | null |
plot_utils.py
|
bekaiser/tumor_dynamics
|
062c2c7c674ba18acc67712f780095b3db3e6902
|
[
"MIT"
] | null | null | null |
plot_utils.py
|
bekaiser/tumor_dynamics
|
062c2c7c674ba18acc67712f780095b3db3e6902
|
[
"MIT"
] | null | null | null |
import numpy as np
import math as ma
import matplotlib
# Select the non-interactive Agg backend so figures can be written headlessly;
# this must run before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Global figure styling: LaTeX-rendered text, 15 pt serif (Times) fonts.
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size': 15})
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.serif"] = ["Times New Roman"] + plt.rcParams["font.serif"]
import matplotlib.colors as colors
import easydict
#==============================================================================
# functions
def plot_vars_invasion_noise( f, m, n, t, q, constants ):
    """Save a 1x3 filled-contour figure of the invasion-model noise fields.

    f, m, n : 2-D arrays on the (constants.X, constants.Y) grid.
    t : float time shown in each panel title.
    q : int frame index; output is '<figure_path>noise_<q>.png'.
    constants : object with attributes X, Y and figure_path.
    """
    X = constants.X
    Y = constants.Y
    plotname = constants.figure_path + 'noise_%i.png' % q
    fig = plt.figure(figsize=(16, 4.5))
    # The three panels differ only in the plotted field and its symbol,
    # so draw them in a loop instead of three copy-pasted stanzas.
    for col, (field, symbol) in enumerate(((f, 'f'), (m, 'm'), (n, 'n')), 1):
        plt.subplot(1, 3, col)
        cs = plt.contourf(X, Y, field, 100, cmap='inferno')
        plt.title(r"$%s_{noise}(x,y,%.2f)$" % (symbol, t), fontsize=16)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_vars_angio_noise( c, f, n, t, q, constants ):
    """Save a 1x3 filled-contour figure of the angiogenesis-model noise fields.

    c, f, n : 2-D arrays on the (constants.X, constants.Y) grid.
    t : float time shown in each panel title.
    q : int frame index; output is '<figure_path>noise_<q>.png'.
    """
    X = constants.X
    Y = constants.Y
    plotname = constants.figure_path + 'noise_%i.png' % q
    fig = plt.figure(figsize=(16, 4.5))
    # Identical panels except for the field/symbol; drawn in a loop.
    for col, (field, symbol) in enumerate(((c, 'c'), (f, 'f'), (n, 'n')), 1):
        plt.subplot(1, 3, col)
        cs = plt.contourf(X, Y, field, 100, cmap='inferno')
        plt.title(r"$%s_{noise}(x,y,%.2f)$" % (symbol, t), fontsize=16)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_vars_invasion( f, m, n, t, q, constants ):
    """Save a 1x3 filled-contour figure of the invasion-model variables.

    f, m, n : 2-D arrays on the (constants.X, constants.Y) grid.
    t : float time shown in each panel title.
    q : int frame index; output is '<figure_path>vars_<q>.png'.
    """
    X = constants.X
    Y = constants.Y
    plotname = constants.figure_path + 'vars_%i.png' % q
    fig = plt.figure(figsize=(16, 4.5))
    # Identical panels except for the field/symbol; drawn in a loop.
    for col, (field, symbol) in enumerate(((f, 'f'), (m, 'm'), (n, 'n')), 1):
        plt.subplot(1, 3, col)
        cs = plt.contourf(X, Y, field, 100, cmap='inferno')
        plt.title(r"$%s(x,y,%.2f)$" % (symbol, t), fontsize=16)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_vars_angio( c, f, n, t, q, constants ):
    """Save a 1x3 filled-contour figure of the angiogenesis-model variables.

    Panels are drawn in the original order f, c, n, with three-decimal
    time stamps in the titles. Output is '<figure_path>vars_<q>.png'.
    """
    X = constants.X
    Y = constants.Y
    plotname = constants.figure_path + 'vars_%i.png' % q
    fig = plt.figure(figsize=(16, 4.5))
    # Identical panels except for the field/symbol; drawn in a loop.
    for col, (field, symbol) in enumerate(((f, 'f'), (c, 'c'), (n, 'n')), 1):
        plt.subplot(1, 3, col)
        cs = plt.contourf(X, Y, field, 100, cmap='inferno')
        plt.title(r"$%s(x,y,%.3f)$" % (symbol, t), fontsize=16)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_mterms( mterms, t, q, constants ):
    """Save a 1x3 figure of the m-equation budget terms (ddt, diffusion,
    production). Output is '<figure_path>mterms_<q>.png'.
    """
    # decay is off: sig = 0
    X = constants.X
    Y = constants.Y
    plotname = constants.figure_path + 'mterms_%i.png' % q
    fig = plt.figure(figsize=(16, 4.5))
    panels = (
        (mterms.ddt,
         r"time rate of change: $\partial{m}/\partial{t}(x,y,%.4f)$"),
        (mterms.diffusion, r"diffusion: $d_m\nabla^2m(x,y,%.4f)$"),
        (mterms.production, r"production: $\kappa n(x,y,%.4f)$"),
    )
    for col, (field, title) in enumerate(panels, 1):
        plt.subplot(1, 3, col)
        cs = plt.contourf(X, Y, field, 100, cmap='inferno')
        plt.title(title % t, fontsize=14)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_fterms_angio( fterms, t, q, constants ):
    """Save a 1x3 figure of the angiogenesis f-equation budget terms
    (ddt, production, uptake). Output is '<figure_path>fterms_<q>.png'.
    """
    # decay is off: sig = 0
    X = constants.X
    Y = constants.Y
    plotname = constants.figure_path + 'fterms_%i.png' % q
    fig = plt.figure(figsize=(16, 4.5))
    panels = (
        (fterms.ddt,
         r"time rate of change: $\partial{f}/\partial{t}(x,y,%.2f)$"),
        (fterms.production, r"production: $\beta n(x,y,%.2f)$"),
        (fterms.uptake, r"uptake: $-\gamma n f(x,y,%.2f)$"),
    )
    for col, (field, title) in enumerate(panels, 1):
        plt.subplot(1, 3, col)
        cs = plt.contourf(X, Y, field, 100, cmap='inferno')
        plt.title(title % t, fontsize=14)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_nterms( nterms, t, q, constants ):
    """Save a 2x2 figure of the n-equation budget terms using the
    sign-aware `contour_plot` helper. Output '<figure_path>nterms_<q>.png'.
    """
    plotname = constants.figure_path + 'nterms_%i.png' % q
    fig = plt.figure(figsize=(12, 12))
    panels = (
        (nterms.ddt,
         r"time rate of change: $\partial{n}/\partial{t}(x,y,%.4f)$"),
        (nterms.diffusion, r"diffusion: $d_n\nabla^2n(x,y,%.4f)$"),
        (nterms.haptotatic_diffusion,
         r"haptotaxis: $-\rho n\nabla^2{f}(x,y,%.4f)$"),
        (nterms.haptotatic_dissipation,
         r"haptotaxis: $-\rho \nabla n \cdot \nabla{f}(x,y,%.4f)$"),
    )
    for pos, (field, title) in enumerate(panels, 1):
        plt.subplot(2, 2, pos)
        cs = contour_plot(field, constants)
        plt.title(title % t, fontsize=16)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=16)
        plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.075, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_nterms_cterms_angio( nterms, cterms, t, q, constants ):
    """Save a 2x4 figure of the angiogenesis n-equation budget terms plus
    dc/dt, via the sign-aware `contour_plot` helper.
    Output '<figure_path>nterms_cterms_<q>.png'.
    """
    ifontsize = 19
    ititlesize = 17
    plotname = constants.figure_path + 'nterms_cterms_%i.png' % q
    fig = plt.figure(figsize=(24, 12))
    panels = (
        (nterms.ddt,
         r"time rate of change: $\partial{n}/\partial{t}(x,y,%.2f)$"),
        (nterms.diffusion, r"diffusion: $D\nabla^2n(x,y,%.2f)$"),
        (nterms.hapto1, r"haptotaxis: $-\rho n\nabla^2{f}(x,y,%.2f)$"),
        (nterms.hapto2,
         r"haptotaxis: $-\rho \nabla n \cdot \nabla{f}(x,y,%.2f)$"),
        (nterms.chemo1, r"chemotaxis: $-\chi n \nabla^2{c}(x,y,%.2f)$"),
        (nterms.chemo2,
         r"chemotaxis: $-\chi \nabla n \cdot \nabla{c}(x,y,%.2f)$"),
        (nterms.chemo3,
         r"chemotaxis: $-n \nabla \chi \cdot \nabla{c}(x,y,%.2f)$"),
        (cterms.ddt,
         r"time rate of change: $\partial{c}/\partial{t}(x,y,%.2f)$"),
    )
    for pos, (field, title) in enumerate(panels, 1):
        plt.subplot(2, 4, pos)
        cs = contour_plot(field, constants)
        plt.title(title % t, fontsize=ititlesize)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=ifontsize)
        plt.xlabel(r"$x$", fontsize=ifontsize)
    plt.subplots_adjust(top=0.96, bottom=0.075, left=0.04, right=0.975,
                        hspace=0.25, wspace=0.2)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_all_terms_invasion( fterms, mterms, nterms, t, q, constants ):
    """Save a grid of every budget term in the invasion model.

    With constants.nonlinear_n_diff == 'on' a 3x4 grid is drawn (diffusion
    and haptotaxis shown both combined and split into sub-terms); otherwise
    a 3x3 grid. Output '<figure_path>all_terms_<q>.png'.
    """
    ifontsize = 19
    ititlesize = 17
    plotname = constants.figure_path + 'all_terms_%i.png' % q
    if constants.nonlinear_n_diff == 'on':
        fig = plt.figure(figsize=(24, 18))
        if constants.nonlinear_m_production == 'on':
            prod_title = r"$\kappa n(1-m)(x,y,%.2f)$"
        else:
            prod_title = r"$\kappa n(x,y,%.2f)$"
        # (position, field, title template, axis-label fontsize).
        # NOTE(review): the last panel used 16 pt labels (not ifontsize)
        # in the original; preserved here.
        panels = [
            (1, nterms.ddt, r"$\partial{n}/\partial{t}(x,y,%.2f)$", ifontsize),
            (2, nterms.diff1 + nterms.diff2,
             r"$\nabla\cdot(d_n m \nabla n)(x,y,%.2f)$", ifontsize),
            (3, nterms.hapto1 + nterms.hapto2,
             r"$-\nabla \cdot (\rho n \nabla f)(x,y,%.2f)$", ifontsize),
            (5, nterms.diff1,
             r"$d_n\nabla m \cdot \nabla n(x,y,%.2f)$", ifontsize),
            (6, nterms.diff2, r"$d_n m \nabla^2 n(x,y,%.2f)$", ifontsize),
            (7, nterms.hapto1,
             r"$-\rho \nabla n \cdot \nabla f(x,y,%.2f)$", ifontsize),
            (8, nterms.hapto2, r"$-\rho n \nabla^2 f(x,y,%.2f)$", ifontsize),
            (9, mterms.ddt, r"$\partial{m}/\partial{t}(x,y,%.2f)$", ifontsize),
            (10, mterms.diff, r"$d_m\nabla^2 m(x,y,%.2f)$", ifontsize),
            (11, mterms.prod, prod_title, ifontsize),
            (12, fterms.ddt, r"$\partial{f}/\partial{t}(x,y,%.2f)$", 16),
        ]
        if constants.lam > 0:
            # Proliferation panel only when the logistic term is active;
            # nterms.prolif is never touched otherwise.
            panels.insert(3, (4, nterms.prolif,
                              r"$\lambda n (1-n-f)(x,y,%.2f)$", ifontsize))
        nrows, ncols = 3, 4
    else:
        fig = plt.figure(figsize=(18, 18))
        panels = [
            (1, nterms.ddt, r"$\partial{n}/\partial{t}(x,y,%.2f)$", ifontsize),
            (2, nterms.diff,
             r"$\nabla\cdot(d_n \nabla n)(x,y,%.2f)$", ifontsize),
            (3, nterms.hapto1 + nterms.hapto2,
             r"$-\nabla \cdot (\rho n \nabla f)(x,y,%.2f)$", ifontsize),
            (4, nterms.hapto1,
             r"$-\rho \nabla n \cdot \nabla f(x,y,%.2f)$", ifontsize),
            (5, nterms.hapto2, r"$-\rho n \nabla^2 f(x,y,%.2f)$", ifontsize),
            (6, fterms.ddt, r"$\partial{f}/\partial{t}(x,y,%.2f)$", 16),
            (7, mterms.ddt, r"$\partial{m}/\partial{t}(x,y,%.2f)$", ifontsize),
            (8, mterms.diff, r"$d_m\nabla^2 m(x,y,%.2f)$", ifontsize),
            (9, mterms.prod, r"$\kappa n(x,y,%.2f)$", ifontsize),
        ]
        nrows, ncols = 3, 3
    for pos, field, title, labelsize in panels:
        plt.subplot(nrows, ncols, pos)
        cs = contour_plot(field, constants)
        plt.title(title % t, fontsize=ititlesize)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=labelsize)
        plt.xlabel(r"$x$", fontsize=labelsize)
    plt.subplots_adjust(top=0.96, bottom=0.075, left=0.04, right=0.975,
                        hspace=0.25, wspace=0.2)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_all_terms_angio( cterms, fterms, nterms, t, q, constants ):
    """Save a 3x4 grid of every budget term in the angiogenesis model.

    Grid slot 8 is deliberately left empty (the original skips it too).
    Output '<figure_path>all_terms_<q>.png'.
    """
    ifontsize = 19
    ititlesize = 17
    plotname = constants.figure_path + 'all_terms_%i.png' % q
    fig = plt.figure(figsize=(24, 18))
    # (position, field, title template, axis-label fontsize).
    # NOTE(review): the last panel used 16 pt labels (not ifontsize)
    # in the original; preserved here.
    panels = (
        (1, nterms.ddt, r"$\partial{n}/\partial{t}(x,y,%.2f)$", ifontsize),
        (2, nterms.diffusion, r"$D\nabla^2n(x,y,%.2f)$", ifontsize),
        # hapto1, hapto2 ~ n laplacian(f) , grad(n) dot grad(f)
        (3, nterms.hapto1, r"$-\rho n \nabla^2{f}(x,y,%.2f)$", ifontsize),
        (4, nterms.hapto2,
         r"$-\rho\nabla{n}\cdot\nabla{f}(x,y,%.2f)$", ifontsize),
        (5, nterms.chemo1, r"$-\chi n \nabla^2{c}(x,y,%.2f)$", ifontsize),
        (6, nterms.chemo2,
         r"$-\chi\nabla{n}\cdot\nabla{c}(x,y,%.2f)$", ifontsize),
        (7, nterms.chemo3,
         r"$-n \nabla\chi\cdot\nabla{c}(x,y,%.2f)$", ifontsize),
        (9, cterms.ddt, r"$\partial{c}/\partial{t}(x,y,%.2f)$", ifontsize),
        (10, fterms.ddt, r"$\partial{f}/\partial{t}(x,y,%.2f)$", ifontsize),
        (11, fterms.production, r"$\beta n(x,y,%.2f)$", ifontsize),
        (12, fterms.uptake, r"$-\gamma n f(x,y,%.2f)$", 16),
    )
    for pos, field, title, labelsize in panels:
        plt.subplot(3, 4, pos)
        cs = contour_plot(field, constants)
        plt.title(title % t, fontsize=ititlesize)
        plt.colorbar(cs)
        plt.ylabel(r"$y$", fontsize=labelsize)
        plt.xlabel(r"$x$", fontsize=labelsize)
    plt.subplots_adjust(top=0.96, bottom=0.075, left=0.04, right=0.975,
                        hspace=0.25, wspace=0.2)
    plt.savefig(plotname, format="png")
    plt.close(fig)
def plot_fterms( fterms, t, q, constants ):
    """Save a single-panel contour plot of df/dt at time *t* as
    '<figure_path>fterms_<q>.png'.
    """
    grid_x = constants.X
    grid_y = constants.Y
    out_file = constants.figure_path + 'fterms_%i.png' % q
    fig = plt.figure(figsize=(6, 5))
    plt.subplot(1, 1, 1)
    contours = plt.contourf(grid_x, grid_y, fterms.ddt, 100, cmap='inferno')
    plt.title(r"time rate of change: $\partial{f}/\partial{t}(x,y,%.4f)$" % t,
              fontsize=14)
    plt.colorbar(contours)
    plt.ylabel(r"$y$", fontsize=16)
    plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.125, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(out_file, format="png")
    plt.close(fig)
    return
def plot_cterms_angio( cterms, t, q, constants ):
    """Save a single-panel contour plot of dc/dt at time *t* as
    '<figure_path>cterms_<q>.png'.
    """
    grid_x = constants.X
    grid_y = constants.Y
    out_file = constants.figure_path + 'cterms_%i.png' % q
    fig = plt.figure(figsize=(6, 5))
    plt.subplot(1, 1, 1)
    contours = plt.contourf(grid_x, grid_y, cterms.ddt, 100, cmap='inferno')
    plt.title(r"time rate of change: $\partial{c}/\partial{t}(x,y,%.4f)$" % t,
              fontsize=14)
    plt.colorbar(contours)
    plt.ylabel(r"$y$", fontsize=16)
    plt.xlabel(r"$x$", fontsize=16)
    plt.subplots_adjust(top=0.925, bottom=0.15, left=0.125, right=0.95,
                        hspace=0.3, wspace=0.3)
    plt.savefig(out_file, format="png")
    plt.close(fig)
    return
def contour_plot( var , constants ):
    """Filled-contour plot of *var* on the (constants.X, constants.Y) grid.

    Chooses the colormap from the data's sign balance: a field that is
    strongly one-signed gets the sequential 'inferno' map; otherwise a
    diverging 'seismic' map centred on zero via MidpointNormalize.
    Returns the QuadContourSet so the caller can attach a colorbar.
    (Cleanup: the unused `mid_val` local is removed and the normalizer is
    only built on the branch that uses it.)
    """
    X = constants.X
    Y = constants.Y
    vmin1 = np.amin(var)
    vmax1 = np.amax(var)
    if abs( np.abs(vmax1)-np.abs(vmin1) ) > 4.*min( np.abs(vmax1), np.abs(vmin1) ):
        cs = plt.contourf(X, Y, var, 100, cmap='inferno')
    else:
        midnorm = MidpointNormalize(vmin=vmin1, vcenter=0., vmax=vmax1)
        cs = plt.contourf(X, Y, var, 100, cmap='seismic', norm=midnorm)
    return cs
class MidpointNormalize(colors.Normalize):
    """Two-slope normalization: maps vmin -> 0, vcenter -> 0.5, vmax -> 1,
    so a diverging colormap is centred on `vcenter` (zero in this module).
    """
    def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
        # Data value pinned to the middle of the colormap.
        self.vcenter = vcenter
        colors.Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Piecewise-linear interpolation between the three anchor points.
        # Masked values and edge cases (e.g. vmin == vcenter) are ignored
        # to keep the example simple.
        x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
| 40.585089
| 127
| 0.621181
| 3,925
| 25,041
| 3.923057
| 0.052994
| 0.013638
| 0.094818
| 0.066502
| 0.917067
| 0.904403
| 0.890505
| 0.884725
| 0.871477
| 0.863424
| 0
| 0.043436
| 0.173476
| 25,041
| 616
| 128
| 40.650974
| 0.700536
| 0.099078
| 0
| 0.748077
| 0
| 0.001923
| 0.135044
| 0.044689
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028846
| false
| 0
| 0.011538
| 0
| 0.069231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ad813f70295670e77cf5424424eb6f920ba9c9b9
| 1,033
|
py
|
Python
|
tests/basics/tuple_compare.py
|
peterson79/pycom-micropython-sigfox
|
3f93fc2c02567c96f18cff4af9125db8fd7a6fb4
|
[
"MIT"
] | 303
|
2015-07-11T17:12:55.000Z
|
2018-01-08T03:02:37.000Z
|
tests/basics/tuple_compare.py
|
peterson79/pycom-micropython-sigfox
|
3f93fc2c02567c96f18cff4af9125db8fd7a6fb4
|
[
"MIT"
] | 89
|
2017-06-09T20:57:27.000Z
|
2018-03-06T19:54:04.000Z
|
tests/basics/tuple_compare.py
|
peterson79/pycom-micropython-sigfox
|
3f93fc2c02567c96f18cff4af9125db8fd7a6fb4
|
[
"MIT"
] | 26
|
2018-01-18T09:15:33.000Z
|
2022-02-07T13:09:14.000Z
|
# Exercise tuple comparison semantics: tuples compare lexicographically,
# element by element, and a tuple that is a strict prefix of another
# compares less than it. Each print emits True/False for comparison with
# CPython's output.

# Empty-tuple comparisons.
print(() == ())
print(() > ())
print(() < ())
# Empty vs one-element: the empty tuple is a prefix, so it is "less".
print(() == (1,))
print((1,) == ())
print(() > (1,))
print((1,) > ())
print(() < (1,))
print((1,) < ())
print(() >= (1,))
print((1,) >= ())
print(() <= (1,))
print((1,) <= ())
# Equality / inequality of equal-length tuples.
print((1,) == (1,))
print((1,) != (1,))
print((1,) == (2,))
print((1,) == (1, 0,))
# Greater-than, including prefix cases.
print((1,) > (1,))
print((1,) > (2,))
print((2,) > (1,))
print((1, 0,) > (1,))
print((1, -1,) > (1,))
print((1,) > (1, 0,))
print((1,) > (1, -1,))
# Less-than.
print((1,) < (1,))
print((2,) < (1,))
print((1,) < (2,))
print((1,) < (1, 0,))
print((1,) < (1, -1,))
print((1, 0,) < (1,))
print((1, -1,) < (1,))
# Greater-or-equal.
print((1,) >= (1,))
print((1,) >= (2,))
print((2,) >= (1,))
print((1, 0,) >= (1,))
print((1, -1,) >= (1,))
print((1,) >= (1, 0,))
print((1,) >= (1, -1,))
# Less-or-equal.
print((1,) <= (1,))
print((2,) <= (1,))
print((1,) <= (2,))
print((1,) <= (1, 0,))
print((1,) <= (1, -1,))
print((1, 0,) <= (1,))
print((1, -1,) <= (1,))
# The first differing element decides, regardless of later elements.
print((10, 0) > (1, 1))
print((10, 0) < (1, 1))
print((0, 0, 10, 0) > (0, 0, 1, 1))
print((0, 0, 10, 0) < (0, 0, 1, 1))
| 18.446429
| 35
| 0.352372
| 164
| 1,033
| 2.219512
| 0.030488
| 0.626374
| 0.557692
| 0.241758
| 0.958791
| 0.958791
| 0.958791
| 0.958791
| 0.925824
| 0.906593
| 0
| 0.140496
| 0.180058
| 1,033
| 55
| 36
| 18.781818
| 0.289256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 12
|
ad83e9d38092238fdcfc7c9464c47761a2e56382
| 21,402
|
py
|
Python
|
semaphore-python-client-generated/semaphore_client/api/default_api.py
|
QCDIS/CONF
|
6ddb37b691754bbba97c85228d266ac050c4baa4
|
[
"Apache-2.0"
] | null | null | null |
semaphore-python-client-generated/semaphore_client/api/default_api.py
|
QCDIS/CONF
|
6ddb37b691754bbba97c85228d266ac050c4baa4
|
[
"Apache-2.0"
] | 41
|
2017-01-23T16:20:55.000Z
|
2019-10-07T12:45:21.000Z
|
semaphore-python-client-generated/semaphore_client/api/default_api.py
|
skoulouzis/CONF
|
8c0596810f7ef5fec001148dd67192b25abbe3c8
|
[
"Apache-2.0"
] | 2
|
2020-05-26T12:53:14.000Z
|
2020-10-08T05:59:46.000Z
|
# coding: utf-8
"""
SEMAPHORE
Semaphore API # noqa: E501
OpenAPI spec version: 2.2.0-oas3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from semaphore_client.api_client import ApiClient
class DefaultApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def events_get(self, **kwargs): # noqa: E501
"""Get Events related to Semaphore and projects you are part of # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.events_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Event]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.events_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.events_get_with_http_info(**kwargs) # noqa: E501
return data
def events_get_with_http_info(self, **kwargs): # noqa: E501
"""Get Events related to Semaphore and projects you are part of # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.events_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Event]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method events_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/plain; charset=utf-8']) # noqa: E501
# Authentication setting
auth_settings = ['bearer', 'cookie'] # noqa: E501
return self.api_client.call_api(
'/events', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Event]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def events_last_get(self, **kwargs): # noqa: E501
"""Get last 200 Events related to Semaphore and projects you are part of # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.events_last_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Event]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.events_last_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.events_last_get_with_http_info(**kwargs) # noqa: E501
return data
def events_last_get_with_http_info(self, **kwargs): # noqa: E501
"""Get last 200 Events related to Semaphore and projects you are part of # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.events_last_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Event]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method events_last_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/plain; charset=utf-8']) # noqa: E501
# Authentication setting
auth_settings = ['bearer', 'cookie'] # noqa: E501
return self.api_client.call_api(
'/events/last', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Event]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def info_get(self, **kwargs): # noqa: E501
"""Fetches information about semaphore # noqa: E501
you must be authenticated to use this # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.info_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: InfoType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.info_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.info_get_with_http_info(**kwargs) # noqa: E501
return data
def info_get_with_http_info(self, **kwargs): # noqa: E501
"""Fetches information about semaphore # noqa: E501
you must be authenticated to use this # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.info_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: InfoType
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method info_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/plain; charset=utf-8']) # noqa: E501
# Authentication setting
auth_settings = ['bearer', 'cookie'] # noqa: E501
return self.api_client.call_api(
'/info', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InfoType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def ping_get(self, **kwargs): # noqa: E501
"""PING test # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ping_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: Pong
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.ping_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.ping_get_with_http_info(**kwargs) # noqa: E501
return data
def ping_get_with_http_info(self, **kwargs): # noqa: E501
"""PING test # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ping_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: Pong
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method ping_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/ping', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Pong', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def upgrade_get(self, **kwargs): # noqa: E501
"""Check if new updates available and fetch /info # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upgrade_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: InfoType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.upgrade_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.upgrade_get_with_http_info(**kwargs) # noqa: E501
return data
def upgrade_get_with_http_info(self, **kwargs): # noqa: E501
"""Check if new updates available and fetch /info # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upgrade_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: InfoType
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upgrade_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/plain; charset=utf-8']) # noqa: E501
# Authentication setting
auth_settings = ['bearer', 'cookie'] # noqa: E501
return self.api_client.call_api(
'/upgrade', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InfoType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def upgrade_post(self, **kwargs): # noqa: E501
"""Upgrade the server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upgrade_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.upgrade_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.upgrade_post_with_http_info(**kwargs) # noqa: E501
return data
def upgrade_post_with_http_info(self, **kwargs): # noqa: E501
"""Upgrade the server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upgrade_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upgrade_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['bearer', 'cookie'] # noqa: E501
return self.api_client.call_api(
'/upgrade', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def ws_get(self, **kwargs): # noqa: E501
"""Websocket handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ws_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.ws_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.ws_get_with_http_info(**kwargs) # noqa: E501
return data
def ws_get_with_http_info(self, **kwargs): # noqa: E501
"""Websocket handler # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ws_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method ws_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['bearer', 'cookie'] # noqa: E501
return self.api_client.call_api(
'/ws', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 34.35313
| 94
| 0.587749
| 2,428
| 21,402
| 4.912273
| 0.068369
| 0.047623
| 0.032867
| 0.042257
| 0.959336
| 0.957156
| 0.95288
| 0.945502
| 0.937621
| 0.932758
| 0
| 0.016044
| 0.321419
| 21,402
| 622
| 95
| 34.40836
| 0.805206
| 0.30941
| 0
| 0.817365
| 1
| 0
| 0.146851
| 0.034076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04491
| false
| 0
| 0.011976
| 0
| 0.122754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ad863fe7e933735542f57611f52f255a1a90218c
| 8,524
|
py
|
Python
|
models/oct_resnet.py
|
PistonY/ModelZoo.pytorch
|
eb4cc16bfbb6bdf1c021f5f6faba7fbfc8c04612
|
[
"MIT"
] | 41
|
2019-11-13T02:09:47.000Z
|
2022-02-20T11:31:18.000Z
|
models/oct_resnet.py
|
PistonY/ModelZoo.pytorch
|
eb4cc16bfbb6bdf1c021f5f6faba7fbfc8c04612
|
[
"MIT"
] | 5
|
2020-09-22T10:47:22.000Z
|
2021-05-11T10:10:37.000Z
|
models/oct_resnet.py
|
PistonY/ModelZoo.pytorch
|
eb4cc16bfbb6bdf1c021f5f6faba7fbfc8c04612
|
[
"MIT"
] | 5
|
2020-07-07T13:27:04.000Z
|
2021-06-09T03:21:27.000Z
|
__all__ = ['oct_resnet50', 'oct_resnet50v2']
from module import *
from torchtoolbox.nn import AdaptiveSequential
from torch import nn
def check_status(alpha_in, alpha_out):
    """Normalize an (alpha_in, alpha_out) octave-ratio pair.

    A zero ``alpha_in`` inherits ``alpha_out``; if ``alpha_out`` is zero the
    result's ``alpha_in`` is forced to zero as well.  ``alpha_out`` is
    returned unchanged.
    """
    if alpha_in == 0:
        alpha_in = alpha_out
    if alpha_out == 0:
        alpha_in = 0
    return alpha_in, alpha_out
class OctBottleneck(nn.Module):
    """Octave-convolution bottleneck block (post-activation variant).

    1x1 reduce -> 3x3 (strided) -> 1x1 expand, each followed by frequency-
    split batch norm, with a projection shortcut when the shape changes.
    Inputs and outputs are (high, low) frequency tensor pairs; the low part
    may be ``None`` at the network boundary.
    """
    expansion = 4

    def __init__(self, inplanes, planes, alpha_in, alpha_out,
                 stride=1, groups=1, base_width=64):
        super(OctBottleneck, self).__init__()
        mid_channels = int(planes * (base_width / 64.)) * groups
        out_channels = planes * self.expansion
        # Projection shortcut when spatial size or channel count changes.
        if stride != 1 or inplanes != out_channels:
            self.downsample = AdaptiveSequential(
                OctaveConv(inplanes, out_channels, alpha_in, alpha_out,
                           1, stride=stride, bias=False),
                fs_bn(out_channels, alpha_out)
            )
        else:
            self.downsample = None
        self.conv1 = OctaveConv(inplanes, mid_channels, alpha_in, alpha_out, 1, bias=False)
        self.bn1 = fs_bn(mid_channels, alpha_out)
        # Inner convs use the normalized alpha pair.
        alpha_in, alpha_out = check_status(alpha_in, alpha_out)
        self.conv2 = OctaveConv(mid_channels, mid_channels, alpha_in, alpha_out, 3, 1, stride,
                                groups, False)
        self.bn2 = fs_bn(mid_channels, alpha_out)
        self.conv3 = OctaveConv(mid_channels, out_channels, alpha_in, alpha_out,
                                1, bias=False)
        self.bn3 = fs_bn(out_channels, alpha_out)
        self.relu = fs_relu()

    def forward(self, x_h, x_l=None):
        """Apply the block; returns the activated (high, low) residual sum."""
        res_h, res_l = x_h, x_l
        hi, lo = self.conv1(x_h, x_l)
        hi, lo = self.bn1(hi, lo)
        hi, lo = self.relu(hi, lo)
        hi, lo = self.conv2(hi, lo)
        hi, lo = self.bn2(hi, lo)
        hi, lo = self.relu(hi, lo)
        hi, lo = self.conv3(hi, lo)
        hi, lo = self.bn3(hi, lo)
        if self.downsample:
            res_h, res_l = self.downsample(res_h, res_l)
        # Low-frequency branch only exists when at least one side carries it.
        lo_sum = None if lo is None and res_l is None else lo + res_l
        return self.relu(hi + res_h, lo_sum)
class OctBottleneckV2(nn.Module):
    """Pre-activation (v2) octave bottleneck: BN/ReLU precede each conv.

    Same 1x1 -> 3x3 -> 1x1 channel plan as :class:`OctBottleneck`, but the
    shortcut is taken from the normalized/activated input and the projection
    shortcut carries no batch norm of its own.
    """
    expansion = 4

    def __init__(self, inplanes, planes, alpha_in, alpha_out, stride=1,
                 groups=1, base_width=64):
        super().__init__()
        mid_channels = int(planes * (base_width / 64.)) * groups
        out_channels = planes * self.expansion
        # Projection shortcut (no BN: v2 normalizes before the convs).
        if stride != 1 or inplanes != out_channels:
            self.downsample = AdaptiveSequential(
                OctaveConv(inplanes, out_channels, alpha_in, alpha_out,
                           1, stride=stride, bias=False),
            )
        else:
            self.downsample = None
        self.bn1 = fs_bn(inplanes, alpha_in)
        self.conv1 = OctaveConv(inplanes, mid_channels, alpha_in, alpha_out, 1, bias=False)
        alpha_in, alpha_out = check_status(alpha_in, alpha_out)
        self.bn2 = fs_bn(mid_channels, alpha_in)
        self.conv2 = OctaveConv(mid_channels, mid_channels, alpha_in, alpha_out, 3, 1,
                                stride, groups, False)
        self.bn3 = fs_bn(mid_channels, alpha_in)
        # NOTE(review): conv3 passes alpha_in twice (not alpha_out) — kept
        # exactly as the original; confirm whether that is intentional for
        # the v2 residual path.
        self.conv3 = OctaveConv(mid_channels, out_channels, alpha_in, alpha_in,
                                1, bias=False)
        self.relu = fs_relu()

    def forward(self, x_h, x_l=None):
        """Pre-activation forward; returns the (high, low) residual sum."""
        res_h, res_l = x_h, x_l
        hi, lo = self.bn1(x_h, x_l)
        hi, lo = self.relu(hi, lo)
        # v2 takes the projection shortcut from the activated input.
        if self.downsample:
            res_h, res_l = self.downsample(hi, lo)
        hi, lo = self.conv1(hi, lo)
        hi, lo = self.bn2(hi, lo)
        hi, lo = self.relu(hi, lo)
        hi, lo = self.conv2(hi, lo)
        hi, lo = self.bn3(hi, lo)
        hi, lo = self.relu(hi, lo)
        hi, lo = self.conv3(hi, lo)
        # Low-frequency branch only exists when at least one side carries it.
        lo_sum = None if lo is None and res_l is None else lo + res_l
        return hi + res_h, lo_sum
class OctResNet(nn.Module):
    """ResNet-style backbone assembled from :class:`OctBottleneck` stages.

    ``alpha`` is forwarded to every bottleneck; the first stage is built
    with status ``'start'`` (alpha_in forced to 0) and the last with
    ``'end'`` (alpha_out forced to 0) so the network enters and leaves
    octave mode at its boundaries.
    """

    def __init__(self, alpha, layers, num_classes=1000, groups=1, width_per_group=64):
        super(OctResNet, self).__init__()
        self.inplanes = 64
        self.groups = groups
        self.base_width = width_per_group
        # Standard ResNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(alpha, 64, layers[0], 1, 'start')
        self.layer2 = self._make_layer(alpha, 128, layers[1], 2)
        self.layer3 = self._make_layer(alpha, 256, layers[2], 2)
        self.layer4 = self._make_layer(alpha, 512, layers[3], 2, 'end')
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * OctBottleneck.expansion, num_classes)

    def _make_layer(self, alpha, planes, blocks, stride=1, status='normal'):
        """Build one stage of ``blocks`` OctBottleneck units."""
        assert status in ('start', 'normal', 'end')
        first = OctBottleneck(self.inplanes, planes,
                              0 if status == 'start' else alpha,
                              0 if status == 'end' else alpha,
                              stride, self.groups, self.base_width)
        self.inplanes = planes * OctBottleneck.expansion
        if status == 'end':
            alpha = 0
        rest = [OctBottleneck(self.inplanes, planes, alpha, alpha, 1,
                              self.groups, self.base_width)
                for _ in range(1, blocks)]
        return AdaptiveSequential(first, *rest)

    def forward(self, x):
        """Classify a batch of images; returns (N, num_classes) logits."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        hi, lo = self.layer1(out)
        hi, lo = self.layer2(hi, lo)
        hi, lo = self.layer3(hi, lo)
        # The final stage folds the low branch back in; discard its None slot.
        out, _ = self.layer4(hi, lo)
        out = self.avgpool(out)
        out = out.reshape(out.size(0), -1)
        return self.fc(out)
class OctResNetV2(nn.Module):
    """ResNet-style backbone assembled from :class:`OctBottleneckV2` stages.

    Consistency fix: the classifier width was previously computed from
    ``OctBottleneck.expansion`` even though the stages are built from
    ``OctBottleneckV2``.  Both constants are 4 so behavior is unchanged,
    but the V2 constant is now used throughout.
    """

    def __init__(self, alpha, layers, num_classes=1000, groups=1, width_per_group=64):
        super().__init__()
        self.inplanes = 64
        self.groups = groups
        self.base_width = width_per_group
        # Standard ResNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # 'start'/'end' make the first/last stage enter/leave octave mode.
        self.layer1 = self._make_layer(alpha, 64, layers[0], 1, 'start')
        self.layer2 = self._make_layer(alpha, 128, layers[1], 2)
        self.layer3 = self._make_layer(alpha, 256, layers[2], 2)
        self.layer4 = self._make_layer(alpha, 512, layers[3], 2, 'end')
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Was OctBottleneck.expansion; use the V2 block's own constant.
        self.fc = nn.Linear(512 * OctBottleneckV2.expansion, num_classes)

    def _make_layer(self, alpha, planes, blocks, stride=1, status='normal'):
        """Build one stage of ``blocks`` OctBottleneckV2 units."""
        assert status in ('start', 'normal', 'end')
        layers = []
        layers.append(OctBottleneckV2(self.inplanes, planes,
                                      alpha if status != 'start' else 0,
                                      alpha if status != 'end' else 0,
                                      stride, self.groups, self.base_width))
        self.inplanes = planes * OctBottleneckV2.expansion
        alpha = 0 if status == 'end' else alpha
        for _ in range(1, blocks):
            layers.append(OctBottleneckV2(self.inplanes, planes, alpha, alpha, 1,
                                          self.groups, self.base_width))
        return AdaptiveSequential(*layers)

    def forward(self, x):
        """Classify a batch of images; returns (N, num_classes) logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x_h, x_l = self.layer1(x)
        x_h, x_l = self.layer2(x_h, x_l)
        x_h, x_l = self.layer3(x_h, x_l)
        # The final stage folds the low branch back in; discard its None slot.
        x, _ = self.layer4(x_h, x_l)
        x = self.avgpool(x)
        x = x.reshape(x.size(0), -1)
        x = self.fc(x)
        return x
def oct_resnet50(alpha, **kwargs):
    """Constructs a OctResNet-50 model.

    Args:
        alpha: octave ratio forwarded to ``OctResNet``.
        **kwargs: forwarded to ``OctResNet`` (e.g. ``num_classes``,
            ``groups``, ``width_per_group``).

    Returns:
        An ``OctResNet`` with the standard ResNet-50 stage layout [3, 4, 6, 3].
    """
    return OctResNet(alpha, [3, 4, 6, 3], **kwargs)
def oct_resnet50v2(alpha, **kwargs):
    """Constructs a OctResNet-50 model (pre-activation v2 variant).

    Args:
        alpha: octave ratio forwarded to ``OctResNetV2``.
        **kwargs: forwarded to ``OctResNetV2`` (e.g. ``num_classes``,
            ``groups``, ``width_per_group``).

    Returns:
        An ``OctResNetV2`` with the standard ResNet-50 stage layout [3, 4, 6, 3].
    """
    return OctResNetV2(alpha, [3, 4, 6, 3], **kwargs)
| 38.570136
| 91
| 0.572032
| 1,235
| 8,524
| 3.721457
| 0.096356
| 0.023934
| 0.03329
| 0.044386
| 0.917319
| 0.899478
| 0.870104
| 0.84443
| 0.839861
| 0.839861
| 0
| 0.035574
| 0.31077
| 8,524
| 220
| 92
| 38.745455
| 0.746723
| 0.028038
| 0
| 0.729885
| 0
| 0
| 0.012609
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 1
| 0.074713
| false
| 0
| 0.017241
| 0
| 0.178161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a8eba7d2923e75e9983c40897a8e21f1db105751
| 9,933
|
py
|
Python
|
label_studio_withoutsignin/io_storages/migrations/0003_localfilesimportstorage.py
|
DimaVinnitsa/label-studio
|
b33ef9edc5efef5f5a073e3a457832278afbf2cf
|
[
"Apache-2.0"
] | null | null | null |
label_studio_withoutsignin/io_storages/migrations/0003_localfilesimportstorage.py
|
DimaVinnitsa/label-studio
|
b33ef9edc5efef5f5a073e3a457832278afbf2cf
|
[
"Apache-2.0"
] | null | null | null |
label_studio_withoutsignin/io_storages/migrations/0003_localfilesimportstorage.py
|
DimaVinnitsa/label-studio
|
b33ef9edc5efef5f5a073e3a457832278afbf2cf
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.4 on 2021-03-18 14:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces local-filesystem storage models
    # (mixin, import/export storages, and their per-task/per-annotation
    # link tables). Do not hand-edit the field definitions.

    dependencies = [
        ("projects", "0008_auto_20210314_1840"),
        ("tasks", "0005_auto_20210309_1239"),
        ("io_storages", "0002_auto_20210311_0530"),
    ]
    operations = [
        # Base table holding the fields shared by import/export storages.
        migrations.CreateModel(
            name="LocalFilesMixin",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                (
                    "path",
                    models.TextField(
                        blank=True, help_text="Local path", null=True, verbose_name="path"
                    ),
                ),
                (
                    "regex_filter",
                    models.TextField(
                        blank=True,
                        help_text="Regex for filtering objects",
                        null=True,
                        verbose_name="regex_filter",
                    ),
                ),
                (
                    "use_blob_urls",
                    models.BooleanField(
                        default=False,
                        help_text="Interpret objects as BLOBs and generate URLs",
                        verbose_name="use_blob_urls",
                    ),
                ),
            ],
        ),
        # Export storage: multi-table inheritance from LocalFilesMixin
        # (note the parent_link OneToOneField and the extra Model base).
        migrations.CreateModel(
            name="LocalFilesExportStorage",
            fields=[
                (
                    "localfilesmixin_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="io_storages.localfilesmixin",
                    ),
                ),
                (
                    "title",
                    models.CharField(
                        help_text="Cloud storage title",
                        max_length=256,
                        null=True,
                        verbose_name="title",
                    ),
                ),
                (
                    "description",
                    models.TextField(
                        blank=True,
                        help_text="Cloud storage description",
                        null=True,
                        verbose_name="description",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, help_text="Creation time", verbose_name="created at"
                    ),
                ),
                (
                    "last_sync",
                    models.DateTimeField(
                        blank=True,
                        help_text="Last sync finished time",
                        null=True,
                        verbose_name="last sync",
                    ),
                ),
                (
                    "last_sync_count",
                    models.PositiveIntegerField(
                        blank=True,
                        help_text="Count of tasks synced last time",
                        null=True,
                        verbose_name="last sync count",
                    ),
                ),
                (
                    "project",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="io_storages_localfilesexportstorages",
                        to="projects.project",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
            bases=("io_storages.localfilesmixin", models.Model),
        ),
        # Import storage: same shape as the export storage but with its own
        # related_name on the project FK.
        migrations.CreateModel(
            name="LocalFilesImportStorage",
            fields=[
                (
                    "localfilesmixin_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="io_storages.localfilesmixin",
                    ),
                ),
                (
                    "title",
                    models.CharField(
                        help_text="Cloud storage title",
                        max_length=256,
                        null=True,
                        verbose_name="title",
                    ),
                ),
                (
                    "description",
                    models.TextField(
                        blank=True,
                        help_text="Cloud storage description",
                        null=True,
                        verbose_name="description",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, help_text="Creation time", verbose_name="created at"
                    ),
                ),
                (
                    "last_sync",
                    models.DateTimeField(
                        blank=True,
                        help_text="Last sync finished time",
                        null=True,
                        verbose_name="last sync",
                    ),
                ),
                (
                    "last_sync_count",
                    models.PositiveIntegerField(
                        blank=True,
                        help_text="Count of tasks synced last time",
                        null=True,
                        verbose_name="last sync count",
                    ),
                ),
                (
                    "project",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="io_storages_localfilesimportstorages",
                        to="projects.project",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
            bases=("io_storages.localfilesmixin", models.Model),
        ),
        # Link table tying each imported task (one-to-one) to its storage.
        migrations.CreateModel(
            name="LocalFilesImportStorageLink",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("key", models.TextField(help_text="External link key", verbose_name="key")),
                (
                    "object_exists",
                    models.BooleanField(
                        default=True,
                        help_text="Whether object under external link still exists",
                        verbose_name="object exists",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, help_text="Creation time", verbose_name="created at"
                    ),
                ),
                (
                    "task",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="io_storages_localfilesimportstoragelink",
                        to="tasks.task",
                    ),
                ),
                (
                    "storage",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="links",
                        to="io_storages.localfilesimportstorage",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
        # Link table tying each exported annotation (one-to-one) to its storage.
        migrations.CreateModel(
            name="LocalFilesExportStorageLink",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                (
                    "object_exists",
                    models.BooleanField(
                        default=True,
                        help_text="Whether object under external link still exists",
                        verbose_name="object exists",
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True, help_text="Creation time", verbose_name="created at"
                    ),
                ),
                (
                    "annotation",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="io_storages_localfilesexportstoragelink",
                        to="tasks.annotation",
                    ),
                ),
                (
                    "storage",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="links",
                        to="io_storages.localfilesexportstorage",
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
| 35.475
| 95
| 0.370784
| 602
| 9,933
| 5.920266
| 0.200997
| 0.064815
| 0.047138
| 0.053311
| 0.742144
| 0.742144
| 0.724186
| 0.724186
| 0.724186
| 0.724186
| 0
| 0.015422
| 0.549582
| 9,933
| 279
| 96
| 35.602151
| 0.78118
| 0.00453
| 0
| 0.754579
| 1
| 0
| 0.154157
| 0.050273
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.03663
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d1172f9106ec2fafa7107b06a33accccec65ce3c
| 1,447
|
py
|
Python
|
tests/test_downsample_xy_half_median.py
|
elsandal/pyclesperanto_prototype
|
7bda828813b86b44b63d73d5e8f466d9769cded1
|
[
"BSD-3-Clause"
] | 64
|
2020-03-18T12:11:22.000Z
|
2022-03-31T08:19:18.000Z
|
tests/test_downsample_xy_half_median.py
|
elsandal/pyclesperanto_prototype
|
7bda828813b86b44b63d73d5e8f466d9769cded1
|
[
"BSD-3-Clause"
] | 148
|
2020-05-14T06:14:11.000Z
|
2022-03-26T15:02:31.000Z
|
tests/test_downsample_xy_half_median.py
|
elsandal/pyclesperanto_prototype
|
7bda828813b86b44b63d73d5e8f466d9769cded1
|
[
"BSD-3-Clause"
] | 16
|
2020-05-31T00:53:44.000Z
|
2022-03-23T13:20:57.000Z
|
import pyclesperanto_prototype as cle
import numpy as np
def test_downsample_xy_by_half_median():
    """2D case: each 2x2 patch collapses to its median-like representative."""
    source = cle.push(np.asarray([
        [-3, 0, 1, 2],
        [0, 5, 2, 7],
        [0, 1, 3, 4],
        [1, 6, 4, 8]
    ]))
    expected = cle.push(np.asarray([
        [0, 2],
        [1, 4]
    ]))

    downsampled = cle.downsample_slice_by_slice_half_median(source)

    result_arr = cle.pull(downsampled)
    expected_arr = cle.pull(expected)
    print(result_arr)

    assert np.array_equal(result_arr, expected_arr)
def test_resample_downsample_3d():
    """3D case: downsampling is applied slice by slice, Z depth unchanged."""
    varying_slice = [
        [-3, 0, 1, 2],
        [0, 5, 2, 7],
        [0, 1, 3, 4],
        [1, 6, 4, 8]
    ]
    constant_slice = [
        [5, 5, 5, 5],
        [5, 5, 5, 5],
        [5, 5, 5, 5],
        [5, 5, 5, 5]
    ]
    source = cle.push(np.asarray([
        varying_slice, varying_slice, constant_slice, constant_slice
    ]))
    expected = cle.push(np.asarray([
        [[0, 2], [1, 4]],
        [[0, 2], [1, 4]],
        [[5, 5], [5, 5]],
        [[5, 5], [5, 5]]
    ]))

    downsampled = cle.downsample_slice_by_slice_half_median(source)

    result_arr = cle.pull(downsampled)
    expected_arr = cle.pull(expected)
    print(result_arr)

    assert np.array_equal(result_arr, expected_arr)
| 19.821918
| 61
| 0.36282
| 191
| 1,447
| 2.638743
| 0.198953
| 0.150794
| 0.214286
| 0.269841
| 0.781746
| 0.781746
| 0.781746
| 0.781746
| 0.765873
| 0.650794
| 0
| 0.134271
| 0.459572
| 1,447
| 72
| 62
| 20.097222
| 0.51023
| 0
| 0
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.032258
| false
| 0
| 0.032258
| 0
| 0.064516
| 0.032258
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d143f6f158970d3403302aa2c2314cc1c4bf679e
| 8,241
|
py
|
Python
|
model/A2J/a2j_utilities/a2j_branchs.py
|
Rebel-C/NVidia_Jetson_HandPose
|
41e485fae189a85624686dcf644883868540cf1d
|
[
"MIT"
] | null | null | null |
model/A2J/a2j_utilities/a2j_branchs.py
|
Rebel-C/NVidia_Jetson_HandPose
|
41e485fae189a85624686dcf644883868540cf1d
|
[
"MIT"
] | null | null | null |
model/A2J/a2j_utilities/a2j_branchs.py
|
Rebel-C/NVidia_Jetson_HandPose
|
41e485fae189a85624686dcf644883868540cf1d
|
[
"MIT"
] | null | null | null |
import os
import sys
import torch.nn as nn
# PROJ ROOT DIR
DIR_PATH = os.path.dirname(os.path.abspath(__file__)) # a2j_utilities
A2J_PATH = os.path.join(DIR_PATH, os.path.pardir) # A2J
MODEL_PATH = os.path.join(A2J_PATH, os.path.pardir) # model
ROOT_PATH = os.path.join(MODEL_PATH, os.path.pardir) # root
sys.path.append(ROOT_PATH)
# Import Project Library
from model.A2J.back_bone.resnet import get_ResNet
class DepthRegression(nn.Module):
    """Anchor-based depth regression head.

    Regresses, for every anchor point at each spatial location, a depth
    estimate per joint. Output shape: (N, W*H*num_anchors, num_joints).
    """

    def __init__(self, input_channels, output_channels=256, num_anchors=16, num_joints=18):
        """
        Class initializer
        :param input_channels: number of input channels
        :param output_channels: number of output channels
        :param num_anchors: total number of anchor points
        :param num_joints: total number of joints to predict
        """
        super(DepthRegression, self).__init__()
        self.num_joints = num_joints
        self.num_anchors = num_anchors
        # Four 3x3 conv + BN stages at constant spatial resolution.
        self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(output_channels)
        self.conv2 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(output_channels)
        self.conv3 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(output_channels)
        self.conv4 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(output_channels)
        # One depth value per (anchor, joint) pair at every position.
        self.output = nn.Conv2d(output_channels, num_anchors*num_joints, kernel_size=3, padding=1)
        # Activation Function
        self.relu = nn.LeakyReLU(inplace=True)
        self._initialize()

    def _initialize(self):
        """Xavier-init conv weights; start BatchNorm as the identity (weight=1, bias=0)."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_normal_(module.weight.data)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        """Run the head on feature map x of shape (N, C, H, W)."""
        out = x
        # conv -> BN -> LeakyReLU, four times, preserving H and W.
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2),
                         (self.conv3, self.bn3), (self.conv4, self.bn4)):
            out = self.relu(bn(conv(out)))
        out = self.output(out)                  # (N, num_anchors*num_joints, H, W)
        out = out.permute(0, 3, 2, 1)           # (N, W, H, num_anchors*num_joints)
        batch_size, width, height, _ = out.shape
        out = out.view(batch_size, width, height, self.num_anchors, self.num_joints)
        # Flatten spatial positions and anchors into one prediction axis.
        return out.contiguous().view(batch_size, -1, self.num_joints)
class OffsetRegression(nn.Module):
    """Anchor-based 2D offset regression head.

    Estimates, for every anchor point at each spatial location, an (x, y)
    offset per joint. Output shape: (N, W*H*num_anchors, num_joints, 2).
    """

    def __init__(self, input_channels, output_channels=256, num_anchors=16, num_joints=18):
        """
        Class initializer
        :param input_channels: number of input channels
        :param output_channels: number of output channels
        :param num_anchors: total number of anchor points
        :param num_joints: total number of joints to predict
        """
        super(OffsetRegression, self).__init__()
        self.num_anchors = num_anchors
        self.num_joints = num_joints
        # Four 3x3 conv + BN stages at constant spatial resolution.
        self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(output_channels)
        self.conv2 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(output_channels)
        self.conv3 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(output_channels)
        self.conv4 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(output_channels)
        # Two values (x, y offset) per (anchor, joint) pair at every position.
        self.output = nn.Conv2d(output_channels, num_anchors*num_joints*2, kernel_size=3, padding=1)
        # Activation Function
        self.relu = nn.LeakyReLU(inplace=True)
        self._initialize()

    def _initialize(self):
        """Xavier-init conv weights; start BatchNorm as the identity (weight=1, bias=0)."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_normal_(module.weight.data)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        """Run the head on feature map x of shape (N, C, H, W)."""
        out = x
        # conv -> BN -> LeakyReLU, four times, preserving H and W.
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2),
                         (self.conv3, self.bn3), (self.conv4, self.bn4)):
            out = self.relu(bn(conv(out)))
        out = self.output(out)                  # (N, num_anchors*num_joints*2, H, W)
        out = out.permute(0, 3, 2, 1)           # (N, W, H, num_anchors*num_joints*2)
        batch_size, width, height, _ = out.shape
        out = out.view(batch_size, width, height, self.num_anchors, self.num_joints, 2)
        # Flatten spatial positions and anchors into one prediction axis.
        return out.contiguous().view(batch_size, -1, self.num_joints, 2)
class JointClassification(nn.Module):
    """Anchor-based joint classification head.

    Produces a per-joint score for every anchor point at each spatial
    location. Output shape: (N, W*H*num_anchors, num_joints).
    """

    def __init__(self, input_channels, output_channels=256, num_anchors=16, num_joints=18):
        """
        Class initializer
        :param input_channels: number of input channels
        :param output_channels: number of output channels
        :param num_anchors: total number of anchor points
        :param num_joints: total number of joints to predict
        """
        super(JointClassification, self).__init__()
        self.num_anchors = num_anchors
        self.num_joints = num_joints
        # Four 3x3 conv + BN stages at constant spatial resolution.
        self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(output_channels)
        self.conv2 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(output_channels)
        self.conv3 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(output_channels)
        self.conv4 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(output_channels)
        # One score per (anchor, joint) pair at every position.
        self.output = nn.Conv2d(output_channels, num_anchors*num_joints, kernel_size=3, padding=1)
        # Activation Function
        self.relu = nn.LeakyReLU(inplace=True)
        self._initialize()

    def _initialize(self):
        """Xavier-init conv weights; start BatchNorm as the identity (weight=1, bias=0)."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_normal_(module.weight.data)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        """Run the head on feature map x of shape (N, C, H, W)."""
        out = x
        # conv -> BN -> LeakyReLU, four times, preserving H and W.
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2),
                         (self.conv3, self.bn3), (self.conv4, self.bn4)):
            out = self.relu(bn(conv(out)))
        out = self.output(out)                  # (N, num_anchors*num_joints, H, W)
        out = out.permute(0, 3, 2, 1)           # (N, W, H, num_anchors*num_joints)
        batch_size, width, height, _ = out.shape
        out = out.view(batch_size, width, height, self.num_anchors, self.num_joints)
        # Flatten spatial positions and anchors into one prediction axis.
        return out.contiguous().view(batch_size, -1, self.num_joints)
| 36.464602
| 128
| 0.619585
| 1,141
| 8,241
| 4.311131
| 0.101665
| 0.128075
| 0.032934
| 0.050823
| 0.859118
| 0.858508
| 0.849156
| 0.826184
| 0.826184
| 0.819882
| 0
| 0.054554
| 0.259313
| 8,241
| 225
| 129
| 36.626667
| 0.751311
| 0.201796
| 0
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.030303
| 0
| 0.143939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f36e954ea7bb6f76c01393b2d60f36af4f58cad
| 194
|
py
|
Python
|
crazycar/agents/__init__.py
|
nutorbit/crazycar
|
f265c3f799525774c66529bb39ccc8380c2a3b2e
|
[
"MIT"
] | 1
|
2020-03-25T10:16:50.000Z
|
2020-03-25T10:16:50.000Z
|
crazycar/agents/__init__.py
|
nutorbit/CrazyCar
|
f265c3f799525774c66529bb39ccc8380c2a3b2e
|
[
"MIT"
] | 3
|
2021-06-08T21:12:21.000Z
|
2022-03-12T00:27:30.000Z
|
crazycar/agents/__init__.py
|
nutorbit/CrazyCar
|
f265c3f799525774c66529bb39ccc8380c2a3b2e
|
[
"MIT"
] | null | null | null |
from crazycar.agents.base import BaseAgent
from crazycar.agents.full_agent import Racecar
from crazycar.agents.image_agent import ImageAgent
from crazycar.agents.sensor_agent import SensorAgent
| 38.8
| 52
| 0.876289
| 27
| 194
| 6.185185
| 0.481481
| 0.287425
| 0.431138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 194
| 4
| 53
| 48.5
| 0.938202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0f4171c0345608e4bfd27bdf972493e2ff7a24e0
| 186
|
py
|
Python
|
models/dao/aps/__init__.py
|
muhammetbolat/pythondataintegrator
|
5b274db8d39ca1340d535a500f04f6e734f1d54d
|
[
"MIT"
] | 14
|
2020-12-19T15:06:13.000Z
|
2022-01-12T19:52:17.000Z
|
models/dao/aps/__init__.py
|
muhammetbolat/pythondataintegrator
|
5b274db8d39ca1340d535a500f04f6e734f1d54d
|
[
"MIT"
] | 43
|
2021-01-06T22:05:22.000Z
|
2022-03-10T10:30:30.000Z
|
models/dao/aps/__init__.py
|
muhammetbolat/pythondataintegrator
|
5b274db8d39ca1340d535a500f04f6e734f1d54d
|
[
"MIT"
] | 4
|
2020-12-18T23:10:09.000Z
|
2021-04-02T13:03:12.000Z
|
from models.dao.aps.ApSchedulerEvent import ApSchedulerEvent
from models.dao.aps.ApSchedulerJobEvent import ApSchedulerJobEvent
from models.dao.aps.ApSchedulerJob import ApSchedulerJob
| 37.2
| 66
| 0.88172
| 21
| 186
| 7.809524
| 0.380952
| 0.182927
| 0.237805
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069892
| 186
| 4
| 67
| 46.5
| 0.947977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0f774173c263eb8783a20804ee54aedfb5731aab
| 171
|
py
|
Python
|
cornflow-dags/DAG/vrp/solvers/__init__.py
|
ggsdc/corn
|
4c17c46a70f95b8882bcb6a55ef7daa1f69e0456
|
[
"MIT"
] | 2
|
2020-07-09T20:58:47.000Z
|
2020-07-20T20:40:46.000Z
|
cornflow-dags/DAG/vrp/solvers/__init__.py
|
baobabsoluciones/cornflow
|
bd7cae22107e5fe148704d5f41d4f58f9c410b40
|
[
"Apache-2.0"
] | 2
|
2022-03-31T08:42:10.000Z
|
2022-03-31T12:05:23.000Z
|
cornflow-dags/DAG/vrp/solvers/__init__.py
|
ggsdc/corn
|
4c17c46a70f95b8882bcb6a55ef7daa1f69e0456
|
[
"MIT"
] | null | null | null |
from .model import Algorithm
from .model_ortools import Algorithm as ORT_Algorithm
from .modelClosestNeighbor import Algorithm as Heuristic
from .modelMIP import modelMIP
| 34.2
| 56
| 0.859649
| 22
| 171
| 6.590909
| 0.454545
| 0.310345
| 0.234483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116959
| 171
| 4
| 57
| 42.75
| 0.960265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0f929a8caff469c4e14d85e4884a3791f4e937cf
| 3,912
|
py
|
Python
|
tests/terraform/checks/resource/azure/test_SynapseWorkspaceEnablesManagedVirtualNetworks.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
tests/terraform/checks/resource/azure/test_SynapseWorkspaceEnablesManagedVirtualNetworks.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
tests/terraform/checks/resource/azure/test_SynapseWorkspaceEnablesManagedVirtualNetworks.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.SynapseWorkspaceEnablesManagedVirtualNetworks import check
from checkov.common.models.enums import CheckResult
class TestSynapseWorkspaceEnablesManagedVirtualNetworks(unittest.TestCase):
    """Tests for the check that azurerm_synapse_workspace enables managed virtual networks.

    The three original tests duplicated the same HCL fixture verbatim, differing
    only in the managed_virtual_network_enabled attribute; the fixture is now
    built once by _run_check.
    """

    def _run_check(self, managed_virtual_network_enabled=None):
        """Build the workspace HCL (optionally with managed_virtual_network_enabled)
        and return the CheckResult from scanning it.

        :param managed_virtual_network_enabled: True/False to emit the attribute,
            None to omit it entirely (mirrors the original three fixtures).
        """
        vnet_attr = ""
        if managed_virtual_network_enabled is not None:
            vnet_attr = "managed_virtual_network_enabled = %s" % str(managed_virtual_network_enabled).lower()
        hcl_res = hcl2.loads("""
                    resource "azurerm_synapse_workspace" "example" {
                      name                                 = "example"
                      resource_group_name                  = azurerm_resource_group.example.name
                      location                             = azurerm_resource_group.example.location
                      storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
                      sql_administrator_login              = "sqladminuser"
                      sql_administrator_login_password     = "H@Sh1CoR3!"
                      %s
                      aad_admin {
                        login     = "AzureAD Admin"
                        object_id = "00000000-0000-0000-0000-000000000000"
                        tenant_id = "00000000-0000-0000-0000-000000000000"
                      }

                      tags = {
                        Env = "production"
                      }
                    }
                    """ % vnet_attr)
        resource_conf = hcl_res['resource'][0]['azurerm_synapse_workspace']['example']
        return check.scan_resource_conf(conf=resource_conf)

    def test_failure_1(self):
        # Attribute explicitly disabled -> check fails.
        scan_result = self._run_check(managed_virtual_network_enabled=False)
        self.assertEqual(CheckResult.FAILED, scan_result)

    def test_failure_2(self):
        # Attribute omitted (defaults to disabled) -> check fails.
        scan_result = self._run_check()
        self.assertEqual(CheckResult.FAILED, scan_result)

    def test_success(self):
        # Attribute explicitly enabled -> check passes.
        scan_result = self._run_check(managed_virtual_network_enabled=True)
        self.assertEqual(CheckResult.PASSED, scan_result)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 44.454545
| 105
| 0.565184
| 341
| 3,912
| 6.111437
| 0.217009
| 0.046065
| 0.066219
| 0.086372
| 0.850768
| 0.850768
| 0.850768
| 0.850768
| 0.850768
| 0.850768
| 0
| 0.0852
| 0.360941
| 3,912
| 87
| 106
| 44.965517
| 0.7484
| 0
| 0
| 0.706667
| 0
| 0
| 0.738497
| 0.280419
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.04
| false
| 0.053333
| 0.053333
| 0
| 0.106667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
7e2767c09d9f67e859865fe299d88481dd1af526
| 1,002
|
py
|
Python
|
qcportal/outputstore/test_models.py
|
bennybp/QCPortal
|
c1d0f4e239c9363875680e93b4357c1680d6825c
|
[
"BSD-3-Clause"
] | null | null | null |
qcportal/outputstore/test_models.py
|
bennybp/QCPortal
|
c1d0f4e239c9363875680e93b4357c1680d6825c
|
[
"BSD-3-Clause"
] | null | null | null |
qcportal/outputstore/test_models.py
|
bennybp/QCPortal
|
c1d0f4e239c9363875680e93b4357c1680d6825c
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T16:37:54.000Z
|
2022-03-18T16:37:54.000Z
|
import pytest
from .models import OutputStore
def test_models_outputstore_fail():
    """OutputStore must reject compression settings when `data` is passed raw."""
    raw_inputs = ("123", {"123": 123})  # an uncompressed str and an uncompressed dict
    # A compression method on raw data is invalid.
    for raw in raw_inputs:
        with pytest.raises(ValueError, match=r"Compression is set, but input is a"):
            OutputStore(**{"output_type": "stdout", "data": raw, "compression": "bzip2"})
    # A non-zero compression level on raw data is likewise invalid.
    for raw in raw_inputs:
        with pytest.raises(ValueError, match=r"Compression level is set, but input is a"):
            OutputStore(**{"output_type": "stdout", "data": raw, "compression_level": 1})
| 45.545455
| 94
| 0.674651
| 139
| 1,002
| 4.798561
| 0.230216
| 0.035982
| 0.041979
| 0.155922
| 0.875562
| 0.875562
| 0.830585
| 0.830585
| 0.830585
| 0.830585
| 0
| 0.02934
| 0.183633
| 1,002
| 21
| 95
| 47.714286
| 0.786064
| 0.186627
| 0
| 0.363636
| 0
| 0
| 0.382716
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| true
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e4777e5696cdcb4157d79ca7d647c960e722115
| 32,070
|
py
|
Python
|
tests/ml/adhoc_compare.py
|
brenobeirigo/mod
|
e860198887f6dcb9ad9865830cc5f97041948bdc
|
[
"MIT"
] | null | null | null |
tests/ml/adhoc_compare.py
|
brenobeirigo/mod
|
e860198887f6dcb9ad9865830cc5f97041948bdc
|
[
"MIT"
] | null | null | null |
tests/ml/adhoc_compare.py
|
brenobeirigo/mod
|
e860198887f6dcb9ad9865830cc5f97041948bdc
|
[
"MIT"
] | null | null | null |
import os
import sys
# Adding project folder to import modules
# NOTE(review): assumes the script is launched from the repository root — confirm.
root = os.getcwd().replace("\\", "/")
sys.path.append(root)
import mod.env.config as conf
from mod.env.config import ConfigNetwork
import pandas as pd
from copy import deepcopy
from collections import defaultdict
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
# Seaborn plotting context and the file format used when saving figures.
context = "paper"
fig_format = "pdf"
def movingaverage(data, w, start=0, start_den=2):
    """Smooth *data* with a moving average of window width *w*.

    For indices ``i < start`` a shorter "warm-up" window of width
    ``int(w / start_den)`` looking forward from ``i`` is used; for interior
    points the forward window ``data[i:i+w]`` is used; near the end the
    trailing window ``data[i-w:i]`` is used instead.

    Fixes over the previous version:
    - guards against ZeroDivisionError when ``int(w / start_den) == 0``;
    - clamps the trailing-window start to 0 (a negative slice start used to
      wrap around and silently produce empty/wrong windows when
      ``len(data) < 2 * w``);
    - divides by the actual number of samples in the window, so boundary
      windows shorter than ``w`` still yield a true average.

    Args:
        data: sequence of numbers to smooth.
        w: window width (number of samples averaged).
        start: first index at which the full-width window applies.
        start_den: divisor for the warm-up window width.

    Returns:
        np.ndarray of the same length as *data* with the smoothed values.
    """
    n = len(data)
    new_data = np.zeros(n)
    # max(1, ...) avoids ZeroDivisionError when w < start_den.
    warmup_w = max(1, int(w / start_den))
    for i in range(n):
        if i < start:
            window = data[i : i + warmup_w]
        elif i + w < n:
            window = data[i : i + w]
        else:
            # Clamp: without max(0, ...) a negative start wraps around.
            window = data[max(0, i - w) : i]
        # Divide by the true window length (empty window -> 0.0, as before).
        new_data[i] = sum(window) / max(1, len(window))
    return new_data
if __name__ == "__main__":
    # test label -> list of experiment folder names to compare
    adhoc_compare = dict()
    # test label -> human-readable legend entries (zipped with adhoc_compare)
    adhoc_compare_labels = dict()
    # Optional per-test plot styling; plotting falls back to *_default lists
    # when a test label has no entry here.
    colors = dict()
    markers = dict()
    linewidth = dict()
    # Selects which comparison set defined below is actually plotted.
    test_label = "hire500"
# test_label = "rebalance"
# test_label = "pavfav"
# test_label = "exploration"
# test_label = "flood"
# test_label = "unlimited"
# test_label = "policy"
# test_label = "b"
# adhoc_compare["p"] = [
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_1.00_B_2.40_10.00_0.00_0.00_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_4.80_1.00_B_2.40_10.00_0.00_2.40_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_0.00_1.00_B_2.40_10.00_5.00_0.00_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_1.00_B_2.40_15.00_0.00_0.00_0.00",
# ]
d = "0.01"
adhoc_compare["hire500"] = [
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[5]=(10102, 10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(32202, 33303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[5]=(10102, 10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(32202, 33303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[5]=(10102, 10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(32202, 33303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
]
colors["hire500"] = ["k", "g", "r", "b", "k", "g", "r", "b", "k", "g", "r", "b"]
markers["hire500"] = [None, None, None, None, "o", "o", "o","o", "x","x","x","x"]
adhoc_compare_labels["hire500"] = [
f"{d} - (10102, 10203, 1030-, 32-0-, 33-0-)",
f"{d} - (10202, 32302, 33-0-)",
f"{d} - (10-02, 32-02, 33-0-)",
f"{d} - (10-0-, 32-0-, 33-0-)",
f"{d} - (10203, 1030-, 32-0-, 33-0-)",
f"{d} - (10303, 32-0-, 33-0-)",
f"{d} - (32202, 33303, 33-0-)",
"0.10 - (10102, 10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10202, 32302, 33-0-)",
"0.10 - (10-02, 32-02, 33-0-)",
"0.10 - (10-0-, 32-0-, 33-0-)",
"0.10 - (10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10303, 32-0-, 33-0-)",
"0.10 - (32202, 33303, 33-0-)",
"0.10 - (10102, 10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10202, 32302, 33-0-)",
"0.10 - (10-02, 32-02, 33-0-)",
"0.10 - (10-0-, 32-0-, 33-0-)",
"0.10 - (10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10303, 32-0-, 33-0-)",
"0.10 - (32202, 33303, 33-0-)",
]
# adhoc_compare["hire500m"] = [
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-01, 32-02, 33-03)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-01, 32-02, 33-03)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[4]=(10-02, 10-03, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[4]=(10-02, 10-03, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# ]
# adhoc_compare_labels["hire500m"] = [
# "500[M] - (10-01, 32-02, 33-03) - 1",
# "500[M] - (10-01, 32-02, 33-03) - 2",
# "500[M] - (10-02, 32-0-, 33-0-) - 1",
# "500[M] - (10-02, 32-0-, 33-0-) - 2",
# "500[M] - (10-02, 32-03, 33-0-) - 1",
# "500[M] - (10-02, 32-03, 33-0-) - 2",
# "500[M] - (10-02, 10-03, 32-0-, 33-0-) - 1",
# "500[M] - (10-02, 10-03, 32-0-, 33-0-) - 2",
# ]
adhoc_compare["b"] = [
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_0.00_B_2.40_10.00_0.00_0.00_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_4.80_0.00_B_2.40_10.00_0.00_2.40_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_0.00_0.00_B_2.40_10.00_5.00_0.00_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_7.20_0.00_B_2.40_10.00_5.00_4.80_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_9.60_0.00_B_2.40_10.00_5.00_7.20_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_0.00_B_2.40_15.00_0.00_0.00_1.00",
]
adhoc_compare_labels["b"] = [
r"10min (max. pk. delay)",
r"10min (max. pk. delay) + 1 $\times$ RP",
r"10min (max. pk. delay) + 5min (pen. tolerance)",
r"10min (max. pk. delay) + 5min (pen. tolerance) + 1 $\times$ RP",
r"10min (max. pk. delay) + 5min (pen. tolerance) + 2 $\times$ RP",
r"10min (max. pk. delay) + 5min (pen. tolerance) + 3 $\times$ RP",
r"15min (max. pk. delay) + 5min (pen. tolerance)",
]
adhoc_compare_labels["sensitivity_analysis"] = [
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_10.00_0.00_0.00_1.00_B_7.20_15.00_0.00_0.0,0_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_10.00_0.00_0.00_0.00_B_7.20_15.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_5.00_0.00_0.00_1.00_B_7.20_10.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_5.00_0.00_0.00_0.00_B_7.20_10.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_10.00_0.00_0.00_1.00_B_4.80_15.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_10.00_0.00_0.00_0.00_B_4.80_15.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_5.00_0.00_0.00_1.00_B_4.80_10.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_5.00_0.00_0.00_0.00_B_4.80_10.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_1.00_B_2.40_15.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_0.00_B_2.40_15.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_1.00_B_2.40_10.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_0.00_B_2.40_10.00_0.00_0.00_1.00",
]
adhoc_compare["a"] = [
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_1.00_B_2.40_10.00_0.00_0.00_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_4.80_1.00_B_2.40_10.00_0.00_2.40_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_0.00_1.00_B_2.40_10.00_5.00_0.00_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_7.20_1.00_B_2.40_10.00_5.00_4.80_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_9.60_1.00_B_2.40_10.00_5.00_7.20_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_1.00_B_2.40_15.00_0.00_0.00_0.00",
]
adhoc_compare_labels["a"] = [
"5",
"5+P",
"5+5",
"5+5+2P",
"5+5+3P",
"5+5+4P",
"10",
]
adhoc_compare["penalty"] = [
"baselineB10_disable_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-4, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"baselineB10_pen_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-4, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"baselineB10_pen_rej_pen_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-4, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["penalty"] = [
"10 min. pickup delay",
"10 min. pickup delay + 5 min. tolerance",
"10 min. pickup delay + 5 min. tolerance + rejection penalty",
]
# ################################################################ #
# Discount function ############################################## #
# ################################################################ #
adhoc_compare["penalize"] = [
"base_LIN_V=0300-0000(R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([0-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_0.00_B_2.40_10.00_0.00_0.00_1.00",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([2-8][tabu=00])[L(05)]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([2-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([3-8][tabu=00])[L(05)]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([3-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["penalize"] = [
"Adjacent neighbors (30s)",
r"8 $\times$ RC1",
r"8 $\times$ RC1 [P]",
r"8 $\times$ RC5",
r"8 $\times$ RC5 [P]",
r"8 $\times$ RC10",
r"8 $\times$ RC10 [P]",
]
colors["penalize"] = ["k", "g", "g", "r", "r", "b", "b"]
markers["penalize"] = [None, None, "o", None, "o", None, "o"]
linewidth["penalize"] = [1, 1, 1, 1, 1, 1, 1]
# ################################################################ #
# Rebalance ###################################################### #
# ################################################################ #
adhoc_compare["rebalance"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 3-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"far_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4, 3-2][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["rebalance"] = [
r"8 $\times$ RC1",
r"8 $\times$ RC1 + 4 $\times$ RC5",
r"8 $\times$ RC1 + 4 $\times$ RC10",
r"8 $\times$ RC1 + 4 $\times$ RC5 + 2 $\times$ RC10",
]
linewidth["rebalance"] = [1, 1, 1, 1, 1, 1, 1]
markers["rebalance"] = [None, "o", "x", "D"]
colors["rebalance"] = [
"k",
"g",
"r",
"b",
"magenta",
"gold",
"gray",
"pink",
"#cab2d6",
]
# ################################################################ #
# Max. number of cars ############################################ #
# ################################################################ #
adhoc_compare["flood"] = [
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"far_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(02)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(10)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["flood"] = [
# "unlimited",
"2",
"5",
"10",
]
colors["flood"] = ["r", "k", "g", "r"]
linewidth["flood"] = [1, 1, 1, 1, 1, 1, 1]
markers["flood"] = ["x", None, "o"]
# ################################################################ #
# Max. number of cars (unlimited)################################# #
# ################################################################ #
adhoc_compare["unlimited"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["unlimited"] = [
r"8 $\times$ RC1",
r"8 $\times$ RC1 (unlimited)",
r"8 $\times$ RC1 + 4 $\times$ RC5",
r"8 $\times$ RC1 + 4 $\times$ RC5 (unlimited)",
]
colors["unlimited"] = ["k", "k", "r", "r"]
linewidth["unlimited"] = [1, 1, 1, 1, 1, 1, 1]
markers["unlimited"] = [None, "o", None, "o"]
adhoc_compare["policy"] = [
"myopic_[MY]_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "myopic_[RA]_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_hire_LIN_cars=0300-0200(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["policy"] = [
"Myopic",
# "Random rebalance",
"VFA (300 PAVs)",
"VFA (300 PAVs + 200 FAVs)",
]
# Rebalance
# adhoc_compare["flood"] = [
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# ]
# adhoc_compare_labels["flood"] = [
# "8 x RC1",
# "8 x RC1 (unlimited)",
# "8 x RC1 + 4 x RC5",
# "8 x RC1 + 4 x RC5 (unlimited)",
# ]
# adhoc_compare_labels["avoidflood"] = [
# ]
adhoc_compare["exploration"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_[X]LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing0.25_[X]LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"far_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][thompson=06][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_[X]LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-16][thompson=08][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["exploration"] = [
"8 x RC1 + 4 x RC5",
# "16 x RC1",
# "16 x RC1 (annealing)",
# "16 x RC1 (annealing thompson 8)",
"8 x RC1 (annealing)",
"8 x RC1 (annealing 0.25)",
"8 x RC1 + 4 x RC5 (thompson 6)",
"16 x RC1 (thompson 6)",
]
adhoc_compare["pavfav"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_hire_LIN_cars=0300-0200(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["pavfav"] = ["300 PAVs", "300 PAVs + 200 FAVs"]
# adhoc_compare_labels = [
# "Rebalance to closest nodes",
# "Rebalance to closest nodes + 1min RCs",
# "Rebalance to 1min RCs",
# "Rebalance to 1min RCs [P]",
# "Rebalance to 1min (8), 5min(4), 10min(2) RCs [P]",
# "Rebalance to 1min (8), 5min(4), 10min(2) RCs [P] + annealing",
# "Rebalance to 1min RCs [P] + annealing",
# "Rebalance to 1min RCs [P] + annealing (0.1)",
# # "Annealing",
# # "Annealing + Thompson (0.5)",
# # "Annealing + Thompson (0.2)",
# ]
colors["policy"] = ["k", "r", "g"]
markers["policy"] = [None, "x", "D"]
colors["pavfav"] = ["k", "r"]
colors["exploration"] = [
"k",
"g",
"r",
"b",
"magenta",
"gold",
"gray",
"pink",
"#cab2d6",
]
# linewidth["penalize"] = [2, 2, 1, 2, 1, 2, 1]
# linewidth["policy"] = [1, 1, 1, 1, 1, 1, 1]
linewidth["pavfav"] = [1, 1, 1, 1, 1, 1, 1]
markers["pavfav"] = [None, "o", "x"]
linewidth["exploration"] = [1, 1, 1, 1, 1, 1, 1]
# markers["exploration"] = [None, "o", "x"]
colors_default = [
"k",
"g",
"r",
"b",
"#fb9a99",
"#e31a1c",
"#fdbf6f",
"#ff7f00",
"#cab2d6",
]
legend_pos = dict()
legend_pos["policy"] = "center right"
SL = "Requests serviced"
OF = "Objective function"
TIME = "Time(s)"
XLABEL = "Iteration"
window = 50
ITERATIONS = 1000
markers_default = [None] * len(adhoc_compare[test_label])
# markers = [None, "o", "*", "x", "|", None]
shadow = False
dpi = 1200
    # d: flat column dict for the CSV export; d_plot: category -> list of
    # (experiment id, legend label, per-iteration values) tuples.
    d = dict()
    d_plot = defaultdict(list)
    # zip() truncates to the shorter of experiments/labels.
    for exp, sum_label in zip(
        adhoc_compare[test_label], adhoc_compare_labels[test_label]
    ):
        # folder = "O:/phd/output_paper/"
        # folder = conf.FOLDER_OUTPUT
        # path_all_stats = folder + exp + "/overall_stats.csv"
        # config_exp = ConfigNetwork()
        # Comparison is drawn from training
        path_all_stats = conf.FOLDER_OUTPUT + exp + "/adp/train/overall_stats.csv"
        print(sum_label, path_all_stats)
        config_exp = ConfigNetwork()
        try:
            # config_exp.load(folder + exp + "/exp_settings.json")
            config_exp.load(conf.FOLDER_OUTPUT + exp + "/exp_settings.json")
            df = pd.read_csv(path_all_stats)
        except Exception as e:
            # Best-effort: skip experiments whose output is missing/corrupt.
            print(f"Cannot load file!Exception: \"{e}\"")
            continue
        print(sum_label)
        # spatiotemporal_levels = exp[2].get_levels()
        # neighbors = exp[2].get_reb_neighbors()
        id_label = exp # spatiotemporal_levels + neighbors
        d["reward_" + id_label] = df["Total reward"][:ITERATIONS]
        d["service_rate_" + id_label] = df["Service rate"][:ITERATIONS]
        d["time_" + id_label] = df["time"][:ITERATIONS]
        d_plot[OF].append(
            (id_label, sum_label, df["Total reward"][:ITERATIONS].values)
        )
        d_plot[SL].append(
            (id_label, sum_label, df["Service rate"][:ITERATIONS].values)
        )
        # d_plot["Time(s)"].append(
        #     (id_label, sum_label, df["time"][:ITERATIONS])
        # )
        # print(f" - {id_label}")\
    # Per-category y-axis tick positions and labels.
    yticks = dict()
    yticks_labels = dict()
    yticks[OF] = np.linspace(0, 600, 8)
    yticks[SL] = np.linspace(0.5, 0.7, 5)
    # Policy
    # yticks[OF] = np.linspace(13000, 19000, 13)
    # yticks[SL] = np.linspace(0.5, 0.95, 10)
    # yticks[OF] = np.linspace(13000, 19000, 7)
    # yticks[SL] = np.linspace(0.5, 0.95, 8)
    yticks_labels[SL] = [f"{s:3.0%}" for s in yticks[SL]]
    yticks_labels[OF] = [f"{p:,.0f}" for p in yticks[OF]]
    yticks[TIME] = np.linspace(0, 300, 5)
    yticks_labels["Time(s)"] = np.linspace(0, 300, 5)
    # Export the raw (unsmoothed) series, columns sorted by name.
    df_outcome = pd.DataFrame(d)
    df_outcome = df_outcome[sorted(df_outcome.columns.values)]
    df_outcome.to_csv("outcome_tuning.csv", index=False)
    sns.set_context(context)
    np.set_printoptions(precision=3)
    # One panel per category. NOTE(review): with a single category, subplots
    # returns a bare Axes and axs[i] below would fail — assumes >= 2 panels.
    fig, axs = plt.subplots(1, len(d_plot), figsize=(8 * len(d_plot), 6))
    for i, cat_label_data in enumerate(d_plot.items()):
        cat, label_data = cat_label_data
        if shadow:
            # Raw curves drawn first as a faint backdrop (no legend entries).
            # NOTE(review): the loop variable shadows `label_data`; it is
            # re-unpacked from cat_label_data right after on purpose.
            for j, label_data in enumerate(label_data):
                label, sum_label, data = label_data
                axs[i].plot(
                    data,
                    color=colors.get(test_label, colors_default)[j],
                    linewidth=linewidth.get(test_label, [2] * len(label_data))[
                        j
                    ],
                    marker=markers.get(test_label, markers_default)[j],
                    alpha=0.25,
                    label="",
                )
        cat, label_data = cat_label_data
        # Smoothed curves: one line per experiment in this category.
        for j, ld in enumerate(label_data):
            label, sum_label, data = ld
            mavg = movingaverage(data, window)
            axs[i].plot(
                mavg,
                color=colors.get(test_label, colors_default)[j],
                linewidth=linewidth.get(test_label, [1] * len(label_data))[j],
                marker=markers.get(test_label, markers_default)[j],
                fillstyle="none",
                markevery=25,
                # linestyle=':',
                label=sum_label,
            )
        # And add a special annotation for the group we are interested in
        # axs[i].text(ITERATIONS+0.2, mavg[-1], sum_label, horizontalalignment='left', size='small', color='k')
        # axs[i].set_title(vst)
        axs[i].set_xlabel(XLABEL)
        axs[i].set_ylabel(cat)
        # NOTE(review): `data` leaks from the loop above — the x-limit comes
        # from the last experiment's series length.
        axs[i].set_xlim(0, len(data))
        axs[i].set_yticks(yticks[cat])
        axs[i].set_yticklabels(yticks_labels[cat])
    # Single legend (attached to the last axes drawn on).
    plt.legend(
        loc=legend_pos.get(test_label, "lower right"),
        frameon=False,
        bbox_to_anchor=(1, 0, 0, 1), # (0.5, -0.15),
        ncol=1,
        # title="Max. #cars/location"
    )
    # plt.show()
    print(f'Saving "{test_label}.{fig_format}"...')
    plt.savefig(f"{test_label}.{fig_format}", bbox_inches="tight", dpi=dpi)
| 56.560847
| 208
| 0.570939
| 6,819
| 32,070
| 2.395952
| 0.048981
| 0.040029
| 0.033052
| 0.074917
| 0.751928
| 0.735892
| 0.715938
| 0.70639
| 0.682213
| 0.671502
| 0
| 0.275513
| 0.165544
| 32,070
| 566
| 209
| 56.660777
| 0.335002
| 0.285688
| 0
| 0.189802
| 0
| 0.189802
| 0.601667
| 0.452972
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002833
| false
| 0
| 0.031161
| 0
| 0.036827
| 0.016997
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0e2b2372c0dd9b2648c7d23ca563843a5241a55e
| 17,295
|
py
|
Python
|
oneflow/python/test/ops/test_optimizers.py
|
ashing-zhang/oneflow
|
70db228a4d361c916f8f8d85e908795b479e5d20
|
[
"Apache-2.0"
] | 1
|
2020-10-13T03:03:40.000Z
|
2020-10-13T03:03:40.000Z
|
oneflow/python/test/ops/test_optimizers.py
|
ashing-zhang/oneflow
|
70db228a4d361c916f8f8d85e908795b479e5d20
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/test/ops/test_optimizers.py
|
ashing-zhang/oneflow
|
70db228a4d361c916f8f8d85e908795b479e5d20
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
# Let TensorFlow allocate GPU memory on demand so it can coexist with
# OneFlow on the same device instead of reserving all GPU memory up front.
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow_rmsprop(
    device_type, x_shape, centered, decay_rate, learning_rate, train_iters
):
    """Train one variable with OneFlow RMSProp and TF RMSprop on identical
    random losses and assert the resulting values are numerically close.

    Args:
        device_type: "gpu" or "cpu" placement for the OneFlow job.
        x_shape: shape of the trained variable and the random masks.
        centered: whether to use centered RMSProp (mirrored on both sides).
        decay_rate: RMSProp decay (TF's `rho`).
        learning_rate: constant learning rate for both optimizers.
        train_iters: number of optimization steps to compare.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    # OneFlow training job: minimizes mean(x * random_mask) and returns x.
    @flow.global_function(type="train", function_config=func_config)
    def testRmsprop(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.RMSProp(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                decay_rate=decay_rate,
                epsilon=0,
                centered=centered,
            ).minimize(loss)
            return x
    checkpoint = flow.train.CheckPoint()
    checkpoint.init()
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = None
    for i in range(train_iters + 1):
        # Each call returns x *before* applying this step's update, so the
        # final x reflects train_iters updates — presumably matching the TF
        # loop below; confirm against OneFlow's job-return semantics.
        x = testRmsprop(random_masks_seq[i])
        if i == 0:
            init_value = np.copy(x)
    # Reference implementation: same initial value and same mask sequence.
    var = tf.Variable(init_value)
    opt = tf.keras.optimizers.RMSprop(
        learning_rate=learning_rate,
        rho=decay_rate,
        momentum=0.0,
        epsilon=0,
        centered=centered,
    )
    for i in range(train_iters):
        with tf.GradientTape() as tape:
            random_mask = tf.Variable(random_masks_seq[i])
            loss = tf.reduce_mean(var * random_mask)
        gradients = tape.gradient(loss, var)
        opt.apply_gradients(zip([gradients], [var]))
    assert np.allclose(x.flatten(), var.numpy().flatten(), rtol=1e-3, atol=1e-3,)
def compare_with_tensorflow_adam(
    device_type, x_shape, beta1, beta2, epsilon, learning_rate, train_iters
):
    """Train one variable with OneFlow Adam and TF Adam on identical random
    losses and assert the resulting values are numerically close.

    Args:
        device_type: "gpu" or "cpu" placement for the OneFlow job.
        x_shape: shape of the trained variable and the random masks.
        beta1, beta2, epsilon: Adam hyper-parameters (mirrored on both sides).
        learning_rate: constant learning rate for both optimizers.
        train_iters: number of optimization steps to compare.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    # BUG FIX: previously a fresh flow.FunctionConfig() was passed here, so
    # the func_config configured above (float32 default dtype) was silently
    # discarded. Pass func_config, consistent with
    # compare_with_tensorflow_rmsprop.
    @flow.global_function(type="train", function_config=func_config)
    def testAdam(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                do_bias_correction=True,
            ).minimize(loss)
            return x

    checkpoint = flow.train.CheckPoint()
    checkpoint.init()
    # Pre-generate the random mask sequence so OneFlow and TF see the exact
    # same losses.
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = None
    for i in range(train_iters + 1):
        x = testAdam(random_masks_seq[i])
        if i == 0:
            # Capture the initial value so TF starts from the same point.
            init_value = np.copy(x)
    # Reference implementation: same initial value and same mask sequence.
    var = tf.Variable(init_value)
    opt = tf.keras.optimizers.Adam(
        learning_rate=learning_rate,
        beta_1=beta1,
        beta_2=beta2,
        epsilon=epsilon,
        amsgrad=False,
    )
    for i in range(train_iters):
        with tf.GradientTape() as tape:
            random_mask = tf.Variable(random_masks_seq[i])
            loss = tf.reduce_mean(var * random_mask)
        gradients = tape.gradient(loss, var)
        opt.apply_gradients(zip([gradients], [var]))
    assert np.allclose(x.flatten(), var.numpy().flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_numpy_adamw(
    device_type,
    x_shape,
    beta1,
    beta2,
    epsilon,
    weight_decay,
    learning_rate,
    train_iters,
):
    """Train one variable with OneFlow AdamW and compare against a NumPy
    reference implementation (bias-corrected Adam step plus decoupled
    weight decay); final parameters must agree within 1e-4."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    # Bug fix: a fresh flow.FunctionConfig() used to be passed here, silently
    # discarding the default_data_type set on func_config above.
    @flow.global_function(type="train", function_config=func_config)
    def testAdamW(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.AdamW(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                weight_decay=weight_decay,
                do_bias_correction=True,
            ).minimize(loss)
            return x

    checkpoint = flow.train.CheckPoint()
    checkpoint.init()
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = None
    for i in range(train_iters + 1):
        # i == 0 captures the untrained value; the rest apply the updates.
        x = testAdamW(random_masks_seq[i])
        if i == 0:
            init_value = np.copy(x)

    def adamw_update_numpy(
        param,
        gradient,
        iter,
        m,
        v,
        lr=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        weight_decay=0.9,
    ):
        """One AdamW step: bias-corrected learning rate, first/second moment
        updates, and weight decay applied directly to the parameter."""
        lr_t = lr * np.sqrt(1 - beta2 ** (iter + 1)) / (1 - beta1 ** (iter + 1))
        m_t = beta1 * m + (1 - beta1) * gradient
        v_t = beta2 * v + (1 - beta2) * gradient * gradient
        param_t = param - lr_t * (m_t / (np.sqrt(v_t) + epsilon) + weight_decay * param)
        return param_t, m_t, v_t

    param = init_value
    # d(mean(x * mask))/dx = mask / x.size; the constant part is precomputed
    # and re-scaled by each step's mask below.
    gradient = np.full(param.shape, 1.0 / np.prod(param.shape))
    m = np.zeros(param.shape)
    v = np.zeros(param.shape)
    for i in range(train_iters):
        param, m, v = adamw_update_numpy(
            param,
            gradient * random_masks_seq[i],
            i,
            m,
            v,
            learning_rate,
            beta1,
            beta2,
            epsilon,
            weight_decay,
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_numpy_lazy_adam(
    device_type, x_shape, beta1, beta2, epsilon, learning_rate, train_iters,
):
    """Train one variable with OneFlow LazyAdam and compare against a NumPy
    reference in which moments and parameters are only updated where the
    (fixed) 0/1 mask is 1; final parameters must agree within 1e-4."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testLazyAdam(
        mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            # Elements with mask == 0 get zero gradient, which is exactly
            # what LazyAdam's sparse-update behavior keys off.
            loss = flow.math.reduce_mean(x * mask)
            flow.optimizer.LazyAdam(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
            ).minimize(loss)
            return x

    checkpoint = flow.train.CheckPoint()
    checkpoint.init()
    # A single fixed binary mask is used for every iteration (unlike the
    # other comparisons, which draw a fresh mask per step).
    mask = np.random.randint(2, size=x_shape)
    mask_float = mask.astype(np.float32)
    init_value = None
    for i in range(train_iters + 1):
        # i == 0 captures the untrained value; the rest apply the updates.
        x = testLazyAdam(mask_float)
        if i == 0:
            init_value = np.copy(x)

    def lazy_adam_update_numpy(
        param,
        mask,
        gradient,
        iter,
        m,
        v,
        lr=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
    ):
        """One LazyAdam step: standard bias-corrected Adam update, but only
        committed to m, v and param where mask == 1."""
        lr_t = lr * np.sqrt(1 - beta2 ** (iter + 1)) / (1 - beta1 ** (iter + 1))
        m_t = np.copy(m)
        v_t = np.copy(v)
        m_t_o = beta1 * m + (1 - beta1) * gradient
        v_t_o = beta2 * v + (1 - beta2) * gradient * gradient
        m_t[mask == 1] = m_t_o[mask == 1]
        v_t[mask == 1] = v_t_o[mask == 1]
        param_t = np.copy(param)
        param_t_o = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
        param_t[mask == 1] = param_t_o[mask == 1]
        return param_t, m_t, v_t

    param = init_value
    # d(mean(x * mask))/dx at mask == 1 is 1 / x.size.
    gradient = np.full(param.shape, 1.0 / np.prod(param.shape))
    m = np.zeros(param.shape)
    v = np.zeros(param.shape)
    for i in range(train_iters):
        param, m, v = lazy_adam_update_numpy(
            param, mask, gradient, i, m, v, learning_rate, beta1, beta2, epsilon
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_numpy_lars(
    device_type,
    x_shape,
    momentum_beta,
    epsilon,
    lars_coefficient,
    learning_rate,
    train_iters,
):
    """Train one variable with OneFlow LARS and compare against a NumPy
    reference that scales the learning rate by the ratio of parameter norm
    to gradient norm; final parameters must agree within 1e-4."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testLars(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.LARS(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum_beta=momentum_beta,
                epsilon=epsilon,
                lars_coefficient=lars_coefficient,
            ).minimize(loss)
            return x

    checkpoint = flow.train.CheckPoint()
    checkpoint.init()
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = None
    for i in range(train_iters + 1):
        # i == 0 captures the untrained value; the rest apply the updates.
        x = testLars(random_masks_seq[i])
        if i == 0:
            init_value = np.copy(x)

    def lars_update_numpy(
        param,
        gradient,
        iter,  # unused: LARS has no per-step bias correction here
        momentum,
        learning_rate=0.001,
        momentum_beta=0.9,
        epsilon=1e-9,
        lars_coefficient=0.0001,
    ):
        """One LARS step: compute the trust-ratio local learning rate from
        the RMS norms of param and gradient, then a momentum update.

        NOTE(review): 'norms' here are sqrt(mean(.^2)) (RMS), not the L2
        norm — the ratio is the same either way since both are scaled by
        the same 1/sqrt(n) factor.
        """
        import math

        model_norm = math.sqrt(np.mean(param * param))
        model_diff_norm = math.sqrt(np.mean(gradient * gradient))
        local_learning_rate = (
            learning_rate * lars_coefficient * model_norm / (epsilon + model_diff_norm)
        )
        momentum_t = momentum_beta * momentum - local_learning_rate * gradient
        param_t = param + momentum_t
        return param_t, momentum_t

    param = init_value
    # d(mean(x * mask))/dx = mask / x.size; constant part precomputed.
    gradient = np.full(param.shape, 1.0 / np.prod(param.shape))
    momentum = np.zeros(param.shape)
    for i in range(train_iters):
        param, momentum = lars_update_numpy(
            param,
            gradient * random_masks_seq[i],
            i,
            momentum,
            learning_rate,
            momentum_beta,
            epsilon,
            lars_coefficient,
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_tensorflow_sgd(
    device_type, x_shape, momentum, learning_rate, train_iters
):
    """Train one variable with OneFlow SGD (optional momentum) and tf.keras
    SGD on the same random-mask sequence; final parameters must agree
    within 1e-4."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    # Bug fix: a fresh flow.FunctionConfig() used to be passed here, silently
    # discarding the default_data_type set on func_config above.
    @flow.global_function(type="train", function_config=func_config)
    def testSGD(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum=momentum,
            ).minimize(loss)
            return x

    checkpoint = flow.train.CheckPoint()
    checkpoint.init()
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = None
    for i in range(train_iters + 1):
        # i == 0 captures the untrained value; the rest apply the updates.
        x = testSGD(random_masks_seq[i])
        if i == 0:
            init_value = np.copy(x)
    # TensorFlow reference run over the identical mask sequence.
    var = tf.Variable(init_value)
    opt = tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=momentum, nesterov=False
    )
    for i in range(train_iters):
        with tf.GradientTape() as tape:
            random_mask = tf.Variable(random_masks_seq[i])
            loss = tf.reduce_mean(var * random_mask)
        gradients = tape.gradient(loss, var)
        opt.apply_gradients(zip([gradients], [var]))
    assert np.allclose(x.flatten(), var.numpy().flatten(), rtol=1e-4, atol=1e-4,)
def test_rmsprop(test_case):
    """Sweep RMSProp configurations and compare OneFlow against TensorFlow."""
    params = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10,)]),
            ("centered", [True, False]),
            ("decay_rate", [0.9]),
            ("learning_rate", [1]),
            ("train_iters", [10]),
        ]
    )
    for case in GenArgList(params):
        compare_with_tensorflow_rmsprop(*case)
def test_adam(test_case):
    """Sweep Adam configurations and compare OneFlow against TensorFlow."""
    params = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10,)]),
            ("beta1", [0.9]),
            ("beta2", [0.99]),
            ("epsilon", [1e-9]),
            ("learning_rate", [1]),
            ("train_iters", [10]),
        ]
    )
    for case in GenArgList(params):
        compare_with_tensorflow_adam(*case)
def test_lazy_adam(test_case):
    """Sweep LazyAdam configurations and compare OneFlow against NumPy."""
    params = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10,)]),
            ("beta1", [0.9]),
            ("beta2", [0.99]),
            ("epsilon", [1e-9]),
            ("learning_rate", [1]),
            ("train_iters", [10]),
        ]
    )
    for case in GenArgList(params):
        compare_with_numpy_lazy_adam(*case)
def test_adamw(test_case):
    """Sweep AdamW configurations and compare OneFlow against NumPy."""
    params = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10,)]),
            ("beta1", [0.9]),
            ("beta2", [0.99]),
            ("epsilon", [1e-9]),
            ("weight_decay", [0.9]),
            ("learning_rate", [1]),
            ("train_iters", [10]),
        ]
    )
    for case in GenArgList(params):
        compare_with_numpy_adamw(*case)
def test_lars(test_case):
    """Sweep LARS configurations and compare OneFlow against NumPy."""
    params = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10,)]),
            ("momentum_beta", [0.9]),
            ("epsilon", [1e-9]),
            ("lars_coefficient", [0.0001]),
            ("learning_rate", [1]),
            ("train_iters", [10]),
        ]
    )
    for case in GenArgList(params):
        compare_with_numpy_lars(*case)
def test_sgd(test_case):
    """Sweep SGD configurations and compare OneFlow against TensorFlow."""
    params = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10,)]),
            ("momentum", [0.9, 0.0]),
            ("learning_rate", [1]),
            ("train_iters", [10]),
        ]
    )
    for case in GenArgList(params):
        compare_with_tensorflow_sgd(*case)
| 30.828877
| 88
| 0.604568
| 2,198
| 17,295
| 4.540946
| 0.102821
| 0.036469
| 0.028053
| 0.018736
| 0.80012
| 0.770965
| 0.753532
| 0.737
| 0.730688
| 0.720369
| 0
| 0.02585
| 0.277537
| 17,295
| 560
| 89
| 30.883929
| 0.772949
| 0.043192
| 0
| 0.72949
| 0
| 0
| 0.031326
| 0
| 0
| 0
| 0
| 0
| 0.026608
| 1
| 0.046563
| false
| 0
| 0.017738
| 0
| 0.084257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e6aeaeddf1a8a1737678af3911fa67658746f01
| 2,677
|
py
|
Python
|
src/python/test/affinities/test_affinities.py
|
constantinpape/affogato
|
22ea369313b01e10f5cfefa21b7db0df719f75b0
|
[
"MIT"
] | 6
|
2021-04-11T00:47:37.000Z
|
2021-10-03T23:41:06.000Z
|
src/python/test/affinities/test_affinities.py
|
constantinpape/affogato
|
22ea369313b01e10f5cfefa21b7db0df719f75b0
|
[
"MIT"
] | 8
|
2019-05-28T16:12:07.000Z
|
2022-01-10T18:21:03.000Z
|
src/python/test/affinities/test_affinities.py
|
constantinpape/affogato
|
22ea369313b01e10f5cfefa21b7db0df719f75b0
|
[
"MIT"
] | 1
|
2021-06-01T12:16:23.000Z
|
2021-06-01T12:16:23.000Z
|
import unittest
import numpy as np
class TestAffinities(unittest.TestCase):
    """Shape and content checks for affogato.affinities.compute_affinities.

    The four tests previously duplicated the same body; the shared logic now
    lives in _check_affinities and each test only supplies shape, offsets and
    the ignore-label flag.
    """

    # Offset patterns exercised by the 2d / 3d tests (direct neighbors,
    # longer-range, and asymmetric offsets).
    OFFSETS_2D = [[-1, 0], [0, -1],
                  [-5, 0], [0, -5],
                  [10, 10], [3, 9]]
    OFFSETS_3D = [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
                  [-5, 0, 0], [0, -5, 0], [0, 0, -5],
                  [10, 10, 10], [3, 9, 27], [0, 9, 8]]

    def _check_affinities(self, shape, offsets, with_ignore_label):
        """Run compute_affinities on random labels and validate the result.

        Checks that affs and mask both have shape (n_offsets,) + shape and
        that each contains at least one zero entry (statistically certain
        for random labels in [0, 100)).
        """
        from affogato.affinities import compute_affinities
        labels = np.random.randint(0, 100, size=shape)
        if with_ignore_label:
            # Third positional argument enables the ignore-label code path,
            # exactly as the original tests called it.
            affs, mask = compute_affinities(labels, offsets, True)
        else:
            affs, mask = compute_affinities(labels, offsets)
        expected_shape = (len(offsets),) + labels.shape
        self.assertEqual(affs.shape, expected_shape)
        self.assertEqual(mask.shape, expected_shape)
        self.assertNotEqual(np.sum(affs == 0), 0)
        self.assertNotEqual(np.sum(mask == 0), 0)

    def test_affs_2d(self):
        """2d affinities without an ignore label."""
        self._check_affinities((100, 100), self.OFFSETS_2D, False)

    def test_affs_ignore_2d(self):
        """2d affinities with the ignore label enabled."""
        self._check_affinities((100, 100), self.OFFSETS_2D, True)

    def test_affs_3d(self):
        """3d affinities without an ignore label."""
        self._check_affinities((100, 100, 100), self.OFFSETS_3D, False)

    def test_affs_ignore_3d(self):
        """3d affinities with the ignore label enabled."""
        self._check_affinities((100, 100, 100), self.OFFSETS_3D, True)
# Allow running this test module directly: python test_affinities.py
if __name__ == '__main__':
    unittest.main()
| 38.242857
| 62
| 0.564811
| 343
| 2,677
| 4.297376
| 0.12828
| 0.037992
| 0.108548
| 0.119403
| 0.934193
| 0.934193
| 0.934193
| 0.934193
| 0.934193
| 0.934193
| 0
| 0.081165
| 0.282032
| 2,677
| 69
| 63
| 38.797101
| 0.685744
| 0
| 0
| 0.842105
| 0
| 0
| 0.002988
| 0
| 0
| 0
| 0
| 0
| 0.280702
| 1
| 0.070175
| false
| 0
| 0.105263
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e7baef44655735893989175b58ea1c9261d9d02
| 11,681
|
py
|
Python
|
UnityEngine/Debug/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/Debug/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/Debug/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class Debug:
    """Static stub mirroring UnityEngine.Debug for UdonPie auto-completion.

    Every method body is a no-op (``pass``); only the signatures and the
    ``:param:``/``:type:``/``:rtype:`` docstrings matter to the completion
    tooling. For each overloaded API the ``@overload``-decorated variants
    document the accepted argument combinations, and a final plain
    definition of the same name (all arguments defaulted to ``None``) is
    the one actually left bound on the class.
    """

    def __new__(cls, arg1=None):
        '''
        :returns: Debug
        :rtype: UnityEngine.Debug
        '''
        pass

    @staticmethod
    def ctor():
        '''
        :returns: Debug
        :rtype: UnityEngine.Debug
        '''
        pass

    @staticmethod
    @overload
    def DrawLine(arg1, arg2, arg3, arg4):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        :param arg3: Color
        :type arg3: UnityEngine.Color
        :param arg4: Single
        :type arg4: System.Single or float
        '''
        pass

    @staticmethod
    @overload
    def DrawLine(arg1, arg2, arg3):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        :param arg3: Color
        :type arg3: UnityEngine.Color
        '''
        pass

    @staticmethod
    @overload
    def DrawLine(arg1, arg2):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        '''
        pass

    @staticmethod
    @overload
    def DrawLine(arg1, arg2, arg3, arg4, arg5):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        :param arg3: Color
        :type arg3: UnityEngine.Color
        :param arg4: Single
        :type arg4: System.Single or float
        :param arg5: Boolean
        :type arg5: System.Boolean or bool
        '''
        pass

    # Catch-all implementation left bound after the overload declarations.
    @staticmethod
    def DrawLine(arg1=None, arg2=None, arg3=None, arg4=None, arg5=None):
        pass

    @staticmethod
    @overload
    def DrawRay(arg1, arg2, arg3, arg4):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        :param arg3: Color
        :type arg3: UnityEngine.Color
        :param arg4: Single
        :type arg4: System.Single or float
        '''
        pass

    @staticmethod
    @overload
    def DrawRay(arg1, arg2, arg3):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        :param arg3: Color
        :type arg3: UnityEngine.Color
        '''
        pass

    @staticmethod
    @overload
    def DrawRay(arg1, arg2):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        '''
        pass

    @staticmethod
    @overload
    def DrawRay(arg1, arg2, arg3, arg4, arg5):
        '''
        :param arg1: Vector3
        :type arg1: UnityEngine.Vector3
        :param arg2: Vector3
        :type arg2: UnityEngine.Vector3
        :param arg3: Color
        :type arg3: UnityEngine.Color
        :param arg4: Single
        :type arg4: System.Single or float
        :param arg5: Boolean
        :type arg5: System.Boolean or bool
        '''
        pass

    @staticmethod
    def DrawRay(arg1=None, arg2=None, arg3=None, arg4=None, arg5=None):
        pass

    @staticmethod
    def DebugBreak():
        pass

    @staticmethod
    @overload
    def Log(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        '''
        pass

    @staticmethod
    @overload
    def Log(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        '''
        pass

    @staticmethod
    def Log(arg1=None, arg2=None):
        pass

    @staticmethod
    @overload
    def LogFormat(arg1, arg2):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :param arg2: ObjectArray
        :type arg2: System.ObjectArray
        '''
        pass

    @staticmethod
    @overload
    def LogFormat(arg1, arg2, arg3):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: String
        :type arg2: System.String or str
        :param arg3: ObjectArray
        :type arg3: System.ObjectArray
        '''
        pass

    @staticmethod
    def LogFormat(arg1=None, arg2=None, arg3=None):
        pass

    @staticmethod
    @overload
    def LogError(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        '''
        pass

    @staticmethod
    @overload
    def LogError(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        '''
        pass

    @staticmethod
    def LogError(arg1=None, arg2=None):
        pass

    @staticmethod
    @overload
    def LogErrorFormat(arg1, arg2):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :param arg2: ObjectArray
        :type arg2: System.ObjectArray
        '''
        pass

    @staticmethod
    @overload
    def LogErrorFormat(arg1, arg2, arg3):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: String
        :type arg2: System.String or str
        :param arg3: ObjectArray
        :type arg3: System.ObjectArray
        '''
        pass

    @staticmethod
    def LogErrorFormat(arg1=None, arg2=None, arg3=None):
        pass

    @staticmethod
    def get_developerConsoleVisible():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass

    @staticmethod
    def set_developerConsoleVisible(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass

    @staticmethod
    @overload
    def LogException(arg1):
        '''
        :param arg1: Exception
        :type arg1: System.Exception
        '''
        pass

    @staticmethod
    @overload
    def LogException(arg1, arg2):
        '''
        :param arg1: Exception
        :type arg1: System.Exception
        :param arg2: Object
        :type arg2: UnityEngine.Object
        '''
        pass

    @staticmethod
    def LogException(arg1=None, arg2=None):
        pass

    @staticmethod
    @overload
    def LogWarning(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        '''
        pass

    @staticmethod
    @overload
    def LogWarning(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        '''
        pass

    @staticmethod
    def LogWarning(arg1=None, arg2=None):
        pass

    @staticmethod
    @overload
    def LogWarningFormat(arg1, arg2):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :param arg2: ObjectArray
        :type arg2: System.ObjectArray
        '''
        pass

    @staticmethod
    @overload
    def LogWarningFormat(arg1, arg2, arg3):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: String
        :type arg2: System.String or str
        :param arg3: ObjectArray
        :type arg3: System.ObjectArray
        '''
        pass

    @staticmethod
    def LogWarningFormat(arg1=None, arg2=None, arg3=None):
        pass

    @staticmethod
    @overload
    def Assert(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass

    @staticmethod
    @overload
    def Assert(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Object
        :type arg2: UnityEngine.Object
        '''
        pass

    @staticmethod
    @overload
    def Assert(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Object
        :type arg2: System.Object
        '''
        pass

    @staticmethod
    @overload
    def Assert(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: String
        :type arg2: System.String or str
        '''
        pass

    @staticmethod
    @overload
    def Assert(arg1, arg2, arg3):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Object
        :type arg2: System.Object
        :param arg3: Object
        :type arg3: UnityEngine.Object
        '''
        pass

    @staticmethod
    @overload
    def Assert(arg1, arg2, arg3):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: String
        :type arg2: System.String or str
        :param arg3: Object
        :type arg3: UnityEngine.Object
        '''
        pass

    @staticmethod
    def Assert(arg1=None, arg2=None, arg3=None):
        pass

    @staticmethod
    @overload
    def AssertFormat(arg1, arg2, arg3):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: String
        :type arg2: System.String or str
        :param arg3: ObjectArray
        :type arg3: System.ObjectArray
        '''
        pass

    @staticmethod
    @overload
    def AssertFormat(arg1, arg2, arg3, arg4):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :param arg3: String
        :type arg3: System.String or str
        :param arg4: ObjectArray
        :type arg4: System.ObjectArray
        '''
        pass

    @staticmethod
    def AssertFormat(arg1=None, arg2=None, arg3=None, arg4=None):
        pass

    @staticmethod
    @overload
    def LogAssertion(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        '''
        pass

    @staticmethod
    @overload
    def LogAssertion(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        '''
        pass

    @staticmethod
    def LogAssertion(arg1=None, arg2=None):
        pass

    @staticmethod
    @overload
    def LogAssertionFormat(arg1, arg2):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :param arg2: ObjectArray
        :type arg2: System.ObjectArray
        '''
        pass

    @staticmethod
    @overload
    def LogAssertionFormat(arg1, arg2, arg3):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: String
        :type arg2: System.String or str
        :param arg3: ObjectArray
        :type arg3: System.ObjectArray
        '''
        pass

    @staticmethod
    def LogAssertionFormat(arg1=None, arg2=None, arg3=None):
        pass

    @staticmethod
    def Equals(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass

    @staticmethod
    def GetHashCode():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass

    @staticmethod
    def GetType():
        '''
        :returns: Type
        :rtype: System.Type
        '''
        pass

    @staticmethod
    def ToString():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass
| 21.915572
| 72
| 0.542419
| 1,127
| 11,681
| 5.616681
| 0.053239
| 0.139021
| 0.12891
| 0.145024
| 0.914218
| 0.904581
| 0.874724
| 0.828436
| 0.757346
| 0.719273
| 0
| 0.044585
| 0.364438
| 11,681
| 532
| 73
| 21.956767
| 0.808055
| 0.404332
| 0
| 0.728155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07767
| 1
| 0.271845
| false
| 0.271845
| 0.019417
| 0
| 0.296117
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
0e92690e4f4cd2da668840c536cb293dacb3ca94
| 132
|
py
|
Python
|
flaxseed/datasets/vision/__init__.py
|
fkodom/flaxseed
|
45244056fc8614a5d0fe9b7d9e3f2aa7d7875123
|
[
"MIT"
] | 1
|
2021-03-22T15:52:37.000Z
|
2021-03-22T15:52:37.000Z
|
flaxseed/datasets/vision/__init__.py
|
fkodom/flaxseed
|
45244056fc8614a5d0fe9b7d9e3f2aa7d7875123
|
[
"MIT"
] | null | null | null |
flaxseed/datasets/vision/__init__.py
|
fkodom/flaxseed
|
45244056fc8614a5d0fe9b7d9e3f2aa7d7875123
|
[
"MIT"
] | null | null | null |
from flaxseed.datasets.vision.cifar import CIFAR10, CIFAR100
from flaxseed.datasets.vision.mnist import MNIST, FashionMNIST, KMNIST
| 44
| 70
| 0.848485
| 17
| 132
| 6.588235
| 0.647059
| 0.214286
| 0.357143
| 0.464286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041322
| 0.083333
| 132
| 2
| 71
| 66
| 0.884298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
7ecb06e55296121c54db4965dd63cd329d841eee
| 1,329
|
py
|
Python
|
test_pydes.py
|
n0irx/python-DES
|
12a5d8fa8341a035762a91cb7612dfd5330976ae
|
[
"MIT"
] | null | null | null |
test_pydes.py
|
n0irx/python-DES
|
12a5d8fa8341a035762a91cb7612dfd5330976ae
|
[
"MIT"
] | null | null | null |
test_pydes.py
|
n0irx/python-DES
|
12a5d8fa8341a035762a91cb7612dfd5330976ae
|
[
"MIT"
] | null | null | null |
from pyDes import *
#############################################################################
# Examples #
#############################################################################
def _des_example(title, **des_kwargs):
    """Shared body for the DES examples: encrypt and decrypt a sample
    string, printing key, data, ciphertext, plaintext and elapsed time.

    title: heading printed before the run.
    des_kwargs: forwarded to the des() constructor (e.g. number_of_rounds).
    """
    from time import time
    # example of DES encrypting in CBC mode with the IV of "\0\0\0\0\0\0\0\0"
    print(title)
    t = time()
    k = des("DESCRYPT", **des_kwargs)
    data = "DES encryption algorithm"
    print("Key : %r" % k.getKey())
    print("Data : %r" % data)
    d = k.encrypt(data)
    print("Encrypted: %r" % d)
    d = k.decrypt(d)
    print("Decrypted: %r" % d)
    print("DES time taken: %f (6 crypt operations)" % (time() - t))
    print("")


def example_16_rounds_DES():
    """Run the full-strength (default 16 round) DES example."""
    _des_example("Example 16 Rounds DES")


def example_3_rounds_DES():
    """Run the reduced-strength 3 round DES example."""
    _des_example("Example 3 Rounds DES", number_of_rounds=3)
# Run both examples when executed as a script.
if __name__ == '__main__':
    example_16_rounds_DES()
    example_3_rounds_DES()
| 26.058824
| 77
| 0.504891
| 175
| 1,329
| 3.708571
| 0.251429
| 0.043143
| 0.05547
| 0.061633
| 0.832049
| 0.832049
| 0.832049
| 0.751926
| 0.751926
| 0.751926
| 0
| 0.027695
| 0.239278
| 1,329
| 51
| 78
| 26.058824
| 0.614243
| 0.120391
| 0
| 0.6875
| 0
| 0
| 0.292949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.15625
| 0.4375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
bc28ce210b40395783b6be356ab229789d486dd5
| 12,622
|
py
|
Python
|
tests/api/test_machine.py
|
ggardet/mr-provisioner
|
8a57806e702d9953a3af304fe52dec8e5de25aff
|
[
"ECL-2.0",
"Apache-2.0"
] | 25
|
2018-04-12T15:56:47.000Z
|
2021-05-17T04:31:20.000Z
|
tests/api/test_machine.py
|
ggardet/mr-provisioner
|
8a57806e702d9953a3af304fe52dec8e5de25aff
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2017-06-21T10:59:05.000Z
|
2018-03-22T08:52:18.000Z
|
tests/api/test_machine.py
|
ggardet/mr-provisioner
|
8a57806e702d9953a3af304fe52dec8e5de25aff
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2017-06-16T08:17:03.000Z
|
2019-10-03T09:49:46.000Z
|
import json
from mr_provisioner.models import Machine
def test_empty_machine_list_no_machines(client, valid_headers_nonadmin):
    """Listing machines when none exist returns 200 with an empty JSON list."""
    r = client.get('/api/v1/machine', headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert data == []
def test_machine_list_nonadmin_only(client, valid_headers_nonadmin, valid_plain_machine, valid_moonshot_machine):
    """Without show_all, a non-admin sees none of the existing machines.

    NOTE(review): presumably the default listing is filtered to machines
    assigned to the caller — confirm against the endpoint implementation.
    """
    r = client.get('/api/v1/machine', headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert len(data) == 0
def test_machine_list_query(client, valid_headers_nonadmin, machines_for_reservation):
    """A single s-expression predicate on bmc_type filters the listing."""
    q = """
    (= bmc_type "moonshot")
    """
    r = client.get('/api/v1/machine?show_all=true&q=%s' % q, headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert set([m['name'] for m in data]) == set(['machine1', 'machine4'])
def test_machine_list_query2(client, valid_headers_nonadmin, machines_for_reservation):
    """An (or ...) of two bmc_type predicates returns the union of matches."""
    q = """
    (or (= bmc_type "moonshot")
        (= bmc_type "plain"))
    """
    r = client.get('/api/v1/machine?show_all=true&q=%s' % q, headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert set([m['name'] for m in data]) == set(['machine0', 'machine1', 'machine4'])
def test_machine_list_nonadmin(client, valid_headers_nonadmin, valid_plain_machine, valid_moonshot_machine):
    """With show_all=true, a non-admin sees all existing machines."""
    r = client.get('/api/v1/machine?show_all=true', headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert len(data) == 2
def test_machine_list_admin(client, valid_headers_admin, valid_plain_machine, valid_moonshot_machine):
    """With show_all=true, an admin sees all existing machines.

    Bug fix: this test previously requested with the *nonadmin* headers,
    making it a byte-for-byte duplicate of test_machine_list_nonadmin and
    leaving the admin listing path untested. It now uses the
    valid_headers_admin fixture (already used by other tests in this file).
    """
    r = client.get('/api/v1/machine?show_all=true', headers=valid_headers_admin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert len(data) == 2
def test_get_existing_machine(client, valid_headers_nonadmin, valid_plain_machine):
    """Fetching a machine by id returns its name, id and hostname."""
    r = client.get('/api/v1/machine/%d' % valid_plain_machine.id, headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert valid_plain_machine.name == data['name']
    assert valid_plain_machine.id == data['id']
    assert valid_plain_machine.hostname == data['hostname']
def test_get_non_existing_machine(client, valid_headers_nonadmin):
    """Fetching a machine id that does not exist yields 404."""
    r = client.get('/api/v1/machine/123', headers=valid_headers_nonadmin)
    assert r.status_code == 404
def test_list_machine_assignees_none(client, valid_headers_admin, valid_plain_machine):
    """A machine with no assignments returns an empty assignee list."""
    r = client.get('/api/v1/machine/%d/assignee' % valid_plain_machine.id, headers=valid_headers_admin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert len(data) == 0
def test_list_machine_assignees_one(client, valid_headers_nonadmin, valid_assignment_nonadmin):
    """A machine with one assignment lists that user and reason."""
    r = client.get('/api/v1/machine/%d/assignee' % valid_assignment_nonadmin.machine_id, headers=valid_headers_nonadmin)
    assert r.status_code == 200
    data = json.loads(r.data.decode('utf-8'))
    assert len(data) == 1
    assert data[0]['user'] == valid_assignment_nonadmin.user.username
    assert data[0]['reason'] == valid_assignment_nonadmin.reason
def test_assign_machine_user(client, valid_headers_admin, user_nonadmin, valid_plain_machine):
    """POSTing an assignee creates the assignment and echoes it back (201)."""
    body = json.dumps({
        'user': user_nonadmin.username,
        'reason': 'API testing',
    })
    r = client.post('/api/v1/machine/%d/assignee' % valid_plain_machine.id, headers=valid_headers_admin, data=body)
    assert r.status_code == 201
    data = json.loads(r.data.decode('utf-8'))
    # Verify both the database-side state and the response payload.
    assignees = valid_plain_machine.assignments
    assert len(assignees) == 1
    assert assignees[0].user.id == user_nonadmin.id
    assert assignees[0].reason == 'API testing'
    assert data['id'] == assignees[0].id
    assert data['user'] == user_nonadmin.username
    assert data['reason'] == 'API testing'
def test_remove_assignee(client, valid_headers_admin, valid_plain_machine, valid_assignment_nonadmin):
    """DELETE on an assignment removes it from the machine."""
    url = (f'/api/v1/machine/{valid_assignment_nonadmin.machine_id}'
           f'/assignee/{valid_assignment_nonadmin.id}')
    resp = client.delete(url, headers=valid_headers_admin)
    assert resp.status_code == 204
    assert len(valid_plain_machine.assignments) == 0
def test_change_assignee(client, valid_headers_admin, valid_plain_machine, valid_assignment_nonadmin):
    """PUT on an assignment updates its reason."""
    url = (f'/api/v1/machine/{valid_assignment_nonadmin.machine_id}'
           f'/assignee/{valid_assignment_nonadmin.id}')
    resp = client.put(url,
                      headers=valid_headers_admin,
                      data=json.dumps({'reason': 'API testing'}))
    assert resp.status_code == 200
    assignments = valid_plain_machine.assignments
    payload = json.loads(resp.data.decode('utf-8'))
    assert len(assignments) == 1
    assert assignments[0].reason == 'API testing'
    assert payload['reason'] == 'API testing'
def test_remove_assignee_self(client, valid_headers_nonadmin, valid_plain_machine, valid_assignment_nonadmin):
    """A user can drop their own assignment via the 'self' endpoint."""
    resp = client.delete(f'/api/v1/machine/{valid_plain_machine.id}/assignee/self',
                         headers=valid_headers_nonadmin)
    assert resp.status_code == 204
    assert len(valid_plain_machine.assignments) == 0
def test_change_assignee_self(client, valid_headers_nonadmin, valid_plain_machine, valid_assignment_nonadmin):
    """A user can edit their own assignment reason via the 'self' endpoint."""
    resp = client.put(f'/api/v1/machine/{valid_plain_machine.id}/assignee/self',
                      headers=valid_headers_nonadmin,
                      data=json.dumps({'reason': 'API testing'}))
    assert resp.status_code == 200
    assignments = valid_plain_machine.assignments
    payload = json.loads(resp.data.decode('utf-8'))
    assert len(assignments) == 1
    assert assignments[0].reason == 'API testing'
    assert payload['reason'] == 'API testing'
def test_set_machine_parameters(client, valid_headers_nonadmin,
        valid_plain_machine, valid_image_initrd, valid_image_kernel,
        valid_preseed, valid_subarch_bl):
    """PUT with a full parameter set updates the machine and echoes the config."""
    request_body = json.dumps({
        'kernel_id': valid_image_kernel.id,
        'initrd_id': valid_image_initrd.id,
        'preseed_id': valid_preseed.id,
        'subarch': valid_subarch_bl.name,
        'kernel_opts': '',
        'netboot_enabled': True,
    })
    resp = client.put(f'/api/v1/machine/{valid_plain_machine.id}',
                      headers=valid_headers_nonadmin,
                      data=request_body)
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['initrd_id'] == valid_image_initrd.id
    assert payload['kernel_id'] == valid_image_kernel.id
    assert payload['subarch'] == valid_subarch_bl.name
    assert payload['netboot_enabled']  # expected truthy
def test_set_netboot_without_bootloader(client, valid_headers_nonadmin,
        valid_plain_machine, valid_image_initrd, valid_image_kernel,
        valid_preseed, valid_subarch_no_bl):
    """Enabling netboot on a subarch without a bootloader is rejected (400)."""
    request_body = json.dumps({
        'kernel_id': valid_image_kernel.id,
        'initrd_id': valid_image_initrd.id,
        'preseed_id': valid_preseed.id,
        'subarch': valid_subarch_no_bl.name,
        'kernel_opts': '',
        'netboot_enabled': True,
    })
    resp = client.put(f'/api/v1/machine/{valid_plain_machine.id}',
                      headers=valid_headers_nonadmin,
                      data=request_body)
    assert resp.status_code == 400
def test_machine_interface_empty_list(client, valid_headers_nonadmin, valid_plain_machine):
    """A machine without interfaces yields an empty JSON array."""
    resp = client.get(f'/api/v1/machine/{valid_plain_machine.id}/interface',
                      headers=valid_headers_nonadmin)
    assert resp.status_code == 200
    assert json.loads(resp.data.decode('utf-8')) == []
def test_machine_interface_list(client, valid_headers_nonadmin, valid_interface_1, valid_plain_machine):
    """With one interface defined the listing contains exactly one entry."""
    resp = client.get(f'/api/v1/machine/{valid_plain_machine.id}/interface',
                      headers=valid_headers_nonadmin)
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert len(payload) == 1
def test_get_machine_interface(client, valid_headers_nonadmin, valid_interface_1, valid_plain_machine):
    """Fetching a single interface returns its id, mac and network name."""
    resp = client.get(
        f'/api/v1/machine/{valid_plain_machine.id}/interface/{valid_interface_1.id}',
        headers=valid_headers_nonadmin)
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['id'] == valid_interface_1.id
    assert payload['mac'] == valid_interface_1.mac
    assert payload['network_name'] == valid_interface_1.network.name
def test_reserve_machine_any(client, valid_headers_nonadmin, machines_for_reservation):
    """A null query reserves any currently available machine."""
    resp = client.post('/api/v1/machine/reservation',
                       headers=valid_headers_nonadmin,
                       data=json.dumps({'query': None}))
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    # Any of the unreserved fixture machines is an acceptable pick.
    assert payload['name'] in ('machine0', 'machine3', 'machine4')
def test_reserve_machine_bmc_type_eq(client, valid_headers_nonadmin, machines_for_reservation):
    """Equality on bmc_type narrows the reservation to the moonshot machine."""
    request_body = json.dumps({'query': '(= bmc_type "moonshot")'})
    resp = client.post('/api/v1/machine/reservation',
                       headers=valid_headers_nonadmin,
                       data=request_body)
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['name'] == 'machine4'
    # Reservation must have actually attached the requesting user.
    reserved = Machine.query.get(payload['id'])
    assert len(reserved.assignees) == 1
def test_reserve_machine_bmc_type_and_ne(client, valid_headers_nonadmin, machines_for_reservation):
    """A conjunction of two bmc_type inequalities leaves only machine3."""
    query = """
    (and (!= bmc_type "plain")
         (!= bmc_type "moonshot"))
    """
    resp = client.post('/api/v1/machine/reservation',
                       headers=valid_headers_nonadmin,
                       data=json.dumps({'query': query}))
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['name'] == 'machine3'
    reserved = Machine.query.get(payload['id'])
    assert len(reserved.assignees) == 1
def test_reserve_machine_name_like(client, valid_headers_nonadmin, machines_for_reservation):
    """Substring matches on name reserve the only available candidate."""
    query = """
    (or (=~ name "ine2")
        (=~ name "ine3"))
    """
    resp = client.post('/api/v1/machine/reservation',
                       headers=valid_headers_nonadmin,
                       data=json.dumps({'query': query}))
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    # machine2 matches the query too but is not reservable in this fixture set.
    assert payload['name'] == 'machine3'
    reserved = Machine.query.get(payload['id'])
    assert len(reserved.assignees) == 1
def test_reserve_machine_arch_like(client, valid_headers_nonadmin, machines_for_reservation):
    """An arch equality match reserves machine4."""
    query = """
    (= arch "some-other-arch")
    """
    resp = client.post('/api/v1/machine/reservation',
                       headers=valid_headers_nonadmin,
                       data=json.dumps({'query': query}))
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['name'] == 'machine4'
    reserved = Machine.query.get(payload['id'])
    assert len(reserved.assignees) == 1
def test_reserve_machine_empty_result(client, valid_headers_nonadmin, machines_for_reservation):
    """A contradictory query matches no machine and reports 404."""
    query = """
    (and (= bmc_type "plain")
         (= bmc_type "moonshot"))
    """
    resp = client.post('/api/v1/machine/reservation',
                       headers=valid_headers_nonadmin,
                       data=json.dumps({'query': query}))
    assert resp.status_code == 404
def test_change_subarch(client, valid_headers_nonadmin, valid_plain_machine, valid_subarch_bl, valid_image_kernel, valid_image_initrd):
    """Switching the subarch while supplying boot images succeeds."""
    request_body = json.dumps({
        'subarch': valid_subarch_bl.name,
        'kernel_id': valid_image_kernel.id,
        'initrd_id': valid_image_initrd.id,
        'preseed_id': None,
        'kernel_opts': '',
        'netboot_enabled': True,
    })
    resp = client.put(f'/api/v1/machine/{valid_plain_machine.id}',
                      headers=valid_headers_nonadmin,
                      data=request_body)
    assert resp.status_code == 200
    payload = json.loads(resp.data.decode('utf-8'))
    assert payload['subarch'] == valid_subarch_bl.name
    assert payload['initrd_id'] == valid_image_initrd.id
    assert payload['kernel_id'] == valid_image_kernel.id
    assert payload['netboot_enabled']  # expected truthy
| 32.28133
| 135
| 0.661939
| 1,643
| 12,622
| 4.809495
| 0.069994
| 0.082005
| 0.116426
| 0.058087
| 0.879398
| 0.853581
| 0.82612
| 0.817515
| 0.777651
| 0.759808
| 0
| 0.017658
| 0.210347
| 12,622
| 390
| 136
| 32.364103
| 0.775158
| 0.001188
| 0
| 0.719697
| 0
| 0
| 0.147652
| 0.046017
| 0
| 0
| 0
| 0
| 0.287879
| 1
| 0.102273
| false
| 0
| 0.007576
| 0
| 0.109848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc38af1184d6f396477e10296de18b2e321fcc46
| 13,388
|
py
|
Python
|
tests/test_interface.py
|
czerwe/zabbixmgm
|
4fd33d56d1800c062f4cac326ea2238cc11d6c05
|
[
"MIT"
] | null | null | null |
tests/test_interface.py
|
czerwe/zabbixmgm
|
4fd33d56d1800c062f4cac326ea2238cc11d6c05
|
[
"MIT"
] | null | null | null |
tests/test_interface.py
|
czerwe/zabbixmgm
|
4fd33d56d1800c062f4cac326ea2238cc11d6c05
|
[
"MIT"
] | null | null | null |
import unittest2
import zabbixmgm
from mock import Mock, call
# import responses_interface
from pprint import pprint
class interface_tests(unittest2.TestCase):
    """Tests for the hostinterface payloads produced by zbxinterface.get().

    Uses assertIn/assertNotIn instead of assertTrue('x' in ...) for readable
    failure messages; the long-deprecated commented-out tests that referenced
    the undefined `responses_group` module were removed.
    """

    # Fields expected in every hostinterface create/update payload.
    COMMON_KEYS = ('main', 'useip', 'dns', 'ip', 'port', 'bulk', 'type')

    def setUp(self):
        # Stand-in for the Zabbix API client; nothing hits the network.
        self.apimock = Mock()

    def tearDown(self):
        pass

    def _assert_common_keys(self, param):
        """Assert all generic interface fields are present in *param*."""
        for key in self.COMMON_KEYS:
            self.assertIn(key, param)

    def test_intf_get_auto_create_missing_hostid(self):
        """get() without a hostid must raise MissingField."""
        iface = zabbixmgm.zbxinterface(self.apimock)
        with self.assertRaises(zabbixmgm.core.MissingField):
            command, param = iface.get()

    def test_intf_get_auto_create_dict(self):
        """A mask with only hostid selects hostinterface.create."""
        iface = zabbixmgm.zbxinterface(self.apimock, mask={'hostid': 777})
        command, param = iface.get()
        self.assertEqual(command, 'hostinterface.create')
        self.assertNotIn('interfaceid', param)
        self.assertIn('hostid', param)
        self._assert_common_keys(param)

    def test_intf_get_auto_update_dict(self):
        """A mask carrying an interfaceid selects hostinterface.update."""
        iface = zabbixmgm.zbxinterface(self.apimock, mask={'hostid': 777, 'interfaceid': 888})
        command, param = iface.get()
        self.assertEqual(command, 'hostinterface.update')
        self.assertIn('hostid', param)
        self.assertIn('interfaceid', param)
        self._assert_common_keys(param)

    def test_intf_get_auto_hostcreate_dict(self):
        """'hostcreate' mode strips both ids and always creates."""
        iface = zabbixmgm.zbxinterface(self.apimock, mask={'hostid': 777, 'interfaceid': 888})
        command, param = iface.get('hostcreate')
        self.assertEqual(command, 'hostinterface.create')
        self.assertNotIn('hostid', param)
        self.assertNotIn('interfaceid', param)
        self._assert_common_keys(param)

    def test_intf_get_auto_create_defaults(self):
        """Default field values for a freshly constructed interface."""
        iface = zabbixmgm.zbxinterface(self.apimock, hostid=123)
        command, param = iface.get()
        self.assertEqual(command, 'hostinterface.create')
        self.assertNotIn('interfaceid', param)
        self._assert_common_keys(param)
        self.assertIn('hostid', param)
        self.assertEqual(param['hostid'], '123')  # ids are stringified
        self.assertEqual(param['main'], 0)
        self.assertEqual(param['useip'], 1)
        self.assertEqual(param['dns'], '')
        self.assertEqual(param['ip'], '127.0.0.1')
        self.assertEqual(param['port'], '10050')
        self.assertEqual(param['bulk'], 1)
        self.assertEqual(param['type'], zabbixmgm.zbxinterface.TYPE_AGENT)

    def test_intf_get_auto_create_hostChanges(self):
        """Setting a DNS name at construction flips useip off and clears ip."""
        iface = zabbixmgm.zbxinterface(self.apimock, hostid=123, host='testhost.local')
        command, param = iface.get()
        self.assertEqual(command, 'hostinterface.create')
        self.assertNotIn('interfaceid', param)
        self._assert_common_keys(param)
        self.assertIn('hostid', param)
        self.assertEqual(param['hostid'], '123')
        self.assertEqual(param['main'], 0)
        self.assertEqual(param['useip'], 0)
        self.assertEqual(param['dns'], 'testhost.local')
        self.assertEqual(param['ip'], '')
        self.assertEqual(param['port'], '10050')
        self.assertEqual(param['bulk'], 1)
        self.assertEqual(param['type'], zabbixmgm.zbxinterface.TYPE_AGENT)

    def test_intf_get_auto_create_changevalues(self):
        """Constructor keyword overrides land in the payload unchanged."""
        iface = zabbixmgm.zbxinterface(self.apimock, hostid=123, main=1, bulk=0,
                                       type=zabbixmgm.zbxinterface.TYPE_JMX)
        command, param = iface.get()
        self.assertEqual(command, 'hostinterface.create')
        self.assertNotIn('interfaceid', param)
        self._assert_common_keys(param)
        self.assertIn('hostid', param)
        self.assertEqual(param['hostid'], '123')
        self.assertEqual(param['main'], 1)
        self.assertEqual(param['useip'], 1)
        self.assertEqual(param['dns'], '')
        self.assertEqual(param['ip'], '127.0.0.1')
        self.assertEqual(param['port'], '10050')
        self.assertEqual(param['bulk'], 0)
        self.assertEqual(param['type'], zabbixmgm.zbxinterface.TYPE_JMX)

    def test_intf_get_auto_update_dict_correctcheck(self):
        """Masked ids are stringified into the update payload."""
        iface = zabbixmgm.zbxinterface(self.apimock, mask={'hostid': 777, 'interfaceid': 888})
        command, param = iface.get()
        self.assertEqual(command, 'hostinterface.update')
        self._assert_common_keys(param)
        self.assertIn('hostid', param)
        self.assertEqual(param['hostid'], '777')
        self.assertIn('interfaceid', param)
        self.assertEqual(param['interfaceid'], '888')

    def test_intf_host_dns(self):
        """Assigning a hostname switches the interface from ip to dns mode."""
        iface = zabbixmgm.zbxinterface(self.apimock)
        self.assertEqual(iface.ip, '127.0.0.1')
        self.assertEqual(iface.dns, '')
        self.assertEqual(iface.useip, 1)
        iface.host = 'test.host.local'
        self.assertEqual(iface.ip, '')
        self.assertEqual(iface.dns, 'test.host.local')
        self.assertEqual(iface.useip, 0)

    def test_intf_host_ip(self):
        """Assigning an IPv4 literal keeps the interface in ip mode."""
        iface = zabbixmgm.zbxinterface(self.apimock)
        self.assertEqual(iface.ip, '127.0.0.1')
        self.assertEqual(iface.dns, '')
        self.assertEqual(iface.useip, 1)
        iface.host = '10.0.0.1'
        self.assertEqual(iface.ip, '10.0.0.1')
        self.assertEqual(iface.dns, '')
        self.assertEqual(iface.useip, 1)

    def test_intf_host_ip2dns2ip(self):
        """host can be toggled ip -> dns -> ip and the derived fields follow."""
        iface = zabbixmgm.zbxinterface(self.apimock)
        self.assertEqual(iface.ip, '127.0.0.1')
        self.assertEqual(iface.dns, '')
        self.assertEqual(iface.useip, 1)
        # (host value, expected ip, expected dns, expected useip)
        transitions = (
            ('10.0.0.1', '10.0.0.1', '', 1),
            ('test2.host.local', '', 'test2.host.local', 0),
            ('10.0.1.1', '10.0.1.1', '', 1),
        )
        for host, ip, dns, useip in transitions:
            iface.host = host
            self.assertEqual(iface.ip, ip)
            self.assertEqual(iface.dns, dns)
            self.assertEqual(iface.useip, useip)
| 36.78022
| 184
| 0.616298
| 1,529
| 13,388
| 5.301504
| 0.083061
| 0.175796
| 0.081421
| 0.088083
| 0.880212
| 0.84604
| 0.816186
| 0.752652
| 0.703183
| 0.644461
| 0
| 0.024583
| 0.243427
| 13,388
| 363
| 185
| 36.881543
| 0.775694
| 0.373469
| 0
| 0.751515
| 0
| 0
| 0.100846
| 0
| 0
| 0
| 0
| 0
| 0.733333
| 1
| 0.078788
| false
| 0.006061
| 0.024242
| 0
| 0.109091
| 0.006061
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
70e41a49bfb4b3a2d426ead745d3b9fa1af4b74a
| 109
|
py
|
Python
|
tests/test_make_dataset.py
|
vladkyr/ium-21z
|
fa81a35491d04760d5e0037318a930790b632a7a
|
[
"MIT"
] | null | null | null |
tests/test_make_dataset.py
|
vladkyr/ium-21z
|
fa81a35491d04760d5e0037318a930790b632a7a
|
[
"MIT"
] | null | null | null |
tests/test_make_dataset.py
|
vladkyr/ium-21z
|
fa81a35491d04760d5e0037318a930790b632a7a
|
[
"MIT"
] | null | null | null |
from project_name.data.make_dataset import dummy_sum
def test_dummy_sum():
    """dummy_sum adds its two arguments."""
    expected = 5
    assert dummy_sum(2, 3) == expected
| 18.166667
| 52
| 0.752294
| 19
| 109
| 4
| 0.789474
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032609
| 0.155963
| 109
| 5
| 53
| 21.8
| 0.793478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cb85f50b49dca0ba59361dd654f996c5f569ef14
| 2,828
|
py
|
Python
|
python/tests/test_encoder.py
|
jimenezjose/fusion-engine-client
|
2de4dbccfb6b9a0746b7f3ef5a170f1332f93bea
|
[
"MIT"
] | 8
|
2020-08-29T22:03:37.000Z
|
2022-01-31T00:54:56.000Z
|
python/tests/test_encoder.py
|
jimenezjose/fusion-engine-client
|
2de4dbccfb6b9a0746b7f3ef5a170f1332f93bea
|
[
"MIT"
] | 8
|
2020-09-06T05:32:18.000Z
|
2022-01-16T20:34:21.000Z
|
python/tests/test_encoder.py
|
jimenezjose/fusion-engine-client
|
2de4dbccfb6b9a0746b7f3ef5a170f1332f93bea
|
[
"MIT"
] | 8
|
2020-09-18T19:05:58.000Z
|
2021-12-29T20:55:36.000Z
|
import numpy as np
from fusion_engine_client.messages import PoseMessage, PoseAuxMessage
from fusion_engine_client.parsers import FusionEngineEncoder
import logging
# Emit timestamped log records and enable DEBUG output for the point_one
# library so encoder internals are visible when an assertion fails.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('point_one').setLevel(logging.DEBUG)

# Golden FusionEngine wire images that encode_message() must reproduce
# byte-for-byte (see test_pose_encode below).
# NOTE(review): MESSAGE1/MESSAGE2 appear to be the same pose at consecutive
# sequence numbers -- confirm against the FusionEngine framing spec before
# regenerating these blobs.
P1_POSE_MESSAGE1 = b".1\x00\x00\xc0@\xdb\x1a\x02\x01\x10'\x00\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x08@\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f"
P1_POSE_MESSAGE2 = b".1\x00\x00q\x95\xfd\x8a\x02\x01\x10'\x01\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x08@\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f"
P1_POSE_AUX_MESSAGE3 = b".1\x00\x00\xac\xa4\x08\x94\x02\x00\x13'\x02\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\x00\x00\x00\x00\xf8\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f\x00\x00\xc0\x7f"
def test_pose_encode():
    """Encoding the same pose twice yields the two golden frames; aux follows."""
    encoder = FusionEngineEncoder()
    pose = PoseMessage()
    pose.velocity_body_mps = np.array([1.0, 2.0, 3.0])
    pose_aux = PoseAuxMessage()
    # The encoder is stateful, so the same message encodes differently
    # on consecutive calls.
    for reference in (P1_POSE_MESSAGE1, P1_POSE_MESSAGE2):
        assert encoder.encode_message(pose) == reference
    assert encoder.encode_message(pose_aux) == P1_POSE_AUX_MESSAGE3
| 97.517241
| 753
| 0.74116
| 608
| 2,828
| 3.401316
| 0.118421
| 0.681818
| 0.709381
| 0.713733
| 0.733559
| 0.720019
| 0.720019
| 0.720019
| 0.720019
| 0.677466
| 0
| 0.297287
| 0.048444
| 2,828
| 28
| 754
| 101
| 0.4712
| 0
| 0
| 0.095238
| 0
| 0.142857
| 0.727723
| 0.706153
| 0
| 1
| 0
| 0
| 0.142857
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
cb8c55ceda90f2326f66376127b53b1ba3cef28d
| 180
|
py
|
Python
|
LorisBallsBasedModel/Models/__init__.py
|
LorisPilotto/LorisBallsBasedModel
|
8e63575f9b9df7dc65bdaea00c0983aaa1001600
|
[
"MIT"
] | null | null | null |
LorisBallsBasedModel/Models/__init__.py
|
LorisPilotto/LorisBallsBasedModel
|
8e63575f9b9df7dc65bdaea00c0983aaa1001600
|
[
"MIT"
] | null | null | null |
LorisBallsBasedModel/Models/__init__.py
|
LorisPilotto/LorisBallsBasedModel
|
8e63575f9b9df7dc65bdaea00c0983aaa1001600
|
[
"MIT"
] | null | null | null |
from LorisBallsBasedModel.Models.Models import SingleLayerPerceptron, MultiLayerPerceptron, LorisBallsBasedModel, LorisBallsBasedModelTransferLearning, StackedLorisBallsBasedModels
| 180
| 180
| 0.927778
| 10
| 180
| 16.7
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038889
| 180
| 1
| 180
| 180
| 0.965318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
38106a3315af72ca82a5a9ba375a61e383df27e3
| 74,019
|
py
|
Python
|
polymorphic/tests/migrations/0001_initial.py
|
Vayel/django-polymorphic
|
b4efb59cd5d6b1ce3e10fdb5c495fbe239d91ad7
|
[
"BSD-3-Clause"
] | 1
|
2021-03-12T17:42:37.000Z
|
2021-03-12T17:42:37.000Z
|
polymorphic/tests/migrations/0001_initial.py
|
Vayel/django-polymorphic
|
b4efb59cd5d6b1ce3e10fdb5c495fbe239d91ad7
|
[
"BSD-3-Clause"
] | null | null | null |
polymorphic/tests/migrations/0001_initial.py
|
Vayel/django-polymorphic
|
b4efb59cd5d6b1ce3e10fdb5c495fbe239d91ad7
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T12:56:38.000Z
|
2021-01-19T12:56:38.000Z
|
import uuid
import django.db.models.deletion
import django.db.models.manager
from django.db import migrations, models
import polymorphic.showfields
class Migration(migrations.Migration):
initial = True
dependencies = [("contenttypes", "0002_remove_content_type_name")]
operations = [
migrations.CreateModel(
name="SwappableModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.swappablemodel_set+",
to="contenttypes.ContentType",
),
),
],
options={"swappable": "POLYMORPHIC_TEST_SWAPPABLE"},
),
migrations.CreateModel(
name="Base",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field_b", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="BlogBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="BlogEntry",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.CharField(max_length=10)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.blogentry_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="BlogEntry_limit_choices_to",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="ChildModelWithManager",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="CustomPkBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("b", models.CharField(max_length=1)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="DateModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("date", models.DateTimeField()),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.datemodel_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="Enhance_Base",
fields=[
("base_id", models.AutoField(primary_key=True, serialize=False)),
("field_b", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="Enhance_Plain",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field_p", models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name="InitTestModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("bar", models.CharField(max_length=100)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="MgrInheritA",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
managers=[("mgrA", django.db.models.manager.Manager())],
),
migrations.CreateModel(
name="Model2A",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="ModelExtraA",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="ModelExtraExternal",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("topic", models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name="ModelFieldNameTest",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("modelfieldnametest", models.CharField(max_length=10)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelfieldnametest_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="ModelShow1",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
(
"m2m",
models.ManyToManyField(
related_name="_modelshow1_m2m_+", to="tests.ModelShow1"
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelshow1_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="ModelShow1_plain",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="ModelShow2",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
(
"m2m",
models.ManyToManyField(
related_name="_modelshow2_m2m_+", to="tests.ModelShow2"
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelshow2_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldContent, models.Model),
),
migrations.CreateModel(
name="ModelShow3",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
(
"m2m",
models.ManyToManyField(
related_name="_modelshow3_m2m_+", to="tests.ModelShow3"
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelshow3_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="ModelUnderRelChild",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("_private2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="ModelUnderRelParent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
("_private", models.CharField(max_length=10)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelunderrelparent_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="MROBase1",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="MROBase3",
fields=[("base_3_id", models.AutoField(primary_key=True, serialize=False))],
),
migrations.CreateModel(
name="One2OneRelatingModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="ParentModelWithManager",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.parentmodelwithmanager_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="PlainA",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name="PlainChildModelWithManager",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
),
migrations.CreateModel(
name="PlainParentModelWithManager",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
),
migrations.CreateModel(
name="ProxiedBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=10)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.proxiedbase_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="ProxyBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("some_data", models.CharField(max_length=128)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="RelatedNameClash",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.relatednameclash_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldType, models.Model),
),
migrations.CreateModel(
name="RelatingModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
),
migrations.CreateModel(
name="RelationBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field_base", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="SwappedModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.swappedmodel_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Top",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=50)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="UUIDPlainA",
fields=[
(
"uuid_primary_key",
models.UUIDField(
default=uuid.uuid1, primary_key=True, serialize=False
),
),
("field1", models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name="UUIDProject",
fields=[
(
"uuid_primary_key",
models.UUIDField(
default=uuid.uuid1, primary_key=True, serialize=False
),
),
("topic", models.CharField(max_length=30)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, models.Model),
),
migrations.CreateModel(
name="BlogA",
fields=[
(
"blogbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.BlogBase",
),
),
("info", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.blogbase",),
),
migrations.CreateModel(
name="BlogB",
fields=[
(
"blogbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.BlogBase",
),
)
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.blogbase",),
),
migrations.CreateModel(
name="CustomPkInherit",
fields=[
(
"custompkbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
to="tests.CustomPkBase",
),
),
("custom_id", models.AutoField(primary_key=True, serialize=False)),
("i", models.CharField(max_length=1)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.custompkbase",),
),
migrations.CreateModel(
name="Enhance_Inherit",
fields=[
(
"enhance_plain_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
to="tests.Enhance_Plain",
),
),
(
"enhance_base_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Enhance_Base",
),
),
("field_i", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.enhance_base", "tests.enhance_plain"),
),
migrations.CreateModel(
name="InitTestModelSubclass",
fields=[
(
"inittestmodel_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.InitTestModel",
),
)
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.inittestmodel",),
),
migrations.CreateModel(
name="MgrInheritB",
fields=[
(
"mgrinherita_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.MgrInheritA",
),
),
("field2", models.CharField(max_length=10)),
],
bases=("tests.mgrinherita",),
managers=[("mgrB", django.db.models.manager.Manager())],
),
migrations.CreateModel(
name="Middle",
fields=[
(
"top_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Top",
),
),
("description", models.TextField()),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.top",),
),
migrations.CreateModel(
name="Model2B",
fields=[
(
"model2a_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2A",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.model2a",),
),
migrations.CreateModel(
name="ModelExtraB",
fields=[
(
"modelextraa_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.ModelExtraA",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.modelextraa",),
),
migrations.CreateModel(
name="ModelShow2_plain",
fields=[
(
"modelshow1_plain_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.ModelShow1_plain",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.modelshow1_plain",),
),
migrations.CreateModel(
name="ModelWithMyManager",
fields=[
(
"model2a_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2A",
),
),
("field4", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, "tests.model2a"),
),
migrations.CreateModel(
name="ModelWithMyManager2",
fields=[
(
"model2a_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2A",
),
),
("field4", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, "tests.model2a"),
),
migrations.CreateModel(
name="ModelWithMyManagerDefault",
fields=[
(
"model2a_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2A",
),
),
("field4", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, "tests.model2a"),
managers=[
("my_objects", django.db.models.manager.Manager()),
("objects", django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name="ModelWithMyManagerNoDefault",
fields=[
(
"model2a_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2A",
),
),
("field4", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=(polymorphic.showfields.ShowFieldTypeAndContent, "tests.model2a"),
),
migrations.CreateModel(
name="ModelX",
fields=[
(
"base_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Base",
),
),
("field_x", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.base",),
),
migrations.CreateModel(
name="ModelY",
fields=[
(
"base_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Base",
),
),
("field_y", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.base",),
),
migrations.CreateModel(
name="MROBase2",
fields=[
(
"mrobase1_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.MROBase1",
),
)
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.mrobase1",),
managers=[
("objects", django.db.models.manager.Manager()),
("base_objects", django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name="NonProxyChild",
fields=[
(
"proxybase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.ProxyBase",
),
),
("name", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.proxybase",),
),
migrations.CreateModel(
name="One2OneRelatingModelDerived",
fields=[
(
"one2onerelatingmodel_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.One2OneRelatingModel",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.one2onerelatingmodel",),
),
migrations.CreateModel(
name="PlainB",
fields=[
(
"plaina_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.PlainA",
),
),
("field2", models.CharField(max_length=10)),
],
bases=("tests.plaina",),
),
migrations.CreateModel(
name="RelationA",
fields=[
(
"relationbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.RelationBase",
),
),
("field_a", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.relationbase",),
),
migrations.CreateModel(
name="RelationB",
fields=[
(
"relationbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.RelationBase",
),
),
("field_b", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.relationbase",),
),
migrations.CreateModel(
name="TestParentLinkAndRelatedName",
fields=[
(
"superclass",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
related_name="related_name_subclass",
serialize=False,
to="tests.ModelShow1_plain",
),
)
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.modelshow1_plain",),
),
migrations.CreateModel(
name="UUIDArtProject",
fields=[
(
"uuidproject_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.UUIDProject",
),
),
("artist", models.CharField(max_length=30)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.uuidproject",),
),
migrations.CreateModel(
name="UUIDPlainB",
fields=[
(
"uuidplaina_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.UUIDPlainA",
),
),
("field2", models.CharField(max_length=10)),
],
bases=("tests.uuidplaina",),
),
migrations.CreateModel(
name="UUIDResearchProject",
fields=[
(
"uuidproject_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.UUIDProject",
),
),
("supervisor", models.CharField(max_length=30)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.uuidproject",),
),
migrations.AddField(
model_name="uuidproject",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.uuidproject_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="top",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.top_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="relationbase",
name="fk",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="relationbase_set",
to="tests.RelationBase",
),
),
migrations.AddField(
model_name="relationbase",
name="m2m",
field=models.ManyToManyField(
related_name="_relationbase_m2m_+", to="tests.RelationBase"
),
),
migrations.AddField(
model_name="relationbase",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.relationbase_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="relatingmodel",
name="many2many",
field=models.ManyToManyField(to="tests.Model2A"),
),
migrations.AddField(
model_name="proxybase",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.proxybase_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="plainchildmodelwithmanager",
name="fk",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="childmodel_set",
to="tests.PlainParentModelWithManager",
),
),
migrations.AddField(
model_name="one2onerelatingmodel",
name="one2one",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to="tests.Model2A"
),
),
migrations.AddField(
model_name="one2onerelatingmodel",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.one2onerelatingmodel_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="mrobase1",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.mrobase1_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="modelunderrelchild",
name="parent",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="tests.ModelUnderRelParent",
),
),
migrations.AddField(
model_name="modelunderrelchild",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelunderrelchild_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="modelshow1_plain",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelshow1_plain_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="modelextraa",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.modelextraa_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="model2a",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.model2a_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="inittestmodel",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.inittestmodel_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="enhance_base",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.enhance_base_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="custompkbase",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.custompkbase_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="childmodelwithmanager",
name="fk",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="childmodel_set",
to="tests.ParentModelWithManager",
),
),
migrations.AddField(
model_name="childmodelwithmanager",
name="field1",
field=models.CharField(
max_length=10,
),
),
migrations.AddField(
model_name="childmodelwithmanager",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.childmodelwithmanager_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="blogentry_limit_choices_to",
name="blog",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="tests.BlogBase"
),
),
migrations.AddField(
model_name="blogentry_limit_choices_to",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.blogentry_limit_choices_to_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="blogbase",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.blogbase_set+",
to="contenttypes.ContentType",
),
),
migrations.AddField(
model_name="base",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.base_set+",
to="contenttypes.ContentType",
),
),
migrations.CreateModel(
name="ProxyChild",
fields=[],
options={"proxy": True},
bases=("tests.proxybase",),
),
migrations.CreateModel(
name="ProxyModelBase",
fields=[],
options={"proxy": True},
bases=("tests.proxiedbase",),
),
migrations.CreateModel(
name="Bottom",
fields=[
(
"middle_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Middle",
),
),
("author", models.CharField(max_length=50)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.middle",),
),
migrations.CreateModel(
name="MgrInheritC",
fields=[
(
"mgrinheritb_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.MgrInheritB",
),
)
],
bases=(polymorphic.showfields.ShowFieldTypeAndContent, "tests.mgrinheritb"),
),
migrations.CreateModel(
name="Model2C",
fields=[
(
"model2b_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2B",
),
),
("field3", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.model2b",),
),
migrations.CreateModel(
name="ModelExtraC",
fields=[
(
"modelextrab_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.ModelExtraB",
),
),
("field3", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.modelextrab",),
),
migrations.CreateModel(
name="MRODerived",
fields=[
(
"mrobase3_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
to="tests.MROBase3",
),
),
(
"mrobase2_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.MROBase2",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.mrobase2", "tests.mrobase3"),
managers=[
("objects", django.db.models.manager.Manager()),
("base_objects", django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name="PlainC",
fields=[
(
"plainb_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.PlainB",
),
),
("field3", models.CharField(max_length=10)),
],
bases=("tests.plainb",),
),
migrations.CreateModel(
name="ProxyModelA",
fields=[
(
"proxiedbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.ProxiedBase",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.proxymodelbase",),
),
migrations.CreateModel(
name="ProxyModelB",
fields=[
(
"proxiedbase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.ProxiedBase",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.proxymodelbase",),
),
migrations.CreateModel(
name="RelationBC",
fields=[
(
"relationb_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.RelationB",
),
),
("field_c", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.relationb",),
),
migrations.CreateModel(
name="UUIDPlainC",
fields=[
(
"uuidplainb_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.UUIDPlainB",
),
),
("field3", models.CharField(max_length=10)),
],
bases=("tests.uuidplainb",),
),
migrations.AddField(
model_name="blogentry",
name="blog",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="tests.BlogA"
),
),
migrations.CreateModel(
name="Model2D",
fields=[
(
"model2c_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.Model2C",
),
),
("field4", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.model2c",),
),
migrations.CreateModel(
name="InlineModelBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="InlineParent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name="InlineModelA",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="InlineModelB",
fields=[
(
"inlinemodela_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.InlineModelA",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.inlinemodela",),
),
migrations.AddField(
model_name="inlinemodela",
name="parent",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="tests.InlineParent",
related_name="inline_children",
),
),
migrations.AddField(
model_name="inlinemodela",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.inlinemodela_set+",
to="contenttypes.ContentType",
),
),
migrations.CreateModel(
name="ArtProject",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("topic", models.CharField(max_length=30)),
("artist", models.CharField(max_length=30)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.artproject_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Duck",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=30)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.duck_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiTableBase",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field1", models.CharField(max_length=10)),
],
options={"abstract": False},
),
migrations.CreateModel(
name="MultiTableDerived",
fields=[
(
"multitablebase_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.MultiTableBase",
),
),
("field2", models.CharField(max_length=10)),
],
options={"abstract": False},
bases=("tests.multitablebase",),
),
migrations.AddField(
model_name="multitablebase",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.multitablebase_set+",
to="contenttypes.ContentType",
),
),
migrations.CreateModel(
name="RedheadDuck",
fields=[],
options={"proxy": True},
bases=("tests.duck",),
),
migrations.CreateModel(
name="RubberDuck", fields=[], options={"proxy": True}, bases=("tests.duck",)
),
migrations.CreateModel(
name="SubclassSelectorAbstractBaseModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("base_field", models.CharField(default="test_bf", max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="SubclassSelectorProxyBaseModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("base_field", models.CharField(default="test_bf", max_length=10)),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.subclassselectorproxybasemodel_set+",
to="contenttypes.ContentType",
),
),
],
options={"abstract": False, "base_manager_name": "objects"},
),
migrations.CreateModel(
name="SubclassSelectorAbstractConcreteModel",
fields=[
(
"subclassselectorabstractbasemodel_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.SubclassSelectorAbstractBaseModel",
),
),
("abstract_field", models.CharField(default="test_af", max_length=10)),
("concrete_field", models.CharField(default="test_cf", max_length=10)),
],
options={"abstract": False},
bases=("tests.subclassselectorabstractbasemodel",),
),
migrations.AddField(
model_name="subclassselectorabstractbasemodel",
name="polymorphic_ctype",
field=models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_tests.subclassselectorabstractbasemodel_set+",
to="contenttypes.ContentType",
),
),
migrations.CreateModel(
name="SubclassSelectorProxyModel",
fields=[],
options={"proxy": True, "indexes": []},
bases=("tests.subclassselectorproxybasemodel",),
),
migrations.CreateModel(
name="SubclassSelectorProxyConcreteModel",
fields=[
(
"subclassselectorproxybasemodel_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="tests.SubclassSelectorProxyBaseModel",
),
),
("concrete_field", models.CharField(default="test_cf", max_length=10)),
],
options={"abstract": False, "base_manager_name": "objects"},
bases=("tests.subclassselectorproxymodel",),
),
]
| 36.301618
| 93
| 0.417285
| 4,758
| 74,019
| 6.311475
| 0.045187
| 0.026107
| 0.045221
| 0.064469
| 0.849784
| 0.840992
| 0.819514
| 0.809957
| 0.761505
| 0.737562
| 0
| 0.007141
| 0.485389
| 74,019
| 2,038
| 94
| 36.319431
| 0.781234
| 0
| 0
| 0.798621
| 0
| 0
| 0.136114
| 0.044664
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002462
| 0
| 0.004431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e13c483c38f383041f5b9269f7e49fca6e31e32
| 13,797
|
py
|
Python
|
app.py
|
jorgeMorfinezM/pyBase64Encoder
|
f8d69eafe3971ddbe29f3174267a07e73dcee476
|
[
"Apache-2.0"
] | 1
|
2019-09-11T16:47:32.000Z
|
2019-09-11T16:47:32.000Z
|
app.py
|
jorgeMorfinezM/pyBase64Encoder
|
f8d69eafe3971ddbe29f3174267a07e73dcee476
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
jorgeMorfinezM/pyBase64Encoder
|
f8d69eafe3971ddbe29f3174267a07e73dcee476
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Base64 encoder for XML documents (sample CFDI/SAT invoices).

Requires Python 3.0 or later
"""
__author__ = "Jorge Morfinez Mojica (jorge.morfinez.m@gmail.com)"
__copyright__ = "Copyright 2019, Jorge Morfinez Mojica"
__license__ = "Apache 2.0"
__history__ = """ """
__version__ = "1.19.I11.Prod ($Rev: 3 $)"
import base64
# Read an XML document from a file stored in any directory and return it
# Base64-encoded.
def read_xml_from_file_to_encode():
    """Read 'xml_solo.xml' from disk and return its Base64 encoding.

    Returns:
        str: Base64-encoded contents of the file, as produced by
        xml_docto_base64_encoded().
    """
    # The file name (with its directory) is set here; in this case the
    # file lives inside the project.
    _file_name = 'xml_solo.xml'
    # 'with' guarantees the handle is closed even if read()/decode()
    # raises — the original open()/close() pair leaked it on error.
    with open(_file_name, 'rb') as _file:
        _data = _file.read()
    byte_array = xml_docto_base64_encoded(_data.decode())
    return byte_array
# Main method to encode a string to Base64.
def xml_docto_base64_encoded(self):
    """Encode an XML string as Base64.

    Args:
        self (str): the XML document as a string. (The parameter name is
            kept for backward compatibility; this is a plain function
            argument, not an instance reference.)

    Returns:
        str: the Base64-encoded document, decoded as UTF-8 text.
    """
    # Strip newlines and stray bytes-literal artifacts ("b'" prefixes and
    # single quotes) left behind when a bytes repr was stringified.
    # NOTE: the original also did an encode->decode round trip and a second
    # pair of replace() calls; both were no-ops and have been removed.
    xml_string = self.replace("\n", "").replace("b'", "").replace("'", "")
    # Encode to UTF-8 bytes once, Base64-encode, and return as text.
    xml_base64_encodeaded = base64.b64encode(xml_string.encode('utf-8'))
    return xml_base64_encodeaded.decode('utf-8')
def main():
"""Demo driver: Base64-encode an XML string and an XML file, printing both."""
pass
"""Fase 1:
Leer un XML en cadena de caracteres
------------------------------------
Fase 2:
Leer un XML desde un archivo almacenado en cualquier directorio
"""
'''
Llama al metodo xml_docto_base64_encoded() para pasarle por parametro una cadena de caracteres,
en este caso, la cadena de caracteres a encodear a Base64 sera un XML de CFDI v.3.3 timbrado por el SAT
'''
# Test with an XML version 3.2 from the SAT, already stamped:
_xml_to_encode_str = """
<?xml version="1.0" encoding="utf-8"?><cfdi:Comprobante xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:detallista="http://www.sat.gob.mx/detallista" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv32.xsd http://www.sat.gob.mx/detallista http://www.sat.gob.mx/sitio_internet/cfd/detallista/detallista.xsd"
version="3.2" folio="60" fecha="2017-03-31T10:24:54" sello="FTOUrHPRuM+tKCKKBoPf6JfyKfXWMeaVhEcAaGQ/LynhoGmQhPxlz5BuFQOeJUbtnJMJvY55oazfXzSmmD7p/kLj98FVhNmwjZIq1KdUNgRUZEks/haXV/gKdn1ysJ1IXU3SVYk42BSmuREkOf17uaw1sChsUVet41krwNiCI7h45s38ZD7k0dx9kYI7alFla+ifFLW/kONTg1oodhEc6cwHFUwAT4AA2OSKdFr3qIEy63SBgf4iazgbzTNASIfgzYsOMlR5c/lqTTQ1K8GBC9F/I0gnTk0RxneljAkM5roHjkW7W1zfC63OZJPK+WZNpaETVqq+ofvtGG1d75wASA==" noCertificado="00001000000405087348" certificado="MIIGETCCA/mgAwIBAgIUMDAwMDEwMDAwMDA0MDUwODczNDgwDQYJKoZIhvcNAQELBQAwggGyMTgwNgYDVQQDDC9BLkMuIGRlbCBTZXJ2aWNpbyBkZSBBZG1pbmlzdHJhY2nDs24gVHJpYnV0YXJpYTEvMC0GA1UECgwmU2VydmljaW8gZGUgQWRtaW5pc3RyYWNpw7NuIFRyaWJ1dGFyaWExODA2BgNVBAsML0FkbWluaXN0cmFjacOzbiBkZSBTZWd1cmlkYWQgZGUgbGEgSW5mb3JtYWNpw7NuMR8wHQYJKoZIhvcNAQkBFhBhY29kc0BzYXQuZ29iLm14MSYwJAYDVQQJDB1Bdi4gSGlkYWxnbyA3NywgQ29sLiBHdWVycmVybzEOMAwGA1UEEQwFMDYzMDAxCzAJBgNVBAYTAk1YMRkwFwYDVQQIDBBEaXN0cml0byBGZWRlcmFsMRQwEgYDVQQHDAtDdWF1aHTDqW1vYzEVMBMGA1UELRMMU0FUOTcwNzAxTk4zMV0wWwYJKoZIhvcNAQkCDE5SZXNwb25zYWJsZTogQWRtaW5pc3RyYWNpw7NuIENlbnRyYWwgZGUgU2VydmljaW9zIFRyaWJ1dGFyaW9zIGFsIENvbnRyaWJ1eWVudGUwHhcNMTcwMjA4MTQ1NTM0WhcNMjEwMjA4MTQ1NTM0WjCBsTEWMBQGA1UEAxMNT0ZJWCBTQSBERSBDVjEWMBQGA1UEKRMNT0ZJWCBTQSBERSBDVjEWMBQGA1UEChMNT0ZJWCBTQSBERSBDVjElMCMGA1UELRMcT0ZJOTIwMTEzS1o4IC8gQ0VCQTY2MDIyMEZWMzEeMBwGA1UEBRMVIC8gQ0VCQTY2MDIyMEhWWkhMTjAyMSAwHgYDVQQLExdGQUNUVVJBQ0lPTiBFTEVDVFJPTklDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIDaXBSmbT8sYCoDCylmlzMMlJHpEVgchrNUFKYIxg03uBoRfhw0LC25nDB+T+qx9W9y6KkNxpWzZnN+xRklFwsiCCyh+9KEm6p+KFjtHMLBFuukqts24BRzExe+d/7BiXtNP5klwZCfWsxuSVXNYaVpXlhWWzexUBtBnjnrCuYVE8LiFJi3k4uh6rsvBlYBdduaiAtZxDaJeNyv8o2Ccas+h+Ga5AFfIQlLWNqhS4ZdP02OUF/fCN9kJQ8+20YFrLnnfUGkMeEfSORK3ClT7CQw2h28g6binK2ka0/mZEV4cF6lkU4MjCuesnymYryjXBmV7o5b9/R2ROwp5PFf4sMCAwEAAaMdMBswDAYDVR0TAQH/BAIwADALBgNVHQ8EBAMCBsAwDQYJKoZIhvcNAQELBQADggIBABsYQ80N5m6tZ1zIQHP19/8vTf622fKccxFLMf4HO5aaY6mTSTX4rvdoH3hnHs4Ah8GUtIro3ld2ntiZiuL1u0I0fefu9NZ/64hhXHv178mlK9GHdFg5bGvXnIy5J9hCwb0dOcN5CfzbA1pMqNa/AM/t
8BwZ3vnXPC6drJFYLWdthbtvLqGl/k2IZ2DmKj7DQJb/ZXlacbKSCjRn5BTziLtDk40QODKjPpZM2eFnYjq4Qh3WOHhU2Mt6AtLboNWHVrzmJjDbOnUJ/fEOQ6WJt82JG0jDxBG9JYIchrmD3oMFHn4PFU8dM9ZCbC1f1MEDP5KaPDI9uTPVr5AGJzUikqNCsGQdBHW0AeVEhGmuWMkHb1jYMwtoa/injcEcS8R35Yjj2zUlRWEk1J/Z7XjYHRa/337NzLkNxxzMmPTuBwG11+kgv9Yn1SKeMB+DgY91PBWX9RpdUhiyviqwtvbRS5b1MFKMbQFgDNseB12mUDU0RWpZrmSGkmNJ/JhnbPKfRaWUlCPxOkZ6AzdBcVY/Ai52KUE/3LFqUzJ35+QA0Hiv2WxyfpKQ9SFxC/i69sjJSpQ34h9otjIc2IfsgxXonHArB7Q7rouoVVWZnozR+BBVzYaJnYORLAxbJis6WVL4B8bcNHJdZv7+f8EqjGboAWoBZzR4l68/19ixZLuufdFC" formaDePago="PAGO EN UNA SOLA EXHIBICION" subTotal="4173.71" descuento="0" Moneda="MXP" total="4841.50" tipoDeComprobante="ingreso" metodoDePago="03" LugarExpedicion="BLV. BELISARIO DOMINGUEZ NO. 1057 EDIFICIO B Y C, TUXTLA GUTIERREZ,CHIS.,MX., MEXICO"><cfdi:Emisor rfc="OFI920113KZ8" nombre="OFIX S.A. de C.V."><cfdi:DomicilioFiscal calle="AVENIDA URANO" noExterior="585" colonia="YLANG YLANG" localidad="BOCA DEL RIO" municipio="BOCA DEL RIO" estado="VERACRUZ" pais="MEXICO" codigoPostal="94298"/><cfdi:ExpedidoEn calle="BLV. BELISARIO DOMINGUEZ NO. 1057 EDIFICIO B Y C" localidad="TUXTLA GUTIERREZ,CHIS.,MX." pais="MEXICO"/><cfdi:RegimenFiscal Regimen="Regimen General de Ley Personas Morales"/></cfdi:Emisor><cfdi:Receptor rfc="CGI160330R94" nombre="CFE GENERACION IV"><cfdi:Domicilio calle="AV. PASEO DE LA REFORMA" noExterior="N°. 
164" colonia="JUAREZ" municipio="DELEGACION CUAHUTEMOC" localidad="MEXICO" estado="CIUDAD DE MEXICO" pais="MEXICO" codigoPostal="06600"/></cfdi:Receptor><cfdi:Conceptos><cfdi:Concepto cantidad="2" unidad="PIEZA" noIdentificacion="050-0002" descripcion="GRAPA STANDARD CON 5000PZAS PILOT 400" valorUnitario="21.98" importe="43.96"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="064-0004" descripcion="SUJETA DOCUMENTOS 32MM NEGRO C/12PZAS" valorUnitario="19.28" importe="57.84"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="064-0005" descripcion="SUJETA DOCUMENTOS 19MM NEGRO C/12PZAS" valorUnitario="10.29" importe="30.87"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="064-0010" descripcion="SUJETA DOCUMENTOS 51MM NEGRO C/12PZAS" valorUnitario="54.19" importe="162.57"/><cfdi:Concepto cantidad="3" unidad="CAJA 12" noIdentificacion="068-0162" descripcion="BOLIGRAFO MEDIANO STICK AZUL DIAMANTE BIC" valorUnitario="32.84" importe="98.52"/><cfdi:Concepto cantidad="6" unidad="PIEZA" noIdentificacion="070-0016" descripcion="CORRECTOR LIQUIDO TIPO PLUMA 8ML ZEBRA" valorUnitario="25.00" importe="150.00"/><cfdi:Concepto cantidad="2" unidad="PIEZA" noIdentificacion="075-0128" descripcion="LAPIZ BICOLOR HEXAG JUMBO SMART BOL CON 2" valorUnitario="8.94" importe="17.88"/><cfdi:Concepto cantidad="1" unidad="PIEZA" noIdentificacion="075-0386" descripcion="LAPIZ GRAFITO CON GOMA #2 TRIANGULAR MIRADO BLT CON 4" valorUnitario="15.38" importe="15.38"/><cfdi:Concepto cantidad="2" unidad="PIEZA" noIdentificacion="076-0406" descripcion="MARCADOR PERMANENTE DOBLE PUNTA SURTIDOS CON 3 SHARPIE" valorUnitario="44.20" importe="88.40"/><cfdi:Concepto cantidad="2" unidad="PIEZA" noIdentificacion="076-0610" descripcion="MARCADOR PERMANENTE FINO SURTIDOS CON 3 ELECTRO POP SHARPIE" valorUnitario="32.13" importe="64.26"/><cfdi:Concepto cantidad="5" unidad="PIEZA" noIdentificacion="087-0096" descripcion="NOTAS ADHESIVAS 3x4 NEON C/100H PAQUETE CON 6" 
valorUnitario="70.25" importe="351.25"/><cfdi:Concepto cantidad="5" unidad="PIEZA" noIdentificacion="087-0104" descripcion="NOTAS ADHESIVAS 3X3" PASTEL C/6BLK MEMOTIP" valorUnitario="41.79" importe="208.95"/><cfdi:Concepto cantidad="10" unidad="PIEZA" noIdentificacion="087-0107" descripcion="NOTAS ADHESIVAS 2x2 CUBO ULTRA C/400H" valorUnitario="34.56" importe="345.60"/><cfdi:Concepto cantidad="1" unidad="PIEZA" noIdentificacion="092-0009" descripcion="CINTA EMPAQUE TRANSP TUK .48X50M" valorUnitario="10.80" importe="10.80"/><cfdi:Concepto cantidad="1" unidad="PIEZA" noIdentificacion="092-0012" descripcion="CINTA EMPAQUE CANELA TUK .48X50M" valorUnitario="10.80" importe="10.80"/><cfdi:Concepto cantidad="1" unidad="PIEZA" noIdentificacion="092-0057" descripcion="CINTA ADHESIVA TRANSP 205 .18X33M CELOFAN" valorUnitario="17.51" importe="17.51"/><cfdi:Concepto cantidad="2" unidad="PIEZA" noIdentificacion="093-0006" descripcion="CLIP MARIPOSA #2 CON 50" valorUnitario="25.21" importe="50.42"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="093-0012" descripcion="CLIP REDONDO #1 CON 100" valorUnitario="14.16" importe="42.48"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="093-0013" descripcion="CLIP REDONDO #2 CON 100" valorUnitario="10.89" importe="32.67"/><cfdi:Concepto cantidad="4" unidad="PIEZA" noIdentificacion="095-0279" descripcion="CUADERNO ESPIRAL FRANCES 100H RAYAS KIUT" valorUnitario="30.64" importe="122.56"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="095-0360" descripcion="CUADERNO PROFESIONAL 100H RAYAS NORMA COLOR" valorUnitario="39.19" importe="117.57"/><cfdi:Concepto cantidad="1" unidad="PIEZA 12" noIdentificacion="110-0034" descripcion="LAPIZ ADHESIVO 11GRS PRITT" valorUnitario="105.16" importe="105.16"/><cfdi:Concepto cantidad="6" unidad="PIEZA" noIdentificacion="110-0036" descripcion="LAPIZ ADHESIVO 22GRS PRITT" valorUnitario="17.44" importe="104.64"/><cfdi:Concepto cantidad="1" unidad="PIEZA" 
noIdentificacion="117-0033" descripcion="SOBRE BOLSA ANTE 60K COIN 5 CON 50" valorUnitario="15.58" importe="15.58"/><cfdi:Concepto cantidad="1" unidad="PIEZA" noIdentificacion="117-0037" descripcion="SOBRE BOLSA ANTE 88K CARTA CON 50" valorUnitario="72.10" importe="72.10"/><cfdi:Concepto cantidad="1" unidad="PIEZA" noIdentificacion="117-0038" descripcion="SOBRE BOLSA ANTE 88K OFICIO CON 50" valorUnitario="87.49" importe="87.49"/><cfdi:Concepto cantidad="10" unidad="PIEZA" noIdentificacion="117-0044" descripcion="SOBRE BOLSA ANTE 88K RADIOGRAFIA" valorUnitario="5.41" importe="54.10"/><cfdi:Concepto cantidad="2" unidad="PIEZA" noIdentificacion="248-0010" descripcion="PAPEL OPALINA CARTA BLANCA C/100HJS 120GRS" valorUnitario="38.50" importe="77.00"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="248-0088" descripcion="PAPEL CARTA PASTEL VERDE 37K C/100HJS" valorUnitario="31.02" importe="93.06"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="248-0091" descripcion="PAPEL CARTA PASTEL AMARILLO 37K C/100HJS" valorUnitario="31.02" importe="93.06"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="269-0182" descripcion="CARPETA ARGOLLA O 1.5" CARTA BLANCO CON VENTANA" valorUnitario="38.96" importe="116.88"/><cfdi:Concepto cantidad="3" unidad="PIEZA" noIdentificacion="269-0260" descripcion="CARPETA ARGOLLA D 4" CARTA NEGRO CON VENTANA" valorUnitario="88.64" importe="265.92"/><cfdi:Concepto cantidad="6" unidad="PIEZA" noIdentificacion="269-0302" descripcion="CARPETA OFIX ARGOLLA D 2" CARTA BLANCO CON VENTANA" valorUnitario="52.73" importe="316.38"/><cfdi:Concepto cantidad="5" unidad="PIEZA" noIdentificacion="285-0011" descripcion="SEPARADOR 15DIV COLORINDEX COLORES SURTIDOS" valorUnitario="43.35" importe="216.75"/><cfdi:Concepto cantidad="5" unidad="PIEZA" noIdentificacion="285-0015" descripcion="SEPARADOR 5DIV COLORINDEX COLORES SURTIDOS" valorUnitario="18.16" importe="90.80"/><cfdi:Concepto cantidad="5" unidad="PIEZA" 
noIdentificacion="285-0016" descripcion="SEPARADOR 8DIV COLORINDEX COLORES SURTIDOS" valorUnitario="22.45" importe="112.25"/><cfdi:Concepto cantidad="5" unidad="PIEZA" noIdentificacion="285-0072" descripcion="SEPARADOR 10DIV COLORINDEX COLORES SURTIDOS" valorUnitario="25.20" importe="126.00"/><cfdi:Concepto cantidad="5" unidad="PIEZA" noIdentificacion="285-0216" descripcion="MINIBANDERITAS FLECHA CON 96HJS COLORES BASICOS" valorUnitario="37.25" importe="186.25"/></cfdi:Conceptos><cfdi:Impuestos totalImpuestosTrasladados="667.79"><cfdi:Traslados><cfdi:Traslado tasa="16" impuesto="IVA" importe="667.79"/></cfdi:Traslados></cfdi:Impuestos><cfdi:Complemento><tfd:TimbreFiscalDigital xmlns:tfd="http://www.sat.gob.mx/TimbreFiscalDigital" version="1.0" UUID="8E2C1724-EEA3-48C0-859E-69D96ABAF00D" FechaTimbrado="2017-03-31T11:45:23" selloCFD="FTOUrHPRuM+tKCKKBoPf6JfyKfXWMeaVhEcAaGQ/LynhoGmQhPxlz5BuFQOeJUbtnJMJvY55oazfXzSmmD7p/kLj98FVhNmwjZIq1KdUNgRUZEks/haXV/gKdn1ysJ1IXU3SVYk42BSmuREkOf17uaw1sChsUVet41krwNiCI7h45s38ZD7k0dx9kYI7alFla+ifFLW/kONTg1oodhEc6cwHFUwAT4AA2OSKdFr3qIEy63SBgf4iazgbzTNASIfgzYsOMlR5c/lqTTQ1K8GBC9F/I0gnTk0RxneljAkM5roHjkW7W1zfC63OZJPK+WZNpaETVqq+ofvtGG1d75wASA==" noCertificadoSAT="00001000000300250292" selloSAT="qcann7uVLSxSEAQBY2Q2XL1s45aJM86PsOQTsCwgC4x+vExaNEmtIIycbUYo/Yrb0ca9Sd4CrnLI7i3ekBjO0k7GVND7n7l6AZQu5C5AsTXo3I6GjiMjxVBXk7jHkYvgJmXCKubsikytkgxBY3AYN0Qu8G5Sj5QKNlVjJuBoUHQ=" xsi:schemaLocation="http://www.sat.gob.mx/TimbreFiscalDigital http://www.sat.gob.mx/TimbreFiscalDigital/TimbreFiscalDigital.xsd"/></cfdi:Complemento>
</cfdi:Comprobante>
"""
# Call the method to encode the character string (an XML document);
# then print the result, which is the Base64-encoded XML.
xml_result_endoded = xml_docto_base64_encoded(_xml_to_encode_str)
print("XML Encodeado desde Str: ", xml_result_endoded)
# Encoding test starting from an XML file stored in a directory.
xml_byte_encode = read_xml_from_file_to_encode()
print("XML Encodeado desde File: ", xml_byte_encode)
# Script entry point guard: run the demo only when executed directly.
if __name__ == "__main__":
main()
| 146.776596
| 10,935
| 0.795028
| 1,556
| 13,797
| 6.983933
| 0.355398
| 0.041962
| 0.069937
| 0.021257
| 0.315634
| 0.294469
| 0.267139
| 0.210362
| 0.109874
| 0.10288
| 0
| 0.106765
| 0.082844
| 13,797
| 93
| 10,936
| 148.354839
| 0.752015
| 0.038922
| 0
| 0.078947
| 0
| 0.052632
| 0.910155
| 0.538131
| 0
| 1
| 0
| 0.032258
| 0
| 1
| 0.078947
| false
| 0.078947
| 0.052632
| 0
| 0.184211
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2a228ab633bf77349453db6b3af1a1e078a4cbdb
| 363,523
|
py
|
Python
|
src/highdicom/_iods.py
|
malaterre/highdicom
|
1d02c328d1f7aee028d5d61c124d2aff11396603
|
[
"MIT"
] | null | null | null |
src/highdicom/_iods.py
|
malaterre/highdicom
|
1d02c328d1f7aee028d5d61c124d2aff11396603
|
[
"MIT"
] | null | null | null |
src/highdicom/_iods.py
|
malaterre/highdicom
|
1d02c328d1f7aee028d5d61c124d2aff11396603
|
[
"MIT"
] | null | null | null |
"""DICOM Information Object Definitions (IODs)
auto-generated on 2021-08-12 at 18:23:25.
"""
from typing import Dict, List
IOD_MODULE_MAP: Dict[str, List[Dict[str, str]]] = {
"12-lead-ecg": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"acquisition-context-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"advanced-blending-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "advanced-blending-presentation-state",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "advanced-blending-presentation-state-display",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "displayed-area",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "spatial-transformation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "icc-profile",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
],
"ambulatory-ecg": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"arterial-pulse-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"autorefraction-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "autorefraction-measurements-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "autorefraction-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"basic-cardiac-electrophysiology-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"basic-directory": [
{
"ie": "Basic Directory",
"key": "file-set-identification",
"usage": "M"
},
{
"ie": "Basic Directory",
"key": "directory-information",
"usage": "U"
}
],
"basic-structured-display": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "structured-display",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "structured-display-image-box",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "structured-display-annotation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
],
"basic-text-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"basic-voice-audio-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "U"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"blending-softcopy-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-blending",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "displayed-area",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "spatial-transformation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "palette-color-lookup-table",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "icc-profile",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
],
"body-position-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"breast-projection-x-ray-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "dx-series",
"usage": "M"
},
{
"ie": "Series",
"key": "enhanced-mammography-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-mammography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "breast-view",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "breast-projection-x-ray-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "patient-orientation",
"usage": "M"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"breast-tomosynthesis-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-mammography-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "breast-tomosynthesis-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "image---equipment-coordinate-relationship",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-image",
"usage": "M"
},
{
"ie": "Image",
"key": "breast-tomosynthesis-contributing-sources",
"usage": "U"
},
{
"ie": "Image",
"key": "breast-tomosynthesis-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-reconstruction",
"usage": "U"
},
{
"ie": "Image",
"key": "breast-view",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"c-arm-photon-electron-radiation": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "rt-radiation-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "c-arm-photon-electron-delivery-device",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "c-arm-photon-electron-beam",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"c-arm-photon-electron-radiation-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "RT Delivered Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-radiation-record-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "c-arm-photon-electron-delivery-device",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "c-arm-photon-electron-beam",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"chest-cad-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"colon-cad-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"color-palette": [
{
"ie": "Color Palette",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Color Palette",
"key": "color-palette-definition",
"usage": "M"
},
{
"ie": "Color Palette",
"key": "palette-color-lookup-table",
"usage": "M"
},
{
"ie": "Color Palette",
"key": "icc-profile",
"usage": "M"
}
],
"color-softcopy-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-relationship",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-shutter",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "bitmap-display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-activation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "displayed-area",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "spatial-transformation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "icc-profile",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
],
"comprehensive-3d-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"comprehensive-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"content-assessment-results": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Content Assessment Results",
"key": "content-assessment-results",
"usage": "M"
},
{
"ie": "Content Assessment Results",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Content Assessment Results",
"key": "common-instance-reference",
"usage": "M"
}
],
"corneal-topography-map": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "corneal-topography-map-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "palette-color-lookup-table",
"usage": "M"
},
{
"ie": "Image",
"key": "corneal-topography-map-image",
"usage": "M"
},
{
"ie": "Image",
"key": "corneal-topography-map-analysis",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
}
],
"cr-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "cr-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "display-shutter",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "cr-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "modality-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"ct-defined-procedure-protocol": [
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "protocol-context",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "clinical-trial-context",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "patient-specification",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "equipment-specification",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "instructions",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "patient-positioning",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "general-defined-acquisition",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "general-defined-reconstruction",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "defined-storage",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "sop-common",
"usage": "M"
}
],
"ct-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-plane",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "ct-image",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-energy-ct-image",
"usage": "C"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"ct-performed-procedure-protocol": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ct-protocol-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "protocol-context",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "patient-protocol-context",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "instructions",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "patient-positioning",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "performed-ct-acquisition",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "performed-ct-reconstruction",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "performed-storage",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "sop-common",
"usage": "M"
}
],
"deformable-spatial-registration": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "spatial-registration-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Spatial Registration",
"key": "deformable-spatial-registration",
"usage": "M"
},
{
"ie": "Spatial Registration",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Spatial Registration",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Spatial Registration",
"key": "sop-common",
"usage": "M"
}
],
"dermoscopic-photography-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "vl-photographic-equipment",
"usage": "U"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "vl-photographic-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "dermoscopic-image",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"digital-intra-oral-x-ray-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "dx-series",
"usage": "M"
},
{
"ie": "Series",
"key": "intra-oral-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "display-shutter",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "dx-anatomy-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-image",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-detector",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-collimator",
"usage": "U"
},
{
"ie": "Image",
"key": "dx-positioning",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-tomography-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-acquisition-dose",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-generation",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-filtration",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-grid",
"usage": "U"
},
{
"ie": "Image",
"key": "intra-oral-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "image-histogram",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"digital-mammography-x-ray-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "dx-series",
"usage": "M"
},
{
"ie": "Series",
"key": "mammography-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "display-shutter",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "dx-anatomy-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-image",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-detector",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-collimator",
"usage": "U"
},
{
"ie": "Image",
"key": "dx-positioning",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-tomography-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-acquisition-dose",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-generation",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-filtration",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-grid",
"usage": "U"
},
{
"ie": "Image",
"key": "mammography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "image-histogram",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"digital-x-ray-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "dx-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "display-shutter",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "dx-anatomy-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-image",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-detector",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-collimator",
"usage": "U"
},
{
"ie": "Image",
"key": "dx-positioning",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-tomography-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-acquisition-dose",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-generation",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-filtration",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-grid",
"usage": "U"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "image-histogram",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"electromyogram": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"electrooculogram": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"encapsulated-cda": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "encapsulated-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "encapsulated-document",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "sop-common",
"usage": "M"
}
],
"encapsulated-mtl": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "encapsulated-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "encapsulated-document",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "manufacturing-3d-model",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Encapsulated Document",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "common-instance-reference",
"usage": "C"
}
],
"encapsulated-obj": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "encapsulated-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "encapsulated-document",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "manufacturing-3d-model",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Encapsulated Document",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "common-instance-reference",
"usage": "C"
}
],
"encapsulated-pdf": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "encapsulated-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "encapsulated-document",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "sop-common",
"usage": "M"
}
],
"encapsulated-stl": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "encapsulated-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "encapsulated-document",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "manufacturing-3d-model",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Encapsulated Document",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Encapsulated Document",
"key": "common-instance-reference",
"usage": "C"
}
],
"enhanced-ct-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ct-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "enhanced-ct-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "supplemental-palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-ct-image",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-multi-energy-ct-acquisition",
"usage": "C"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"enhanced-mr-color-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "mr-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "enhanced-mr-color-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "bulk-motion-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-mr-image",
"usage": "M"
},
{
"ie": "Image",
"key": "mr-pulse-sequence",
"usage": "C"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"enhanced-mr-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "mr-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "enhanced-mr-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "bulk-motion-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "supplemental-palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-mr-image",
"usage": "M"
},
{
"ie": "Image",
"key": "mr-pulse-sequence",
"usage": "C"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"enhanced-pet-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "enhanced-pet-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-pet-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-pet-isotope",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-pet-acquisition",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-pet-image",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-pet-corrections",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"enhanced-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"enhanced-us-volume": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "enhanced-us-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "ultrasound-frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "enhanced-us-volume-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-palette-color-lookup-table",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-us-image",
"usage": "M"
},
{
"ie": "Image",
"key": "ivus-image",
"usage": "C"
},
{
"ie": "Image",
"key": "excluded-intervals",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"enhanced-x-ray-radiation-dose-structured-report": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"enhanced-xa-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "xa-xrf-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "mask",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-xa-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-filtration",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-grid",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-xa-xrf-image",
"usage": "M"
},
{
"ie": "Image",
"key": "xa-xrf-acquisition",
"usage": "C"
},
{
"ie": "Image",
"key": "x-ray-image-intensifier",
"usage": "C"
},
{
"ie": "Image",
"key": "x-ray-detector",
"usage": "C"
},
{
"ie": "Image",
"key": "xa-xrf-multi-frame-presentation",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"enhanced-xrf-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "xa-xrf-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "mask",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-xrf-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-tomography-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-filtration",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-grid",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-xa-xrf-image",
"usage": "M"
},
{
"ie": "Image",
"key": "xa-xrf-acquisition",
"usage": "C"
},
{
"ie": "Image",
"key": "x-ray-image-intensifier",
"usage": "C"
},
{
"ie": "Image",
"key": "x-ray-detector",
"usage": "C"
},
{
"ie": "Image",
"key": "xa-xrf-multi-frame-presentation",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"extensible-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"general-audio-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"general-ecg": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"generic-implant-template": [
{
"ie": "Implant Template",
"key": "generic-implant-template-description",
"usage": "M"
},
{
"ie": "Implant Template",
"key": "generic-implant-template-2d-drawings",
"usage": "U"
},
{
"ie": "Implant Template",
"key": "generic-implant-template-3d-models",
"usage": "U"
},
{
"ie": "Implant Template",
"key": "generic-implant-template-mating-features",
"usage": "U"
},
{
"ie": "Implant Template",
"key": "generic-implant-template-planning-landmarks",
"usage": "U"
},
{
"ie": "Implant Template",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Surface Mesh",
"key": "surface-mesh",
"usage": "C"
}
],
"grayscale-softcopy-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-relationship",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-shutter",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-mask",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "mask",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "bitmap-display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-activation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "displayed-area",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "spatial-transformation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "modality-lut",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "softcopy-voi-lut",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "softcopy-presentation-lut",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
],
"hanging-protocol": [
{
"ie": "Hanging Protocol",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Hanging Protocol",
"key": "hanging-protocol-definition",
"usage": "M"
},
{
"ie": "Hanging Protocol",
"key": "hanging-protocol-environment",
"usage": "M"
},
{
"ie": "Hanging Protocol",
"key": "hanging-protocol-display",
"usage": "M"
}
],
"hemodynamic-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"implant-assembly-template": [
{
"ie": "Implant Assembly",
"key": "implant-assembly-template",
"usage": "M"
},
{
"ie": "Implant Assembly",
"key": "sop-common",
"usage": "M"
}
],
"implant-template-group": [
{
"ie": "Implant Template Group",
"key": "implant-template-group",
"usage": "M"
},
{
"ie": "Implant Template Group",
"key": "sop-common",
"usage": "M"
}
],
"implantation-plan-sr-document": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"intraocular-lens-calculations": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "intraocular-lens-calculations-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "intraocular-lens-calculations",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"intravascular-optical-coherence-tomography-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "intravascular-oct-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "supplemental-palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "M"
},
{
"ie": "Image",
"key": "intravascular-optical-coherence-tomography-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "intravascular-oct-image",
"usage": "M"
},
{
"ie": "Image",
"key": "intravascular-oct-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "intravascular-oct-processing-parameters",
"usage": "C"
},
{
"ie": "Image",
"key": "intravascular-image-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"keratometry-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "keratometry-measurements-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "keratometry-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"key-object-selection-document": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "key-object-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "key-object-document",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"legacy-converted-enhanced-ct-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ct-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "legacy-converted-enhanced-ct-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-ct-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"legacy-converted-enhanced-mr-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "mr-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "U"
},
{
"ie": "Image",
"key": "legacy-converted-enhanced-mr-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "bulk-motion-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-mr-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"legacy-converted-enhanced-pet-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "enhanced-pet-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "legacy-converted-enhanced-pet-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "enhanced-pet-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"lensometry-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "lensometry-measurements-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "lensometry-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"macular-grid-thickness-and-volume-report": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"mammography-cad-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"mr-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-plane",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "mr-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"mr-spectroscopy": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "mr-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "MR Spectroscopy",
"key": "mr-spectroscopy-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "MR Spectroscopy",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "MR Spectroscopy",
"key": "bulk-motion-synchronization",
"usage": "C"
},
{
"ie": "MR Spectroscopy",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "specimen",
"usage": "U"
},
{
"ie": "MR Spectroscopy",
"key": "mr-spectroscopy",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "mr-spectroscopy-pulse-sequence",
"usage": "C"
},
{
"ie": "MR Spectroscopy",
"key": "mr-spectroscopy-data",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "sop-common",
"usage": "M"
},
{
"ie": "MR Spectroscopy",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "MR Spectroscopy",
"key": "frame-extraction",
"usage": "C"
}
],
"multi-channel-respiratory-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"multi-frame-grayscale-byte-sc-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "U"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-grayscale-byte-sc-image-multi-frame-functional-groups",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-image",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-multi-frame-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sc-multi-frame-vector",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"multi-frame-grayscale-word-sc-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "U"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-grayscale-word-sc-image-multi-frame-functional-groups",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-image",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-multi-frame-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sc-multi-frame-vector",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"multi-frame-single-bit-sc-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "U"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-image",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-multi-frame-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sc-multi-frame-vector",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"multi-frame-true-color-sc-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "U"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-true-color-sc-image-multi-frame-functional-groups",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-image",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-multi-frame-image",
"usage": "M"
},
{
"ie": "Image",
"key": "sc-multi-frame-vector",
"usage": "C"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"nm-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "nm-pet-patient-orientation",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "nm-image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "nm-multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "nm-image",
"usage": "M"
},
{
"ie": "Image",
"key": "nm-isotope",
"usage": "M"
},
{
"ie": "Image",
"key": "nm-detector",
"usage": "M"
},
{
"ie": "Image",
"key": "nm-tomo-acquisition",
"usage": "C"
},
{
"ie": "Image",
"key": "nm-multi-gated-acquisition",
"usage": "C"
},
{
"ie": "Image",
"key": "nm-phase",
"usage": "C"
},
{
"ie": "Image",
"key": "nm-reconstruction",
"usage": "C"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-overlay",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"ophthalmic-axial-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-axial-measurements-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "ophthalmic-axial-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"ophthalmic-optical-coherence-tomography-b-scan-volume-analysis": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-tomography-b-scan-volume-analysis-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-optical-coherence-tomography-b-scan-volume-analysis-image",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-optical-coherence-tomography-b-scan-volume-analysis-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"ophthalmic-optical-coherence-tomography-en-face-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-tomography-en-face-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "ophthalmic-optical-coherence-tomography-en-face-image",
"usage": "M"
},
{
"ie": "Image",
"key": "ocular-region-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-optical-coherence-tomography-en-face-image-quality-rating",
"usage": "C"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"ophthalmic-photography-16-bit-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-photography-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Image",
"key": "ophthalmic-photography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "ocular-region-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photographic-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"ophthalmic-photography-8-bit-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-photography-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Image",
"key": "ophthalmic-photography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "ocular-region-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photographic-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"ophthalmic-thickness-map": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "ophthalmic-thickness-map-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "supplemental-palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "bitmap-display-shutter",
"usage": "C"
},
{
"ie": "Image",
"key": "ophthalmic-thickness-map",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-thickness-map-quality-rating",
"usage": "C"
},
{
"ie": "Image",
"key": "ophthalmic-photography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"ophthalmic-tomography-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "ophthalmic-tomography-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "ophthalmic-tomography-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "ophthalmic-tomography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-tomography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-tomography-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "ocular-region-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"ophthalmic-visual-field-static-perimetry-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "visual-field-static-perimetry-measurements-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "visual-field-static-perimetry-test-parameters",
"usage": "M"
},
{
"ie": "Measurements",
"key": "visual-field-static-perimetry-test-reliability",
"usage": "M"
},
{
"ie": "Measurements",
"key": "visual-field-static-perimetry-test-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "visual-field-static-perimetry-test-results",
"usage": "M"
},
{
"ie": "Measurements",
"key": "ophthalmic-patient-clinical-information-and-test-lens-parameters",
"usage": "U"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"parametric-map": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "parametric-map-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "C"
},
{
"ie": "Image",
"key": "floating-point-image-pixel",
"usage": "C"
},
{
"ie": "Image",
"key": "double-floating-point-image-pixel",
"usage": "C"
},
{
"ie": "Image",
"key": "parametric-map-image",
"usage": "M"
},
{
"ie": "Image",
"key": "parametric-map-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "bulk-motion-synchronization",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"patient-radiation-dose-structured-report": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"performed-imaging-agent-administration-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"pet-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "pet-series",
"usage": "M"
},
{
"ie": "Series",
"key": "pet-isotope",
"usage": "M"
},
{
"ie": "Series",
"key": "pet-multi-gated-acquisition",
"usage": "C"
},
{
"ie": "Series",
"key": "nm-pet-patient-orientation",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-plane",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "pet-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"planar-mpr-volumetric-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volumetric-presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volumetric-presentation-state-relationship",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volume-cropping",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "presentation-view-description",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "multi-planar-reconstruction-geometry",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "mpr-volumetric-presentation-state-display",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volumetric-graphic-annotation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "presentation-animation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "common-instance-reference",
"usage": "M"
}
],
"planned-imaging-agent-administration-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"procedure-log": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"protocol-approval": [
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Approval",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Approval",
"key": "protocol-approval",
"usage": "M"
}
],
"pseudo-color-softcopy-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-relationship",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-shutter",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-mask",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "mask",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "bitmap-display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-activation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "displayed-area",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "spatial-transformation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "modality-lut",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "softcopy-voi-lut",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "palette-color-lookup-table",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "icc-profile",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
],
"radiopharmaceutical-radiation-dose-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"raw-data": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Raw Data",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Raw Data",
"key": "specimen",
"usage": "U"
},
{
"ie": "Raw Data",
"key": "raw-data",
"usage": "M"
},
{
"ie": "Raw Data",
"key": "sop-common",
"usage": "M"
}
],
"real-time-audio-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "real-time-bulk-data-flow",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Waveform",
"key": "real-time-audio-waveform-current-frame-functional-groups",
"usage": "M"
}
],
"real-time-video-endoscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "real-time-bulk-data-flow",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "M"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Image",
"key": "real-time-acquisition",
"usage": "M"
},
{
"ie": "Image",
"key": "real-time-video-endoscopic-image-current-frame-functional-groups",
"usage": "M"
}
],
"real-time-video-photographic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "real-time-bulk-data-flow",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Image",
"key": "real-time-acquisition",
"usage": "M"
},
{
"ie": "Image",
"key": "real-time-video-photographic-image-current-frame-functional-groups",
"usage": "M"
}
],
"real-world-value-mapping": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "real-world-value-mapping-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Real World Value Mapping",
"key": "real-world-value-mapping",
"usage": "M"
},
{
"ie": "Real World Value Mapping",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Real World Value Mapping",
"key": "sop-common",
"usage": "M"
}
],
"rendition-selection-document": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "key-object-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Document",
"key": "key-object-document",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"respiratory-waveform": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"robotic-arm-radiation": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "rt-radiation-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "robotic-arm-delivery-device",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "robotic-arm-path",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"robotic-arm-radiation-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "RT Delivered Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-radiation-record-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "robotic-arm-delivery-device",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "robotic-arm-path",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"routine-scalp-electroencephalogram": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"rt-beams-delivery-instruction": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Plan",
"key": "rt-beams-delivery-instruction",
"usage": "M"
},
{
"ie": "Plan",
"key": "common-instance-reference",
"usage": "C"
},
{
"ie": "Plan",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Plan",
"key": "sop-common",
"usage": "M"
}
],
"rt-beams-treatment-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-general-treatment-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-patient-setup",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-machine-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "measured-dose-reference-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "calculated-dose-reference-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "rt-beams-session-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-summary-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "common-instance-reference",
"usage": "U"
}
],
"rt-brachy-application-setup-delivery-instruction": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Plan",
"key": "rt-brachy-application-setup-delivery-instruction",
"usage": "M"
},
{
"ie": "Plan",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Plan",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Plan",
"key": "sop-common",
"usage": "M"
}
],
"rt-brachy-treatment-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-general-treatment-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-patient-setup",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-machine-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "measured-dose-reference-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "calculated-dose-reference-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "rt-brachy-session-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-summary-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "common-instance-reference",
"usage": "U"
}
],
"rt-dose": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Dose",
"key": "general-image",
"usage": "C"
},
{
"ie": "Dose",
"key": "image-plane",
"usage": "C"
},
{
"ie": "Dose",
"key": "image-pixel",
"usage": "C"
},
{
"ie": "Dose",
"key": "multi-frame",
"usage": "C"
},
{
"ie": "Dose",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Dose",
"key": "multi-frame-overlay",
"usage": "U"
},
{
"ie": "Dose",
"key": "modality-lut",
"usage": "U"
},
{
"ie": "Dose",
"key": "rt-dose",
"usage": "M"
},
{
"ie": "Dose",
"key": "rt-dvh",
"usage": "U"
},
{
"ie": "Dose",
"key": "structure-set",
"usage": "C"
},
{
"ie": "Dose",
"key": "roi-contour",
"usage": "C"
},
{
"ie": "Dose",
"key": "rt-dose-roi",
"usage": "C"
},
{
"ie": "Dose",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Dose",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Dose",
"key": "frame-extraction",
"usage": "C"
}
],
"rt-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "rt-image",
"usage": "M"
},
{
"ie": "Image",
"key": "modality-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "approval",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"rt-ion-beams-treatment-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-general-treatment-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-patient-setup",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-machine-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "measured-dose-reference-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "calculated-dose-reference-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "rt-ion-beams-session-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-summary-record",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "common-instance-reference",
"usage": "U"
}
],
"rt-ion-plan": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Plan",
"key": "rt-general-plan",
"usage": "M"
},
{
"ie": "Plan",
"key": "rt-prescription",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-ion-tolerance-tables",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-patient-setup",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-fraction-scheme",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-ion-beams",
"usage": "C"
},
{
"ie": "Plan",
"key": "approval",
"usage": "U"
},
{
"ie": "Plan",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Plan",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Plan",
"key": "common-instance-reference",
"usage": "U"
}
],
"rt-physician-intent": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "RT Physician Intent",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Physician Intent",
"key": "rt-physician-intent",
"usage": "M"
},
{
"ie": "RT Physician Intent",
"key": "rt-enhanced-prescription",
"usage": "U"
},
{
"ie": "RT Physician Intent",
"key": "rt-treatment-phase-intent",
"usage": "C"
},
{
"ie": "RT Physician Intent",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Physician Intent",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Physician Intent",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"rt-plan": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Plan",
"key": "rt-general-plan",
"usage": "M"
},
{
"ie": "Plan",
"key": "rt-prescription",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-tolerance-tables",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-patient-setup",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-fraction-scheme",
"usage": "U"
},
{
"ie": "Plan",
"key": "rt-beams",
"usage": "C"
},
{
"ie": "Plan",
"key": "rt-brachy-application-setups",
"usage": "C"
},
{
"ie": "Plan",
"key": "approval",
"usage": "U"
},
{
"ie": "Plan",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Plan",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Plan",
"key": "common-instance-reference",
"usage": "U"
}
],
"rt-radiation-record-set": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-radiation-record-set",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-dose-contribution-record",
"usage": "C"
},
{
"ie": "RT Delivered Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"rt-radiation-salvage-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "RT Delivered Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-radiation-record-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-radiation-salvage-record",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"rt-radiation-set": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "RT Radiation Set",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Radiation Set",
"key": "rt-radiation-set",
"usage": "M"
},
{
"ie": "RT Radiation Set",
"key": "rt-dose-contribution",
"usage": "C"
},
{
"ie": "RT Radiation Set",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Radiation Set",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Radiation Set",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"rt-segment-annotation": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "RT Segment Annotation",
"key": "rt-segment-annotation",
"usage": "M"
},
{
"ie": "RT Segment Annotation",
"key": "segment-reference",
"usage": "M"
},
{
"ie": "RT Segment Annotation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Segment Annotation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Segment Annotation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Segment Annotation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"rt-structure-set": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Structure Set",
"key": "structure-set",
"usage": "M"
},
{
"ie": "Structure Set",
"key": "roi-contour",
"usage": "M"
},
{
"ie": "Structure Set",
"key": "rt-roi-observations",
"usage": "M"
},
{
"ie": "Structure Set",
"key": "approval",
"usage": "U"
},
{
"ie": "Structure Set",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Structure Set",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Structure Set",
"key": "common-instance-reference",
"usage": "U"
}
],
"rt-treatment-summary-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "rt-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-general-treatment-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "rt-treatment-summary-record",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Treatment Record",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Treatment Record",
"key": "common-instance-reference",
"usage": "U"
}
],
"secondary-capture-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "U"
},
{
"ie": "Equipment",
"key": "sc-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "sc-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "modality-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"segmentation": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "segmentation-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "segmentation-image",
"usage": "M"
},
{
"ie": "Image",
"key": "segmentation-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"simplified-adult-echo-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "timezone",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"sleep-electroencephalogram": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform-identification",
"usage": "M"
},
{
"ie": "Waveform",
"key": "waveform",
"usage": "M"
},
{
"ie": "Waveform",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Waveform",
"key": "waveform-annotation",
"usage": "C"
},
{
"ie": "Waveform",
"key": "sop-common",
"usage": "M"
}
],
"spatial-fiducials": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "spatial-fiducials-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Spatial Fiducials",
"key": "spatial-fiducials",
"usage": "M"
},
{
"ie": "Spatial Fiducials",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Spatial Fiducials",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Spatial Fiducials",
"key": "sop-common",
"usage": "M"
}
],
"spatial-registration": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "spatial-registration-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Spatial Registration",
"key": "spatial-registration",
"usage": "M"
},
{
"ie": "Spatial Registration",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Spatial Registration",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Spatial Registration",
"key": "sop-common",
"usage": "M"
}
],
"spectacle-prescription-report": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"stereometric-relationship": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "stereometric-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Stereometric Relationship",
"key": "stereometric-relationship",
"usage": "M"
},
{
"ie": "Stereometric Relationship",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Stereometric Relationship",
"key": "sop-common",
"usage": "M"
}
],
"subjective-refraction-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "subjective-refraction-measurements-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "subjective-refraction-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"surface-scan-mesh": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "optical-surface-scanner-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Surface",
"key": "surface-mesh",
"usage": "M"
},
{
"ie": "Surface",
"key": "uv-mapping",
"usage": "U"
},
{
"ie": "Surface",
"key": "scan-procedure",
"usage": "M"
},
{
"ie": "Surface",
"key": "specimen",
"usage": "U"
},
{
"ie": "Surface",
"key": "sop-common",
"usage": "M"
}
],
"surface-scan-point-cloud": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "optical-surface-scanner-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Surface",
"key": "point-cloud",
"usage": "M"
},
{
"ie": "Surface",
"key": "uv-mapping",
"usage": "U"
},
{
"ie": "Surface",
"key": "scan-procedure",
"usage": "M"
},
{
"ie": "Surface",
"key": "specimen",
"usage": "U"
},
{
"ie": "Surface",
"key": "sop-common",
"usage": "M"
}
],
"surface-segmentation": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "segmentation-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Surface",
"key": "surface-segmentation",
"usage": "M"
},
{
"ie": "Surface",
"key": "surface-mesh",
"usage": "M"
},
{
"ie": "Surface",
"key": "common-instance-reference",
"usage": "C"
},
{
"ie": "Surface",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Surface",
"key": "sop-common",
"usage": "M"
}
],
"tomotherapeutic-radiation": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "rt-radiation-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "tomotherapeutic-delivery-device",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "tomotherapeutic-beam",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"tomotherapeutic-radiation-record": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-rt-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "RT Delivered Radiation",
"key": "general-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-delivery-device-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "rt-radiation-record-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "tomotherapeutic-delivery-device",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "tomotherapeutic-beam",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "sop-common",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "RT Delivered Radiation",
"key": "radiotherapy-common-instance",
"usage": "M"
}
],
"tractography-results": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "tractography-results-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Tractography Results",
"key": "tractography-results",
"usage": "M"
},
{
"ie": "Tractography Results",
"key": "specimen",
"usage": "U"
},
{
"ie": "Tractography Results",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Tractography Results",
"key": "sop-common",
"usage": "M"
}
],
"us-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "us-region-calibration",
"usage": "U"
},
{
"ie": "Image",
"key": "us-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"us-multi-frame-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "palette-color-lookup-table",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "us-region-calibration",
"usage": "U"
},
{
"ie": "Image",
"key": "us-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-overlay",
"usage": "U"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"video-endoscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "cine",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"video-microscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "cine",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"video-photographic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "cine",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"visual-acuity-measurements": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "visual-acuity-measurements-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Measurements",
"key": "general-ophthalmic-refractive-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "visual-acuity-measurements",
"usage": "M"
},
{
"ie": "Measurements",
"key": "sop-common",
"usage": "M"
}
],
"vl-endoscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"vl-microscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "optical-path",
"usage": "U"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"vl-photographic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "vl-photographic-equipment",
"usage": "U"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "C"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "vl-photographic-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "vl-photographic-geolocation",
"usage": "U"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"vl-slide-coordinates-microscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "M"
},
{
"ie": "Image",
"key": "vl-image",
"usage": "M"
},
{
"ie": "Image",
"key": "slide-coordinates",
"usage": "M"
},
{
"ie": "Image",
"key": "optical-path",
"usage": "U"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
}
],
"vl-whole-slide-microscopy-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "whole-slide-microscopy-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "vl-whole-slide-microscopy-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "M"
},
{
"ie": "Image",
"key": "specimen",
"usage": "M"
},
{
"ie": "Image",
"key": "whole-slide-microscopy-image",
"usage": "M"
},
{
"ie": "Image",
"key": "optical-path",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-resolution-navigation",
"usage": "C"
},
{
"ie": "Image",
"key": "slide-label",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "M"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"volume-rendering-volumetric-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volumetric-presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volumetric-presentation-state-relationship",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volume-cropping",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "presentation-view-description",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volume-render-geometry",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "render-shading",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "render-display",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "volumetric-graphic-annotation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-group",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "presentation-animation",
"usage": "U"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "common-instance-reference",
"usage": "M"
}
],
"wide-field-ophthalmic-photography-3d-coordinates-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-photography-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Image",
"key": "ophthalmic-photography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "wide-field-ophthalmic-photography-3d-coordinates",
"usage": "M"
},
{
"ie": "Image",
"key": "wide-field-ophthalmic-photography-quality-rating",
"usage": "C"
},
{
"ie": "Image",
"key": "ocular-region-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photographic-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"wide-field-ophthalmic-photography-stereographic-projection-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "ophthalmic-photography-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "M"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "U"
},
{
"ie": "Image",
"key": "ophthalmic-photography-image",
"usage": "M"
},
{
"ie": "Image",
"key": "wide-field-ophthalmic-photography-stereographic-projection",
"usage": "M"
},
{
"ie": "Image",
"key": "wide-field-ophthalmic-photography-quality-rating",
"usage": "C"
},
{
"ie": "Image",
"key": "ocular-region-imaged",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photography-acquisition-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "ophthalmic-photographic-parameters",
"usage": "M"
},
{
"ie": "Image",
"key": "icc-profile",
"usage": "C"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"x-ray-3d-angiographic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-3d-angiographic-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "cardiac-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "respiratory-synchronization",
"usage": "C"
},
{
"ie": "Image",
"key": "patient-orientation",
"usage": "U"
},
{
"ie": "Image",
"key": "image---equipment-coordinate-relationship",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-image",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-3d-angiographic-image-contributing-sources",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-angiographic-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-reconstruction",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"x-ray-3d-craniofacial-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "enhanced-contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "acquisition-context",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-3d-craniofacial-image-multi-frame-functional-groups",
"usage": "M"
},
{
"ie": "Image",
"key": "multi-frame-dimension",
"usage": "U"
},
{
"ie": "Image",
"key": "patient-orientation",
"usage": "U"
},
{
"ie": "Image",
"key": "image---equipment-coordinate-relationship",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-image",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-3d-craniofacial-image-contributing-sources",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-craniofacial-acquisition",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-3d-reconstruction",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"x-ray-angiographic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "C"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "mask",
"usage": "C"
},
{
"ie": "Image",
"key": "display-shutter",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-image",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-acquisition",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-collimator",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-table",
"usage": "C"
},
{
"ie": "Image",
"key": "xa-positioner",
"usage": "M"
},
{
"ie": "Image",
"key": "dx-detector",
"usage": "U"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-overlay",
"usage": "C"
},
{
"ie": "Image",
"key": "modality-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"x-ray-radiation-dose-sr": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "sr-document-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "C"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-general",
"usage": "M"
},
{
"ie": "Document",
"key": "sr-document-content",
"usage": "M"
},
{
"ie": "Document",
"key": "sop-common",
"usage": "M"
}
],
"x-ray-radiofluoroscopic-image": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Frame of Reference",
"key": "synchronization",
"usage": "U"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Image",
"key": "general-image",
"usage": "M"
},
{
"ie": "Image",
"key": "general-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "image-pixel",
"usage": "M"
},
{
"ie": "Image",
"key": "contrast-bolus",
"usage": "C"
},
{
"ie": "Image",
"key": "cine",
"usage": "C"
},
{
"ie": "Image",
"key": "multi-frame",
"usage": "C"
},
{
"ie": "Image",
"key": "frame-pointers",
"usage": "U"
},
{
"ie": "Image",
"key": "mask",
"usage": "C"
},
{
"ie": "Image",
"key": "display-shutter",
"usage": "U"
},
{
"ie": "Image",
"key": "device",
"usage": "U"
},
{
"ie": "Image",
"key": "intervention",
"usage": "U"
},
{
"ie": "Image",
"key": "specimen",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-image",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-acquisition",
"usage": "M"
},
{
"ie": "Image",
"key": "x-ray-collimator",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-table",
"usage": "U"
},
{
"ie": "Image",
"key": "xrf-positioner",
"usage": "U"
},
{
"ie": "Image",
"key": "x-ray-tomography-acquisition",
"usage": "C"
},
{
"ie": "Image",
"key": "dx-detector",
"usage": "U"
},
{
"ie": "Image",
"key": "overlay-plane",
"usage": "U"
},
{
"ie": "Image",
"key": "multi-frame-overlay",
"usage": "C"
},
{
"ie": "Image",
"key": "modality-lut",
"usage": "C"
},
{
"ie": "Image",
"key": "voi-lut",
"usage": "U"
},
{
"ie": "Image",
"key": "sop-common",
"usage": "M"
},
{
"ie": "Image",
"key": "common-instance-reference",
"usage": "U"
},
{
"ie": "Image",
"key": "frame-extraction",
"usage": "C"
}
],
"xa-defined-procedure-protocol": [
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "protocol-context",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "clinical-trial-context",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "patient-specification",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "equipment-specification",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "instructions",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "patient-positioning",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "general-defined-acquisition",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "general-defined-reconstruction",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "defined-storage",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "sop-common",
"usage": "M"
}
],
"xa-performed-procedure-protocol": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "enhanced-series",
"usage": "M"
},
{
"ie": "Series",
"key": "xa-protocol-series",
"usage": "M"
},
{
"ie": "Frame of Reference",
"key": "frame-of-reference",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "protocol-context",
"usage": "M"
},
{
"ie": "Procedure Protocol",
"key": "patient-protocol-context",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "instructions",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "patient-positioning",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "performed-xa-acquisition",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "performed-xa-reconstruction",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "performed-storage",
"usage": "U"
},
{
"ie": "Procedure Protocol",
"key": "sop-common",
"usage": "M"
}
],
"xa-xrf-grayscale-softcopy-presentation-state": [
{
"ie": "Patient",
"key": "patient",
"usage": "M"
},
{
"ie": "Patient",
"key": "clinical-trial-subject",
"usage": "U"
},
{
"ie": "Study",
"key": "general-study",
"usage": "M"
},
{
"ie": "Study",
"key": "patient-study",
"usage": "U"
},
{
"ie": "Study",
"key": "clinical-trial-study",
"usage": "U"
},
{
"ie": "Series",
"key": "general-series",
"usage": "M"
},
{
"ie": "Series",
"key": "clinical-trial-series",
"usage": "U"
},
{
"ie": "Series",
"key": "presentation-series",
"usage": "M"
},
{
"ie": "Equipment",
"key": "general-equipment",
"usage": "M"
},
{
"ie": "Equipment",
"key": "enhanced-general-equipment",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-identification",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-relationship",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "presentation-state-shutter",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "bitmap-display-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-plane",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "overlay-activation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "displayed-area",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "graphic-annotation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "spatial-transformation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "graphic-layer",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "softcopy-voi-lut",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "xa-xrf-presentation-state-mask",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "xa-xrf-presentation-state-shutter",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "xa-xrf-presentation-state-presentation",
"usage": "C"
},
{
"ie": "Presentation State",
"key": "softcopy-presentation-lut",
"usage": "M"
},
{
"ie": "Presentation State",
"key": "sop-common",
"usage": "M"
}
]
}
# Maps a DICOM SOP Class UID (see the DICOM PS3.6 UID registry) to the key of
# the corresponding IOD definition in the module table defined above in this
# file.  Several UIDs intentionally share one IOD key — e.g. the
# "For Presentation" and "For Processing" variants of Digital X-Ray
# (…1.1.1 and …1.1.1.1) both resolve to "digital-x-ray-image".
SOP_CLASS_UID_IOD_KEY_MAP = {
    "1.2.840.10008.1.3.10": "basic-directory",
    "1.2.840.10008.5.1.4.1.1.1": "cr-image",
    "1.2.840.10008.5.1.4.1.1.1.1": "digital-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.1.1.1": "digital-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.1.2": "digital-mammography-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.1.2.1": "digital-mammography-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.1.3": "digital-intra-oral-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.1.3.1": "digital-intra-oral-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.104.1": "encapsulated-pdf",
    "1.2.840.10008.5.1.4.1.1.104.2": "encapsulated-cda",
    "1.2.840.10008.5.1.4.1.1.104.3": "encapsulated-stl",
    "1.2.840.10008.5.1.4.1.1.104.4": "encapsulated-obj",
    "1.2.840.10008.5.1.4.1.1.104.5": "encapsulated-mtl",
    "1.2.840.10008.5.1.4.1.1.11.1": "grayscale-softcopy-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.10": "volume-rendering-volumetric-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.11": "volume-rendering-volumetric-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.2": "color-softcopy-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.3": "pseudo-color-softcopy-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.4": "blending-softcopy-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.5": "xa-xrf-grayscale-softcopy-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.6": "planar-mpr-volumetric-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.7": "planar-mpr-volumetric-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.8": "advanced-blending-presentation-state",
    "1.2.840.10008.5.1.4.1.1.11.9": "volume-rendering-volumetric-presentation-state",
    "1.2.840.10008.5.1.4.1.1.12.1": "x-ray-angiographic-image",
    "1.2.840.10008.5.1.4.1.1.12.1.1": "enhanced-xa-image",
    "1.2.840.10008.5.1.4.1.1.12.2": "x-ray-radiofluoroscopic-image",
    "1.2.840.10008.5.1.4.1.1.12.2.1": "enhanced-xrf-image",
    "1.2.840.10008.5.1.4.1.1.128": "pet-image",
    "1.2.840.10008.5.1.4.1.1.128.1": "legacy-converted-enhanced-pet-image",
    "1.2.840.10008.5.1.4.1.1.13.1.1": "x-ray-3d-angiographic-image",
    "1.2.840.10008.5.1.4.1.1.13.1.2": "x-ray-3d-craniofacial-image",
    "1.2.840.10008.5.1.4.1.1.13.1.3": "breast-tomosynthesis-image",
    "1.2.840.10008.5.1.4.1.1.13.1.4": "breast-projection-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.13.1.5": "breast-projection-x-ray-image",
    "1.2.840.10008.5.1.4.1.1.130": "enhanced-pet-image",
    "1.2.840.10008.5.1.4.1.1.131": "basic-structured-display",
    "1.2.840.10008.5.1.4.1.1.14.1": "intravascular-optical-coherence-tomography-image",
    "1.2.840.10008.5.1.4.1.1.14.2": "intravascular-optical-coherence-tomography-image",
    "1.2.840.10008.5.1.4.1.1.2": "ct-image",
    "1.2.840.10008.5.1.4.1.1.2.1": "enhanced-ct-image",
    "1.2.840.10008.5.1.4.1.1.2.2": "legacy-converted-enhanced-ct-image",
    "1.2.840.10008.5.1.4.1.1.20": "nm-image",
    "1.2.840.10008.5.1.4.1.1.200.1": "ct-defined-procedure-protocol",
    "1.2.840.10008.5.1.4.1.1.200.2": "ct-performed-procedure-protocol",
    "1.2.840.10008.5.1.4.1.1.200.3": "protocol-approval",
    "1.2.840.10008.5.1.4.1.1.200.7": "xa-defined-procedure-protocol",
    "1.2.840.10008.5.1.4.1.1.200.8": "xa-performed-procedure-protocol",
    "1.2.840.10008.5.1.4.1.1.3.1": "us-multi-frame-image",
    "1.2.840.10008.5.1.4.1.1.30": "parametric-map",
    "1.2.840.10008.5.1.4.1.1.4": "mr-image",
    "1.2.840.10008.5.1.4.1.1.4.1": "enhanced-mr-image",
    "1.2.840.10008.5.1.4.1.1.4.2": "mr-spectroscopy",
    "1.2.840.10008.5.1.4.1.1.4.3": "enhanced-mr-color-image",
    "1.2.840.10008.5.1.4.1.1.4.4": "legacy-converted-enhanced-mr-image",
    "1.2.840.10008.5.1.4.1.1.481.1": "rt-image",
    "1.2.840.10008.5.1.4.1.1.481.10": "rt-physician-intent",
    "1.2.840.10008.5.1.4.1.1.481.11": "rt-segment-annotation",
    "1.2.840.10008.5.1.4.1.1.481.12": "rt-radiation-set",
    "1.2.840.10008.5.1.4.1.1.481.13": "c-arm-photon-electron-radiation",
    "1.2.840.10008.5.1.4.1.1.481.14": "tomotherapeutic-radiation",
    "1.2.840.10008.5.1.4.1.1.481.15": "robotic-arm-radiation",
    "1.2.840.10008.5.1.4.1.1.481.16": "rt-radiation-record-set",
    "1.2.840.10008.5.1.4.1.1.481.17": "rt-radiation-salvage-record",
    "1.2.840.10008.5.1.4.1.1.481.18": "tomotherapeutic-radiation-record",
    "1.2.840.10008.5.1.4.1.1.481.19": "c-arm-photon-electron-radiation-record",
    "1.2.840.10008.5.1.4.1.1.481.2": "rt-dose",
    "1.2.840.10008.5.1.4.1.1.481.20": "robotic-arm-radiation-record",
    "1.2.840.10008.5.1.4.1.1.481.3": "rt-structure-set",
    "1.2.840.10008.5.1.4.1.1.481.4": "rt-beams-treatment-record",
    "1.2.840.10008.5.1.4.1.1.481.5": "rt-plan",
    "1.2.840.10008.5.1.4.1.1.481.6": "rt-brachy-treatment-record",
    "1.2.840.10008.5.1.4.1.1.481.7": "rt-treatment-summary-record",
    "1.2.840.10008.5.1.4.1.1.481.8": "rt-ion-plan",
    "1.2.840.10008.5.1.4.1.1.481.9": "rt-ion-beams-treatment-record",
    "1.2.840.10008.5.1.4.1.1.6.1": "us-image",
    "1.2.840.10008.5.1.4.1.1.6.2": "enhanced-us-volume",
    "1.2.840.10008.5.1.4.1.1.66": "raw-data",
    "1.2.840.10008.5.1.4.1.1.66.1": "spatial-registration",
    "1.2.840.10008.5.1.4.1.1.66.2": "spatial-fiducials",
    "1.2.840.10008.5.1.4.1.1.66.3": "deformable-spatial-registration",
    "1.2.840.10008.5.1.4.1.1.66.4": "segmentation",
    "1.2.840.10008.5.1.4.1.1.66.5": "surface-segmentation",
    "1.2.840.10008.5.1.4.1.1.66.6": "tractography-results",
    "1.2.840.10008.5.1.4.1.1.67": "real-world-value-mapping",
    "1.2.840.10008.5.1.4.1.1.68.1": "surface-scan-mesh",
    "1.2.840.10008.5.1.4.1.1.68.2": "surface-scan-point-cloud",
    "1.2.840.10008.5.1.4.1.1.7": "secondary-capture-image",
    "1.2.840.10008.5.1.4.1.1.7.1": "multi-frame-single-bit-sc-image",
    "1.2.840.10008.5.1.4.1.1.7.2": "multi-frame-grayscale-byte-sc-image",
    "1.2.840.10008.5.1.4.1.1.7.3": "multi-frame-grayscale-word-sc-image",
    "1.2.840.10008.5.1.4.1.1.7.4": "multi-frame-true-color-sc-image",
    "1.2.840.10008.5.1.4.1.1.77.1.1": "vl-endoscopic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.1.1": "video-endoscopic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.2": "vl-microscopic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.2.1": "video-microscopic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.3": "vl-slide-coordinates-microscopic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.4": "vl-photographic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.4.1": "video-photographic-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.1": "ophthalmic-photography-8-bit-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.2": "ophthalmic-photography-16-bit-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.3": "stereometric-relationship",
    "1.2.840.10008.5.1.4.1.1.77.1.5.4": "ophthalmic-tomography-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.5": "wide-field-ophthalmic-photography-stereographic-projection-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.6": "wide-field-ophthalmic-photography-3d-coordinates-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.7": "ophthalmic-optical-coherence-tomography-en-face-image",
    "1.2.840.10008.5.1.4.1.1.77.1.5.8": "ophthalmic-optical-coherence-tomography-b-scan-volume-analysis",
    "1.2.840.10008.5.1.4.1.1.77.1.6": "vl-whole-slide-microscopy-image",
    "1.2.840.10008.5.1.4.1.1.77.1.7": "dermoscopic-photography-image",
    "1.2.840.10008.5.1.4.1.1.78.1": "lensometry-measurements",
    "1.2.840.10008.5.1.4.1.1.78.2": "autorefraction-measurements",
    "1.2.840.10008.5.1.4.1.1.78.3": "keratometry-measurements",
    "1.2.840.10008.5.1.4.1.1.78.4": "subjective-refraction-measurements",
    "1.2.840.10008.5.1.4.1.1.78.5": "visual-acuity-measurements",
    "1.2.840.10008.5.1.4.1.1.78.6": "spectacle-prescription-report",
    "1.2.840.10008.5.1.4.1.1.78.7": "ophthalmic-axial-measurements",
    "1.2.840.10008.5.1.4.1.1.78.8": "intraocular-lens-calculations",
    "1.2.840.10008.5.1.4.1.1.79.1": "macular-grid-thickness-and-volume-report",
    "1.2.840.10008.5.1.4.1.1.80.1": "ophthalmic-visual-field-static-perimetry-measurements",
    "1.2.840.10008.5.1.4.1.1.81.1": "ophthalmic-thickness-map",
    "1.2.840.10008.5.1.4.1.1.82.1": "corneal-topography-map",
    "1.2.840.10008.5.1.4.1.1.88.11": "basic-text-sr",
    "1.2.840.10008.5.1.4.1.1.88.22": "enhanced-sr",
    "1.2.840.10008.5.1.4.1.1.88.33": "comprehensive-sr",
    "1.2.840.10008.5.1.4.1.1.88.34": "comprehensive-3d-sr",
    "1.2.840.10008.5.1.4.1.1.88.35": "extensible-sr",
    "1.2.840.10008.5.1.4.1.1.88.40": "procedure-log",
    "1.2.840.10008.5.1.4.1.1.88.50": "mammography-cad-sr",
    "1.2.840.10008.5.1.4.1.1.88.59": "key-object-selection-document",
    "1.2.840.10008.5.1.4.1.1.88.65": "chest-cad-sr",
    "1.2.840.10008.5.1.4.1.1.88.67": "x-ray-radiation-dose-sr",
    "1.2.840.10008.5.1.4.1.1.88.68": "radiopharmaceutical-radiation-dose-sr",
    "1.2.840.10008.5.1.4.1.1.88.69": "colon-cad-sr",
    "1.2.840.10008.5.1.4.1.1.88.70": "implantation-plan-sr-document",
    "1.2.840.10008.5.1.4.1.1.88.71": "acquisition-context-sr",
    "1.2.840.10008.5.1.4.1.1.88.72": "simplified-adult-echo-sr",
    "1.2.840.10008.5.1.4.1.1.88.73": "patient-radiation-dose-structured-report",
    "1.2.840.10008.5.1.4.1.1.88.74": "planned-imaging-agent-administration-sr",
    "1.2.840.10008.5.1.4.1.1.88.75": "performed-imaging-agent-administration-sr",
    # NOTE(review): 88.73 and 88.76 both map to
    # "patient-radiation-dose-structured-report" — confirm against the PS3.6
    # registry that this is the intended mapping for 88.76.
    "1.2.840.10008.5.1.4.1.1.88.76": "patient-radiation-dose-structured-report",
    "1.2.840.10008.5.1.4.1.1.9.1.1": "12-lead-ecg",
    "1.2.840.10008.5.1.4.1.1.9.1.2": "general-ecg",
    "1.2.840.10008.5.1.4.1.1.9.1.3": "ambulatory-ecg",
    "1.2.840.10008.5.1.4.1.1.9.2.1": "hemodynamic-waveform",
    "1.2.840.10008.5.1.4.1.1.9.3.1": "basic-cardiac-electrophysiology-waveform",
    "1.2.840.10008.5.1.4.1.1.9.4.1": "basic-voice-audio-waveform",
    "1.2.840.10008.5.1.4.1.1.9.4.2": "general-audio-waveform",
    "1.2.840.10008.5.1.4.1.1.9.5.1": "arterial-pulse-waveform",
    "1.2.840.10008.5.1.4.1.1.9.6.1": "respiratory-waveform",
    "1.2.840.10008.5.1.4.1.1.9.6.2": "multi-channel-respiratory-waveform",
    "1.2.840.10008.5.1.4.1.1.9.7.1": "routine-scalp-electroencephalogram",
    "1.2.840.10008.5.1.4.1.1.9.7.2": "electromyogram",
    "1.2.840.10008.5.1.4.1.1.9.7.3": "electrooculogram",
    "1.2.840.10008.5.1.4.1.1.9.7.4": "sleep-electroencephalogram",
    "1.2.840.10008.5.1.4.1.1.9.8.1": "body-position-waveform",
    "1.2.840.10008.5.1.4.1.1.90.1": "content-assessment-results",
    "1.2.840.10008.5.1.4.34.10": "rt-brachy-application-setup-delivery-instruction",
    "1.2.840.10008.5.1.4.34.7": "rt-beams-delivery-instruction",
    "1.2.840.10008.5.1.4.38.1": "hanging-protocol",
    "1.2.840.10008.5.1.4.39.1": "color-palette",
    "1.2.840.10008.5.1.4.43.1": "generic-implant-template",
    "1.2.840.10008.5.1.4.44.1": "implant-assembly-template",
    "1.2.840.10008.5.1.4.45.1": "implant-template-group"
}
| 23.719366
| 114
| 0.314159
| 25,719
| 363,523
| 4.44018
| 0.017108
| 0.082909
| 0.104031
| 0.044738
| 0.963519
| 0.953536
| 0.933422
| 0.914227
| 0.900908
| 0.893806
| 0
| 0.016682
| 0.499212
| 363,523
| 15,326
| 115
| 23.719366
| 0.610608
| 0.000234
| 0
| 0.578226
| 1
| 0.009464
| 0.334189
| 0.096874
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.000065
| 0
| 0.000065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2a6ec62fbe1e1820e66f836ebbb07fc996d82bf5
| 31,963
|
py
|
Python
|
sql/tests/ast/test_logical.py
|
AprilXiaoyanLiu/whitenoise-system
|
0e94d2cc8114b97a61d5d2e45278428f91f1e687
|
[
"MIT"
] | 63
|
2020-03-26T15:26:10.000Z
|
2020-10-22T06:26:38.000Z
|
sql/tests/ast/test_logical.py
|
AprilXiaoyanLiu/whitenoise-system
|
0e94d2cc8114b97a61d5d2e45278428f91f1e687
|
[
"MIT"
] | 82
|
2020-03-10T17:54:48.000Z
|
2020-10-23T02:11:06.000Z
|
sql/tests/ast/test_logical.py
|
AprilXiaoyanLiu/whitenoise-system
|
0e94d2cc8114b97a61d5d2e45278428f91f1e687
|
[
"MIT"
] | 15
|
2020-03-10T05:52:14.000Z
|
2020-10-09T09:09:52.000Z
|
import pytest
from snsql._ast.expressions.logical import *
from snsql._ast.tokens import Column, Literal
from snsql.sql.parse import QueryParser
from datetime import date
import numpy as np
"""
Test evaluation of logical operators. We need to support
operands of various types, including:
* Parsed atomic types
* Atomic types loaded from the database
* String literal representations of types
* Column names bound to a column value in the row
Test harness creates arrays with several representations of
values for numeric, date, and string
"""
# --- Test fixtures ---------------------------------------------------------
# Several representations of the same logical value (int, float, string,
# numpy-style conversions) so each operator can be exercised across the
# operand types listed in the module docstring.
vals_5 = [5, 5.0, "5", "5.0", float(5.0), int(5)]
names_5 = ["i5", "f5", "si5", "sf5", "npf5", "npi5"]
vals_7 = [7, 7.0, "7", "7.0"]
names_7 = ["i7", "f7", "si7", "sf7"]

# Two distinct dates, each as an ISO string and as a datetime.date.
d1_str = "1978-06-30"
d2_str = "1984-10-14"
vals_d1 = [d1_str, date.fromisoformat(d1_str)]
names_d1 = ["sd1", "dd1"]
vals_d2 = [d2_str, date.fromisoformat(d2_str)]
names_d2 = ["sd2", "dd2"]

vals_str = ["Smart", "Noise"]
names_str = ["smart", "noise"]
vals_f = ["false", False]
names_f = ["s_f", "b_f"]
vals_t = ["true", True]
names_t = ["s_t", "b_t"]

# Flatten every group into a single bindings dict keyed by lower-cased name,
# used wherever a Column must resolve against a row.
vals = [*vals_5, *vals_7, *vals_d1, *vals_d2, *vals_str, *vals_t, *vals_f]
names = [*names_5, *names_7, *names_d1, *names_d2, *names_str, *names_t, *names_f]
bindings = {name.lower(): val for name, val in zip(names, vals)}
class TestLogical:
    """Exercise BooleanCompare over every supported operand representation.

    Each operator is checked three ways: both operands as literals, the left
    operand bound through a Column, and the right operand bound through a
    Column, across numeric, date, string, and boolean values.

    Fix: ``test_eq`` and ``test_neq`` previously declared their instance
    parameter as ``Self`` instead of ``self`` (PEP 8, and inconsistent with
    the other methods in this class).
    """
    def test_eq(self):
        """'=' is true for equal values and false for unequal ones."""
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            # All True
            for v5b, n5b in zip(vals_5, names_5):
                # Raw string pairs ("5" vs "5.0") would compare textually,
                # so string/string combinations are skipped.
                if not (isinstance(v5, str) and isinstance(v5b, str)):
                    assert(BooleanCompare(Literal(v5), '=', Literal(v5b)).evaluate(None))
                    assert(BooleanCompare(Column(n5), '=', Literal(v5b)).evaluate(bindings))
                    assert(BooleanCompare(Literal(v5), '=', Column(n5b)).evaluate(bindings))
            # All False
            for v7, n7 in zip(vals_7, names_7):
                if not (isinstance(v5, str) and isinstance(v7, str)):
                    assert(not BooleanCompare(Literal(v5), '=', Literal(v7)).evaluate(None))
                    assert(not BooleanCompare(Column(n5), '=', Literal(v7)).evaluate(bindings))
                    assert(not BooleanCompare(Literal(v5), '=', Column(n7)).evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d1b, n1b in zip(vals_d1, names_d1):
                # all True
                comp = BooleanCompare(Literal(d1b), '=', Literal(d1))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n1b), '=', Literal(d1))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1b), '=', Column(n1))
                assert(comp.evaluate(bindings))
            for d2, n2 in zip(vals_d2, names_d2):
                # all False
                comp = BooleanCompare(Literal(d1), '=', Literal(d2))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n1), '=', Literal(d2))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1), '=', Column(n2))
                assert(not comp.evaluate(bindings))
        # test strings
        # All True
        assert(BooleanCompare(Literal(vals_str[0]), '=', Literal(vals_str[0])).evaluate(None))
        assert(BooleanCompare(Column(names_str[0]), '=', Literal(vals_str[0])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[0]), '=', Column(names_str[0])).evaluate(bindings))
        # All False
        assert(not BooleanCompare(Literal(vals_str[1]), '=', Literal(vals_str[0])).evaluate(None))
        assert(not BooleanCompare(Column(names_str[1]), '=', Literal(vals_str[0])).evaluate(bindings))
        assert(not BooleanCompare(Literal(vals_str[1]), '=', Column(names_str[0])).evaluate(bindings))
    def test_neq(self):
        """'!=' and '<>' are interchangeable inequality operators."""
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            # All False
            for v5b, n5b in zip(vals_5, names_5):
                if not (isinstance(v5, str) and isinstance(v5b, str)):
                    assert(not BooleanCompare(Literal(v5), '!=', Literal(v5b)).evaluate(None))
                    assert(not BooleanCompare(Column(n5), '!=', Literal(v5b)).evaluate(bindings))
                    assert(not BooleanCompare(Literal(v5), '!=', Column(n5b)).evaluate(bindings))
            # All True
            for v7, n7 in zip(vals_7, names_7):
                if not (isinstance(v5, str) and isinstance(v7, str)):
                    assert(BooleanCompare(Literal(v5), '<>', Literal(v7)).evaluate(None))
                    assert(BooleanCompare(Column(n5), '!=', Literal(v7)).evaluate(bindings))
                    assert(BooleanCompare(Literal(v5), '<>', Column(n7)).evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d1b, n1b in zip(vals_d1, names_d1):
                # all False
                comp = BooleanCompare(Literal(d1b), '!=', Literal(d1))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n1b), '<>', Literal(d1))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1b), '!=', Column(n1))
                assert(not comp.evaluate(bindings))
            for d2, n2 in zip(vals_d2, names_d2):
                # all True
                comp = BooleanCompare(Literal(d1), '!=', Literal(d2))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n1), '<>', Literal(d2))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1), '!=', Column(n2))
                assert(comp.evaluate(bindings))
        # test strings
        # All False
        assert(not BooleanCompare(Literal(vals_str[0]), '!=', Literal(vals_str[0])).evaluate(None))
        assert(not BooleanCompare(Column(names_str[0]), '<>', Literal(vals_str[0])).evaluate(bindings))
        assert(not BooleanCompare(Literal(vals_str[0]), '!=', Column(names_str[0])).evaluate(bindings))
        # All True
        assert(BooleanCompare(Literal(vals_str[1]), '!=', Literal(vals_str[0])).evaluate(None))
        assert(BooleanCompare(Column(names_str[1]), '<>', Literal(vals_str[0])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[1]), '!=', Column(names_str[0])).evaluate(bindings))
    def test_gt(self):
        """'>' orders numbers, dates, and strings (lexicographically)."""
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            for v7, n7 in zip(vals_7, names_7):
                # all True
                comp = BooleanCompare(Literal(v7), '>', Literal(v5))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n7), '>', Literal(v5))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v7), '>', Column(n5))
                assert(comp.evaluate(bindings))
                # all False
                comp = BooleanCompare(Literal(v5), '>', Literal(v7))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n5), '>', Literal(v7))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v5), '>', Column(n7))
                assert(not comp.evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d2, n2 in zip(vals_d2, names_d2):
                # all True
                comp = BooleanCompare(Literal(d2), '>', Literal(d1))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n2), '>', Literal(d1))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d2), '>', Column(n1))
                assert(comp.evaluate(bindings))
                # all False
                comp = BooleanCompare(Literal(d1), '>', Literal(d2))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n1), '>', Literal(d2))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1), '>', Column(n2))
                assert(not comp.evaluate(bindings))
        # test strings ("Smart" > "Noise" lexicographically)
        # All True
        assert(BooleanCompare(Literal(vals_str[0]), '>', Literal(vals_str[1])).evaluate(None))
        assert(BooleanCompare(Column(names_str[0]), '>', Literal(vals_str[1])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[0]), '>', Column(names_str[1])).evaluate(bindings))
        # All False
        assert(not BooleanCompare(Literal(vals_str[1]), '>', Literal(vals_str[0])).evaluate(None))
        assert(not BooleanCompare(Column(names_str[1]), '>', Literal(vals_str[0])).evaluate(bindings))
        assert(not BooleanCompare(Literal(vals_str[1]), '>', Column(names_str[0])).evaluate(bindings))
    def test_lt(self):
        """'<' is the mirror of test_gt."""
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            for v7, n7 in zip(vals_7, names_7):
                # all False
                comp = BooleanCompare(Literal(v7), '<', Literal(v5))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n7), '<', Literal(v5))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v7), '<', Column(n5))
                assert(not comp.evaluate(bindings))
                # all True
                comp = BooleanCompare(Literal(v5), '<', Literal(v7))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n5), '<', Literal(v7))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v5), '<', Column(n7))
                assert(comp.evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d2, n2 in zip(vals_d2, names_d2):
                # all False
                comp = BooleanCompare(Literal(d2), '<', Literal(d1))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n2), '<', Literal(d1))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d2), '<', Column(n1))
                assert(not comp.evaluate(bindings))
                # all True
                comp = BooleanCompare(Literal(d1), '<', Literal(d2))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n1), '<', Literal(d2))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1), '<', Column(n2))
                assert(comp.evaluate(bindings))
        # test strings
        # All False
        assert(not BooleanCompare(Literal(vals_str[0]), '<', Literal(vals_str[1])).evaluate(None))
        assert(not BooleanCompare(Column(names_str[0]), '<', Literal(vals_str[1])).evaluate(bindings))
        assert(not BooleanCompare(Literal(vals_str[0]), '<', Column(names_str[1])).evaluate(bindings))
        # All True
        assert(BooleanCompare(Literal(vals_str[1]), '<', Literal(vals_str[0])).evaluate(None))
        assert(BooleanCompare(Column(names_str[1]), '<', Literal(vals_str[0])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[1]), '<', Column(names_str[0])).evaluate(bindings))
    def test_gte(self):
        """'>=' combines the '>' cases with equality cases."""
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            for v7, n7 in zip(vals_7, names_7):
                # all True
                comp = BooleanCompare(Literal(v7), '>=', Literal(v5))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n7), '>=', Literal(v5))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v7), '>=', Column(n5))
                assert(comp.evaluate(bindings))
                # all False
                comp = BooleanCompare(Literal(v5), '>=', Literal(v7))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n5), '>=', Literal(v7))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v5), '>=', Column(n7))
                assert(not comp.evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d2, n2 in zip(vals_d2, names_d2):
                # all True
                comp = BooleanCompare(Literal(d2), '>=', Literal(d1))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n2), '>=', Literal(d1))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d2), '>=', Column(n1))
                assert(comp.evaluate(bindings))
                # all False
                comp = BooleanCompare(Literal(d1), '>=', Literal(d2))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n1), '>=', Literal(d2))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1), '>=', Column(n2))
                assert(not comp.evaluate(bindings))
        # test strings
        # All True
        assert(BooleanCompare(Literal(vals_str[0]), '>=', Literal(vals_str[1])).evaluate(None))
        assert(BooleanCompare(Column(names_str[0]), '>=', Literal(vals_str[1])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[0]), '>=', Column(names_str[1])).evaluate(bindings))
        # All False
        assert(not BooleanCompare(Literal(vals_str[1]), '>=', Literal(vals_str[0])).evaluate(None))
        assert(not BooleanCompare(Column(names_str[1]), '>=', Literal(vals_str[0])).evaluate(bindings))
        assert(not BooleanCompare(Literal(vals_str[1]), '>=', Column(names_str[0])).evaluate(bindings))
        # Test equality
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            # All True
            for v5b, n5b in zip(vals_5, names_5):
                if not (isinstance(v5, str) and isinstance(v5b, str)):
                    assert(BooleanCompare(Literal(v5), '>=', Literal(v5b)).evaluate(None))
                    assert(BooleanCompare(Column(n5), '>=', Literal(v5b)).evaluate(bindings))
                    assert(BooleanCompare(Literal(v5), '>=', Column(n5b)).evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d1b, n1b in zip(vals_d1, names_d1):
                # all True
                comp = BooleanCompare(Literal(d1b), '>=', Literal(d1))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n1b), '>=', Literal(d1))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1b), '>=', Column(n1))
                assert(comp.evaluate(bindings))
        # test strings
        # All True
        assert(BooleanCompare(Literal(vals_str[0]), '>=', Literal(vals_str[0])).evaluate(None))
        assert(BooleanCompare(Column(names_str[0]), '>=', Literal(vals_str[0])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[0]), '>=', Column(names_str[0])).evaluate(bindings))
    def test_lte(self):
        """'<=' combines the '<' cases with equality cases."""
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            for v7, n7 in zip(vals_7, names_7):
                # all False
                comp = BooleanCompare(Literal(v7), '<=', Literal(v5))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n7), '<=', Literal(v5))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v7), '<=', Column(n5))
                assert(not comp.evaluate(bindings))
                # all True
                comp = BooleanCompare(Literal(v5), '<=', Literal(v7))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n5), '<=', Literal(v7))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(v5), '<=', Column(n7))
                assert(comp.evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d2, n2 in zip(vals_d2, names_d2):
                # all False
                comp = BooleanCompare(Literal(d2), '<=', Literal(d1))
                assert(not comp.evaluate(None))
                comp = BooleanCompare(Column(n2), '<=', Literal(d1))
                assert(not comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d2), '<=', Column(n1))
                assert(not comp.evaluate(bindings))
                # all True
                comp = BooleanCompare(Literal(d1), '<=', Literal(d2))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n1), '<=', Literal(d2))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1), '<=', Column(n2))
                assert(comp.evaluate(bindings))
        # test strings
        # All False
        assert(not BooleanCompare(Literal(vals_str[0]), '<=', Literal(vals_str[1])).evaluate(None))
        assert(not BooleanCompare(Column(names_str[0]), '<=', Literal(vals_str[1])).evaluate(bindings))
        assert(not BooleanCompare(Literal(vals_str[0]), '<=', Column(names_str[1])).evaluate(bindings))
        # All True
        assert(BooleanCompare(Literal(vals_str[1]), '<=', Literal(vals_str[0])).evaluate(None))
        assert(BooleanCompare(Column(names_str[1]), '<=', Literal(vals_str[0])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[1]), '<=', Column(names_str[0])).evaluate(bindings))
        # Test equality
        # test numeric values
        for v5, n5 in zip(vals_5, names_5):
            # All True
            for v5b, n5b in zip(vals_5, names_5):
                if not (isinstance(v5, str) and isinstance(v5b, str)):
                    assert(BooleanCompare(Literal(v5), '<=', Literal(v5b)).evaluate(None))
                    assert(BooleanCompare(Column(n5), '<=', Literal(v5b)).evaluate(bindings))
                    assert(BooleanCompare(Literal(v5), '<=', Column(n5b)).evaluate(bindings))
        # test dates
        for d1, n1 in zip(vals_d1, names_d1):
            for d1b, n1b in zip(vals_d1, names_d1):
                # all True
                comp = BooleanCompare(Literal(d1b), '<=', Literal(d1))
                assert(comp.evaluate(None))
                comp = BooleanCompare(Column(n1b), '<=', Literal(d1))
                assert(comp.evaluate(bindings))
                comp = BooleanCompare(Literal(d1b), '<=', Column(n1))
                assert(comp.evaluate(bindings))
        # test strings
        # All True
        assert(BooleanCompare(Literal(vals_str[0]), '<=', Literal(vals_str[0])).evaluate(None))
        assert(BooleanCompare(Column(names_str[0]), '<=', Literal(vals_str[0])).evaluate(bindings))
        assert(BooleanCompare(Literal(vals_str[0]), '<=', Column(names_str[0])).evaluate(bindings))
    def test_and(self):
        """'and' over boolean values given as bools or as "true"/"false" strings."""
        for tv, tn in zip(vals_t, names_t):
            for tvb, tnb in zip(vals_t, names_t):
                # All True
                if not (isinstance(tv, str) and isinstance(tvb, str)):
                    assert BooleanCompare(Literal(tv), 'and', Literal(tvb)).evaluate(None)
                    assert BooleanCompare(Column(tn), 'and', Literal(tvb)).evaluate(bindings)
                    assert BooleanCompare(Literal(tv), 'and', Column(tnb)).evaluate(bindings)
            for fv, fn in zip(vals_f, names_f):
                # All False
                if not (isinstance(tv, str) and isinstance(fv, str)):
                    assert not BooleanCompare(Literal(tv), 'and', Literal(fv)).evaluate(None)
                    assert not BooleanCompare(Column(tn), 'and', Literal(fv)).evaluate(bindings)
                    assert not BooleanCompare(Literal(tv), 'and', Column(fn)).evaluate(bindings)
class TestCaseExpression:
    """Evaluate CASE, IIF, and CHOOSE expressions parsed by QueryParser."""

    def test_simple_case(self):
        """Simple CASE with string branch results and an ELSE fallback."""
        expr = QueryParser().parse_expression(
            "CASE x WHEN 5 THEN 'five' WHEN 6 THEN 'six' ELSE '' END")
        assert expr.evaluate({'x': 5}) == "five"
        assert expr.evaluate({'x': 6}) == "six"
        assert expr.evaluate({'x': 7}) == ""

    def test_variable_replace(self):
        """CASE branches returning bound numeric variables."""
        expr = QueryParser().parse_expression(
            "CASE x WHEN 5 THEN y WHEN 6 THEN z ELSE 0 END")
        env = {'x': 5, 'y': 10, 'z': 12}
        assert expr.evaluate(env) == 10
        env['x'] = 6
        assert expr.evaluate(env) == 12
        env['x'] = 1
        assert expr.evaluate(env) == 0

    def test_string_bound(self):
        """CASE branches returning bound string variables."""
        expr = QueryParser().parse_expression(
            "CASE x WHEN 5 THEN y WHEN 6 THEN z ELSE q END")
        env = {'x': 5, 'y': 'ten', 'z': 'twelve', 'q': 'zero'}
        assert expr.evaluate(env) == "ten"
        env['x'] = 6
        assert expr.evaluate(env) == "twelve"
        env['x'] = 1
        assert expr.evaluate(env) == "zero"

    def test_full_case(self):
        """Searched CASE with boolean WHEN conditions."""
        expr = QueryParser().parse_expression(
            "CASE WHEN x <= 5 THEN y WHEN x > 6 THEN 0 ELSE z END")
        env = {'x': 5, 'y': 10, 'z': 12}
        assert expr.evaluate(env) == 10
        env['x'] = 6
        assert expr.evaluate(env) == 12
        env['x'] = 10
        assert expr.evaluate(env) == 0

    def test_iif(self):
        """IIF yields the second or third argument based on the condition."""
        parser = QueryParser()
        expr = parser.parse_expression("IIF(x <= 5, y, 0)")
        env = {'x': 5, 'y': 10, 'z': 12}
        assert expr.evaluate(env) == 10
        env["x"] = 6
        assert expr.evaluate(env) == 0
        expr = parser.parse_expression("IIF(x <= 5, y, 'string')")
        assert expr.evaluate(env) == "string"

    def test_choose(self):
        """CHOOSE is 1-indexed; out-of-range indices and NULL yield None."""
        parser = QueryParser()
        expr = parser.parse_expression("CHOOSE(x, 'a', 'b', 'c')")
        env = {'x': 3, 'y': 10, 'z': 12}
        assert expr.evaluate(env) == "c"
        env["x"] = 1
        assert expr.evaluate(env) == 'a'
        env["x"] = 0
        assert expr.evaluate(env) is None
        env["x"] = 10
        assert expr.evaluate(env) is None
        expr = parser.parse_expression("CHOOSE(x, 'a', 5, NULL)")
        env = {'x': 3, 'y': 10, 'z': 12}
        assert expr.evaluate(env) is None
        # A string index is coerced before selecting.
        env["x"] = "2"
        assert expr.evaluate(env) == 5
        # The index itself may be an arithmetic expression.
        expr = parser.parse_expression("CHOOSE(x % 2 + 1, NULL, 5)")
        env["x"] = 13
        assert expr.evaluate(env) == 5
class TestPredicateExpression:
    """Evaluate BETWEEN / IN / IS predicates, including NULL propagation.

    Each table row pairs a bound value for column ``x`` with the expected
    predicate result; a NULL (None) operand always evaluates to None.
    """

    def test_between_condition(self):
        """x BETWEEN low AND high over numbers, strings, and date strings."""
        cases = [
            (Literal(3), Literal(6),
             [(2, False), (4, True), (7, False), (None, None)]),
            (Literal('d'), Literal('h'),
             [('a', False), ('e', True), ('v', False), (None, None)]),
            (Literal('2017/01/01'), Literal('2019/01/01'),
             [('2016/01/01', False), ('2018/01/01', True),
              ('2020/01/01', False), (None, None)]),
        ]
        for low, high, checks in cases:
            pred = PredicatedExpression(Column("x"), BetweenCondition(low, high, False))
            for value, expected in checks:
                assert pred.evaluate({'x': value}) == expected

    def test_not_between_condition(self):
        """x NOT BETWEEN low AND high: results inverted, NULL stays None."""
        cases = [
            (Literal(3), Literal(6),
             [(2, True), (4, False), (7, True), (None, None)]),
            (Literal('d'), Literal('h'),
             [('a', True), ('e', False), ('v', True), (None, None)]),
            (Literal('2017/01/01'), Literal('2019/01/01'),
             [('2016/01/01', True), ('2018/01/01', False),
              ('2020/01/01', True), (None, None)]),
        ]
        for low, high, checks in cases:
            pred = PredicatedExpression(Column("x"), BetweenCondition(low, high, True))
            for value, expected in checks:
                assert pred.evaluate({'x': value}) == expected

    def test_in_condition(self):
        """x IN (...): numeric membership coerces 2.0 == 2, strings do not."""
        cases = [
            ([Literal(1), Literal(2), Literal(3)],
             [(2, True), (2., True), (7, False), (None, None)]),
            ([Literal("1"), Literal("2"), Literal("3")],
             [("2", True), ("2.", False), ("7", False), (None, None)]),
            ([Literal("2017/01/01"), Literal("2019/01/01")],
             [("2017/01/01", True), ("2020/01/01", False), (None, None)]),
        ]
        for members, checks in cases:
            pred = PredicatedExpression(Column("x"), InCondition(Seq(members)))
            for value, expected in checks:
                assert pred.evaluate({'x': value}) == expected

    def test_not_in_condition(self):
        """x NOT IN (...): results inverted, NULL stays None."""
        cases = [
            ([Literal(1), Literal(2), Literal(3)],
             [(2, False), (2., False), (7, True), (None, None)]),
            ([Literal("1"), Literal("2"), Literal("3")],
             [("2", False), ("2.", True), ("7", True), (None, None)]),
            ([Literal("2017/01/01"), Literal("2019/01/01")],
             [("2017/01/01", False), ("2020/01/01", True), (None, None)]),
        ]
        for members, checks in cases:
            pred = PredicatedExpression(Column("x"), InCondition(Seq(members), True))
            for value, expected in checks:
                assert pred.evaluate({'x': value}) == expected

    def test_is_condition(self):
        """x IS NULL / TRUE / FALSE, accepting the usual truthy/falsy spellings."""
        cases = [
            # IS NULL via the string "NULL" and via a literal None
            (Literal("NULL"), [(2, False), (None, True)]),
            (Literal(None), [(2, False), (None, True)]),
            # IS TRUE matches every recognized truthy string
            (Literal("TRUE"),
             [("True", True), ("true", True), ("t", True), ("y", True),
              ("yes", True), ("on", True), ("1", True), (None, False)]),
            # IS FALSE matches every recognized falsy string
            (Literal("False"),
             [("False", True), ("false", True), ("f", True), ("n", True),
              ("no", True), ("off", True), ("0", True), (None, False)]),
        ]
        for target, checks in cases:
            pred = PredicatedExpression(Column("x"), IsCondition(target, False))
            for value, expected in checks:
                assert pred.evaluate({'x': value}) == expected

    def test_is_not_condition(self):
        """x IS NOT NULL / TRUE / FALSE: negation of test_is_condition."""
        cases = [
            (Literal("NULL"), [(2, True), (None, False)]),
            (Literal("True"),
             [("True", False), ("true", False), ("t", False), ("y", False),
              ("yes", False), ("on", False), ("1", False), (None, True)]),
            (Literal("False"),
             [("False", False), ("false", False), ("f", False), ("n", False),
              ("no", False), ("off", False), ("0", False), (None, True)]),
        ]
        for target, checks in cases:
            pred = PredicatedExpression(Column("x"), IsCondition(target, True))
            for value, expected in checks:
                assert pred.evaluate({'x': value}) == expected
| 48.947933
| 116
| 0.55583
| 3,626
| 31,963
| 4.832598
| 0.053227
| 0.177139
| 0.090738
| 0.139131
| 0.918222
| 0.905724
| 0.891057
| 0.865662
| 0.789305
| 0.778406
| 0
| 0.031693
| 0.273441
| 31,963
| 652
| 117
| 49.023006
| 0.722861
| 0.026249
| 0
| 0.569832
| 0
| 0
| 0.036314
| 0
| 0
| 0
| 0
| 0
| 0.443203
| 1
| 0.035382
| false
| 0
| 0.011173
| 0
| 0.052142
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa9467c69980bf2afadefb00f552d6f7531cb008
| 19,633
|
py
|
Python
|
kimai_python/api/user_api.py
|
kbancerz/kimai-python
|
c5401acca8fe8cfa7db486dee5a215bd7daea95b
|
[
"MIT"
] | 6
|
2019-12-19T16:01:58.000Z
|
2022-01-19T18:10:16.000Z
|
kimai_python/api/user_api.py
|
kbancerz/kimai-python
|
c5401acca8fe8cfa7db486dee5a215bd7daea95b
|
[
"MIT"
] | 4
|
2020-05-16T23:33:15.000Z
|
2021-07-06T20:53:32.000Z
|
kimai_python/api/user_api.py
|
kbancerz/kimai-python
|
c5401acca8fe8cfa7db486dee5a215bd7daea95b
|
[
"MIT"
] | 3
|
2020-05-16T23:14:13.000Z
|
2021-06-30T08:53:11.000Z
|
# coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen, especially when using code generation. The order of JSON attributes is not guaranteed. # noqa: E501
OpenAPI spec version: 0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kimai_python.api_client import ApiClient
class UserApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def api_users_get(self, **kwargs): # noqa: E501
"""Returns the collection of all registered users # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api_users_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str visible: Visibility status to filter users. Allowed values: 1=visible, 2=hidden, 3=all (default: 1)
:param str order_by: The field by which results will be ordered. Allowed values: id, username, alias, email (default: username)
:param str order: The result order. Allowed values: ASC, DESC (default: ASC)
:param str term: Free search term
:return: list[UserCollection]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.api_users_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.api_users_get_with_http_info(**kwargs) # noqa: E501
return data
    def api_users_get_with_http_info(self, **kwargs):  # noqa: E501
        """Returns the collection of all registered users  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api_users_get_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str visible: Visibility status to filter users. Allowed values: 1=visible, 2=hidden, 3=all (default: 1)
        :param str order_by: The field by which results will be ordered. Allowed values: id, username, alias, email (default: username)
        :param str order: The result order. Allowed values: ASC, DESC (default: ASC)
        :param str term: Free search term
        :return: list[UserCollection]
        If the method is called asynchronously,
        returns the request thread.
        """
        # Keyword arguments accepted by this endpoint plus the client-level
        # control flags; anything else raises TypeError below.
        all_params = ['visible', 'order_by', 'order', 'term']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot of the current locals: {'self', 'kwargs', 'all_params'}.
        # Validated kwargs are folded into this dict, so do not introduce
        # new locals above this line.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method api_users_get" % key
                )
            params[key] = val
        del params['kwargs']
        # Server-side enum constraints, checked client-side via regex.
        if 'visible' in params and not re.match(r'1|2|3', params['visible']):  # noqa: E501
            raise ValueError("Invalid value for parameter `visible` when calling `api_users_get`, must conform to the pattern `/1|2|3/`")  # noqa: E501
        if 'order_by' in params and not re.match(r'id|username|alias|email', params['order_by']):  # noqa: E501
            raise ValueError("Invalid value for parameter `order_by` when calling `api_users_get`, must conform to the pattern `/id|username|alias|email/`")  # noqa: E501
        if 'order' in params and not re.match(r'ASC|DESC', params['order']):  # noqa: E501
            raise ValueError("Invalid value for parameter `order` when calling `api_users_get`, must conform to the pattern `/ASC|DESC/`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Note the camelCase rename: python `order_by` -> API `orderBy`.
        query_params = []
        if 'visible' in params:
            query_params.append(('visible', params['visible']))  # noqa: E501
        if 'order_by' in params:
            query_params.append(('orderBy', params['order_by']))  # noqa: E501
        if 'order' in params:
            query_params.append(('order', params['order']))  # noqa: E501
        if 'term' in params:
            query_params.append(('term', params['term']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = ['apiToken', 'apiUser']  # noqa: E501
        return self.api_client.call_api(
            '/api/users', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[UserCollection]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def api_users_id_get(self, id, **kwargs):  # noqa: E501
    """Return one user entity  # noqa: E501

    Synchronous by default; pass ``async_req=True`` and the underlying
    ``*_with_http_info`` call returns the request thread instead.

    >>> thread = api.api_users_id_get(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: User ID to fetch (required)
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the response data (not the
    # (data, status, headers) tuple). Both the async and the sync branch
    # of the generated original returned the same call's result, so a
    # single forwarding return is behaviorally identical.
    kwargs['_return_http_data_only'] = True
    return self.api_users_id_get_with_http_info(id, **kwargs)  # noqa: E501
def api_users_id_get_with_http_info(self, id, **kwargs):  # noqa: E501
    """Return one user entity  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_users_id_get_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: User ID to fetch (required)
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the endpoint accepts; anything else is a caller error.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    # Build the parameter dict explicitly instead of mutating the dict
    # returned by locals() (fragile, implementation-defined pattern).
    params = {'id': id}
    # Plain .items() iterates fine on both Py2 and Py3; six not needed here.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_users_id_get" % key
            )
        params[key] = val

    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `api_users_id_get`")  # noqa: E501

    collection_formats = {}
    path_params = {'id': params['id']}  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['apiToken', 'apiUser']  # noqa: E501

    return self.api_client.call_api(
        '/api/users/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserEntity',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_users_id_patch(self, body, id, **kwargs):  # noqa: E501
    """Update an existing user  # noqa: E501

    Update an existing user, you can pass all or just a subset of all attributes (passing roles will replace all existing ones)  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread from the underlying ``*_with_http_info`` call.

    >>> thread = api.api_users_id_patch(body, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param UserEditForm body: (required)
    :param int id: User ID to update (required)
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both branches of the generated original forwarded to the same call,
    # so a single return is behaviorally identical.
    kwargs['_return_http_data_only'] = True
    return self.api_users_id_patch_with_http_info(body, id, **kwargs)  # noqa: E501
def api_users_id_patch_with_http_info(self, body, id, **kwargs):  # noqa: E501
    """Update an existing user  # noqa: E501

    Update an existing user, you can pass all or just a subset of all attributes (passing roles will replace all existing ones)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_users_id_patch_with_http_info(body, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param UserEditForm body: (required)
    :param int id: User ID to update (required)
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the endpoint accepts; anything else is a caller error.
    all_params = ['body', 'id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    # Explicit parameter dict instead of mutating the locals() snapshot.
    params = {'body': body, 'id': id}
    # Plain .items() iterates fine on both Py2 and Py3; six not needed here.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_users_id_patch" % key
            )
        params[key] = val

    # verify the required parameter 'body' is set
    if params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `api_users_id_patch`")  # noqa: E501
    # verify the required parameter 'id' is set
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `api_users_id_patch`")  # noqa: E501

    collection_formats = {}
    path_params = {'id': params['id']}  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = params['body']
    # Authentication setting
    auth_settings = ['apiToken', 'apiUser']  # noqa: E501

    return self.api_client.call_api(
        '/api/users/{id}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserEntity',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_users_me_get(self, **kwargs):  # noqa: E501
    """Return the current user entity  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread from the underlying ``*_with_http_info`` call.

    >>> thread = api.api_users_me_get(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both branches of the generated original forwarded to the same call,
    # so a single return is behaviorally identical.
    kwargs['_return_http_data_only'] = True
    return self.api_users_me_get_with_http_info(**kwargs)  # noqa: E501
def api_users_me_get_with_http_info(self, **kwargs):  # noqa: E501
    """Return the current user entity  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_users_me_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the generic transport keywords are accepted on this endpoint.
    all_params = ['async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    # Explicit parameter dict instead of mutating the locals() snapshot.
    params = {}
    # Plain .items() iterates fine on both Py2 and Py3; six not needed here.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_users_me_get" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['apiToken', 'apiUser']  # noqa: E501

    return self.api_client.call_api(
        '/api/users/me', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserEntity',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def api_users_post(self, body, **kwargs):  # noqa: E501
    """Creates a new user  # noqa: E501

    Creates a new user and returns it afterwards  # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread from the underlying ``*_with_http_info`` call.

    >>> thread = api.api_users_post(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param UserCreateForm body: (required)
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both branches of the generated original forwarded to the same call,
    # so a single return is behaviorally identical.
    kwargs['_return_http_data_only'] = True
    return self.api_users_post_with_http_info(body, **kwargs)  # noqa: E501
def api_users_post_with_http_info(self, body, **kwargs):  # noqa: E501
    """Creates a new user  # noqa: E501

    Creates a new user and returns it afterwards  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api_users_post_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param UserCreateForm body: (required)
    :return: UserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the endpoint accepts; anything else is a caller error.
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    # Explicit parameter dict instead of mutating the locals() snapshot.
    params = {'body': body}
    # Plain .items() iterates fine on both Py2 and Py3; six not needed here.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method api_users_post" % key
            )
        params[key] = val

    # verify the required parameter 'body' is set
    if params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `api_users_post`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = params['body']
    # Authentication setting
    auth_settings = ['apiToken', 'apiUser']  # noqa: E501

    return self.api_client.call_api(
        '/api/users', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='UserEntity',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 39.423695
| 401
| 0.605919
| 2,381
| 19,633
| 4.769425
| 0.098278
| 0.046495
| 0.024657
| 0.031701
| 0.91608
| 0.904368
| 0.899613
| 0.879359
| 0.856375
| 0.846161
| 0
| 0.016196
| 0.301839
| 19,633
| 497
| 402
| 39.503018
| 0.812286
| 0.348597
| 0
| 0.746269
| 0
| 0.011194
| 0.190103
| 0.032365
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041045
| false
| 0
| 0.014925
| 0
| 0.115672
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aacbffcfecc54690e29a99678d58f3f50482a241
| 737
|
py
|
Python
|
Jupyter/cntmtzs.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Jupyter/cntmtzs.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Jupyter/cntmtzs.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: Count number of *.mtz files in current directory.
# Source: placeHolder

# Preview of the PyMOL commands this snippet issues (kept as the snippet's
# docstring, mirroring the live commands below).
"""
cmd.do('print("Count the number of mtz structure factor files in current directory.");')
cmd.do('print("Usage: cntmtzs");')
cmd.do('myPath = os.getcwd();')
cmd.do('mtzCounter = len(glob.glob1(myPath,"*.mtz"));')
cmd.do('print("Number of mtz structure factor files in the current directory: ", mtzCounter);')
"""

cmd.do('print("Count the number of mtz structure factor files in current directory.");')
cmd.do('print("Usage: cntmtzs");')
cmd.do('myPath = os.getcwd();')
cmd.do('mtzCounter = len(glob.glob1(myPath,"*.mtz"));')
# BUGFIX: final message previously read "Number of number of ..." — the
# duplicated words are removed; behavior is otherwise unchanged.
cmd.do('print("Number of mtz structure factor files in the current directory: ", mtzCounter);')
| 43.352941
| 106
| 0.698779
| 107
| 737
| 4.813084
| 0.242991
| 0.097087
| 0.116505
| 0.15534
| 0.869903
| 0.869903
| 0.869903
| 0.869903
| 0.869903
| 0.869903
| 0
| 0.003077
| 0.118046
| 737
| 16
| 107
| 46.0625
| 0.789231
| 0.548168
| 0
| 0
| 0
| 0
| 0.814815
| 0.098765
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
2ab11f1139e481644c45839acb62eeb9d64dff7c
| 455
|
py
|
Python
|
tests/parser/bug.07.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/bug.07.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/bug.07.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
a(1).
a(2).
b(1,2).
c(2).
c(3).
q(X,Y) :- a(X), c(Y).
p(X,Y,Z) :- a(X), q(Y,Z), m(X,Z).
m(X,Y) :- a(Z), p(Z,X,Y).
m(X,Y) :- b(X,Y), not n(X,Y).
n(X,Y) :- q(X,Y).
n(X,Y) :- b(X,Y), m(X,Y).
"""
output = """
a(1).
a(2).
b(1,2).
c(2).
c(3).
q(X,Y) :- a(X), c(Y).
p(X,Y,Z) :- a(X), q(Y,Z), m(X,Z).
m(X,Y) :- a(Z), p(Z,X,Y).
m(X,Y) :- b(X,Y), not n(X,Y).
n(X,Y) :- q(X,Y).
n(X,Y) :- b(X,Y), m(X,Y).
"""
| 11.666667
| 34
| 0.314286
| 130
| 455
| 1.1
| 0.123077
| 0.335664
| 0.125874
| 0.111888
| 0.923077
| 0.923077
| 0.923077
| 0.923077
| 0.923077
| 0.923077
| 0
| 0.035191
| 0.250549
| 455
| 38
| 35
| 11.973684
| 0.384164
| 0
| 0
| 0.923077
| 0
| 0.076923
| 0.926366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
2afd3eb221dd73e46acdaf7cd5924b75d1f259b8
| 444
|
py
|
Python
|
WeChall/6.Training/decoder.py
|
Youngermaster/We-Chall-Challenges
|
a8efa70464cf7c0a7e9cb40d157473d6bf5bc385
|
[
"Apache-2.0"
] | null | null | null |
WeChall/6.Training/decoder.py
|
Youngermaster/We-Chall-Challenges
|
a8efa70464cf7c0a7e9cb40d157473d6bf5bc385
|
[
"Apache-2.0"
] | null | null | null |
WeChall/6.Training/decoder.py
|
Youngermaster/We-Chall-Challenges
|
a8efa70464cf7c0a7e9cb40d157473d6bf5bc385
|
[
"Apache-2.0"
] | null | null | null |
from urllib.parse import unquote

# WeChall "Training: Encodings (URL)" — the solution message is hidden in a
# percent-encoded string; decoding it reveals the flag.
url = '%59%69%70%70%65%68%21%20%59%6F%75%72%20%55%52%4C%20%69%73%20%63%68%61%6C%6C%65%6E%67%65%2F%74%72%61%69%6E%69%6E%67%2F%65%6E%63%6F%64%69%6E%67%73%2F%75%72%6C%2F%73%61%77%5F%6C%6F%74%69%6F%6E%2E%70%68%70%3F%70%3D%62%69%70%67%62%69%67%6C%65%63%6C%63%26%63%69%64%3D%35%32%23%70%61%73%73%77%6F%72%64%3D%66%69%62%72%65%5F%6F%70%74%69%63%73%20%56%65%72%79%20%77%65%6C%6C%20%64%6F%6E%65%21'
decoded = unquote(url)
print(decoded)
| 88.8
| 389
| 0.675676
| 136
| 444
| 2.205882
| 0.308824
| 0.04
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.505747
| 0.02027
| 444
| 4
| 390
| 111
| 0.183908
| 0
| 0
| 0
| 0
| 0.333333
| 0.858108
| 0.858108
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
6303fd11f983de632723025aee246bd091c06eb7
| 505
|
py
|
Python
|
Variables/2.5.1.py
|
MosheBakshi/HANGMAN
|
49750b98ac54f5eee9378ed66fd67d6dd57dc29a
|
[
"MIT"
] | null | null | null |
Variables/2.5.1.py
|
MosheBakshi/HANGMAN
|
49750b98ac54f5eee9378ed66fd67d6dd57dc29a
|
[
"MIT"
] | null | null | null |
Variables/2.5.1.py
|
MosheBakshi/HANGMAN
|
49750b98ac54f5eee9378ed66fd67d6dd57dc29a
|
[
"MIT"
] | null | null | null |
# ASCII-art "Hangman" banner shown when the game starts.
# NOTE(review): the art's leading whitespace appears stripped in this copy —
# confirm alignment against the upstream file before reuse.
HANGMAN_ASCII_ART = ("""
_ _
| | | |
| |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __
| __ |/ _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
__/ |
|___/
\n""")
# Number of tries the player gets — presumably the maximum wrong guesses;
# confirm against the game loop, which is not in this file.
MAX_TRIES = 6
# Print the banner and the tries count at import/run time.
print(HANGMAN_ASCII_ART, MAX_TRIES)
"""
HANGMAN GAME ASCII LOGO AND MAX TRIES
"""
| 33.666667
| 49
| 0.29505
| 20
| 505
| 4.2
| 0.55
| 0.285714
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004049
| 0.510891
| 505
| 15
| 50
| 33.666667
| 0.336032
| 0
| 0
| 0
| 0
| 0.25
| 0.826464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
632151b4c8b12bd364c9abc191c32b932ff883f6
| 4,204
|
py
|
Python
|
repositories/actions/state.py
|
DevgurusSupport/commercetools-python-sdk
|
849c4cc182b68850fedc1f67a594449c28a4f751
|
[
"MIT"
] | null | null | null |
repositories/actions/state.py
|
DevgurusSupport/commercetools-python-sdk
|
849c4cc182b68850fedc1f67a594449c28a4f751
|
[
"MIT"
] | null | null | null |
repositories/actions/state.py
|
DevgurusSupport/commercetools-python-sdk
|
849c4cc182b68850fedc1f67a594449c28a4f751
|
[
"MIT"
] | null | null | null |
from .baseactions import BaseActions
from models.state import State
import re
class StateActions(BaseActions):
    """Build update-action dicts from attribute diffs of a State object.

    Each hook receives ``diff`` — a mapping whose keys look like
    ``"root.attr"`` or ``"root.attr[3]"`` — plus the new object and, for
    updates, the old one.  (NOTE(review): key shape inferred from the
    ``split('.')[1]`` usage here; confirm against BaseActions.)
    """

    @staticmethod
    def _attr_of(root_attr):
        # Diff keys are dotted; the second segment names the attribute.
        return root_attr.split('.')[1]

    @staticmethod
    def _transitions_action(obj):
        # Transitions are always replaced wholesale with the new object's list.
        return {'action': 'setTransitions', 'transitions': [
            transition.__dict__ for transition in obj.transitions]}

    @staticmethod
    def _role_index(attr):
        # BUGFIX: the original used re.findall(r'[\d+]', ...), a character
        # class matching ONE digit (or a literal '+'), which truncated
        # indices >= 10 to their first digit. r'\d+' captures the full index.
        return int(re.findall(r'\d+', attr)[0])

    @classmethod
    def _regular_attribute_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for scalar attribute changes (key, type, initial, ...)."""
        actions = []
        for root_attr in diff:
            attr = cls._attr_of(root_attr)
            if attr == 'key':
                actions.append({'action': 'changeKey', 'key': obj.key})
            elif attr == '_type':
                actions.append({'action': 'changeType', 'type': obj.type})
            elif attr == 'initial':
                actions.append(
                    {'action': 'changeInitial', 'initial': obj.initial})
            elif attr == 'roles':
                actions.append({'action': 'setRoles', 'roles': obj.roles})
            elif attr == 'transitions':
                actions.append(cls._transitions_action(obj))
            elif attr.startswith('name'):
                actions.append({'action': 'setName', 'name': obj.name})
            elif attr.startswith('description'):
                actions.append({'action': 'setDescription',
                                'description': obj.description})
        return actions

    @classmethod
    def _iterable_attribute_add_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for elements added to list attributes (roles/transitions)."""
        actions = []
        for root_attr in diff:
            attr = cls._attr_of(root_attr)
            if 'roles' in attr:
                actions.append(
                    {'action': 'addRoles', 'roles': [diff[root_attr]]})
            elif 'transitions' in attr:
                actions.append(cls._transitions_action(obj))
        return actions

    @classmethod
    def _iterable_attribute_update_actions(cls, diff: dict, obj, old_obj):
        """Actions for in-place element changes: remove the old role at the
        diffed index, then add the new one."""
        actions = []
        for root_attr in diff:
            attr = cls._attr_of(root_attr)
            if 'roles' in attr:
                idx = cls._role_index(attr)
                actions.append(
                    {'action': 'removeRoles', 'roles': [old_obj.roles[idx]]})
                actions.append(
                    {'action': 'addRoles', 'roles': [obj.roles[idx]]})
            elif 'transitions' in attr:
                actions.append(cls._transitions_action(obj))
        return actions

    @classmethod
    def _iterable_attribute_remove_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for elements removed from list attributes."""
        actions = []
        for root_attr in diff:
            attr = cls._attr_of(root_attr)
            if 'roles' in attr:
                actions.append(
                    {'action': 'removeRoles', 'roles': [diff[root_attr]]})
            elif 'transitions' in attr:
                actions.append(cls._transitions_action(obj))
        return actions

    @classmethod
    def _diccionary_attribute_add_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for added dict-valued fields (name/description): the whole
        field is re-set from the new object."""
        actions = []
        for root_attr in diff:
            attr = cls._attr_of(root_attr)
            if attr.startswith('name'):
                actions.append({'action': 'setName', 'name': obj.name})
            elif attr.startswith('description'):
                actions.append({'action': 'setDescription',
                                'description': obj.description})
        return actions

    @classmethod
    def _diccionary_attribute_remove_actions(cls, diff: dict, obj, old_obj=None):
        """Actions for removed dict-valued fields — identical to the add case,
        since name/description are always replaced wholesale."""
        return cls._diccionary_attribute_add_actions(diff, obj, old_obj)
| 44.252632
| 87
| 0.540676
| 394
| 4,204
| 5.553299
| 0.149746
| 0.106947
| 0.156307
| 0.04936
| 0.838208
| 0.819927
| 0.80713
| 0.79479
| 0.79479
| 0.77011
| 0
| 0.002827
| 0.326832
| 4,204
| 94
| 88
| 44.723404
| 0.770318
| 0
| 0
| 0.701149
| 0
| 0
| 0.136299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.183908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2d51eeed22783a075c5718264f1f3003da131451
| 25,983
|
py
|
Python
|
tests/conftest.py
|
PeggyJV/cellars
|
6cba2440ed56508e67cff6fb22114c3e5740bd8e
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
PeggyJV/cellars
|
6cba2440ed56508e67cff6fb22114c3e5740bd8e
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
PeggyJV/cellars
|
6cba2440ed56508e67cff6fb22114c3e5740bd8e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import pytest, math
from brownie import accounts, CellarPoolShare, Contract
@pytest.fixture(scope="module")
def CellarPoolShareContract(USDC, WETH):
name = "Cellar Pool Share Token"
symbol = "CPS"
token0 = USDC
token1 = WETH
# cellarTickInfo = [[0, 870000, -870000, 1]]
cellarTickInfo = [[0, 240000, 210000, 1], [0, 210000, 180000, 5], [0, 180000, 150000, 2]]
return CellarPoolShare.deploy(name, symbol, token0, token1, 3000, cellarTickInfo, {'from':accounts[0]})
@pytest.fixture(scope="session")
def SwapRouter():
return Contract.from_abi("SwapRouter", "0xE592427A0AEce92De3Edee1F18E0157C05861564", [{"inputs":[{"internalType":"address","name":"_factory","type":"address"},{"internalType":"address","name":"_WETH9","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"WETH9","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"components":[{"internalType":"bytes","name":"path","type":"bytes"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMinimum","type":"uint256"}],"internalType":"struct ISwapRouter.ExactInputParams","name":"params","type":"tuple"}],"name":"exactInput","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint24","name":"fee","type":"uint24"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMinimum","type":"uint256"},{"internalType":"uint160","name":"sqrtPriceLimitX96","type":"uint160"}],"internalType":"struct 
ISwapRouter.ExactInputSingleParams","name":"params","type":"tuple"}],"name":"exactInputSingle","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"bytes","name":"path","type":"bytes"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMaximum","type":"uint256"}],"internalType":"struct ISwapRouter.ExactOutputParams","name":"params","type":"tuple"}],"name":"exactOutput","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[{"components":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint24","name":"fee","type":"uint24"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMaximum","type":"uint256"},{"internalType":"uint160","name":"sqrtPriceLimitX96","type":"uint160"}],"internalType":"struct 
ISwapRouter.ExactOutputSingleParams","name":"params","type":"tuple"}],"name":"exactOutputSingle","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"data","type":"bytes[]"}],"name":"multicall","outputs":[{"internalType":"bytes[]","name":"results","type":"bytes[]"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"refundETH","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermit","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"nonce","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitAllowed","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"nonce","type":"uint256"},{"internalType":"uint256","name":"expiry","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitAllowedIfNecessary","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internal
Type":"address","name":"token","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"selfPermitIfNecessary","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"}],"name":"sweepToken","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"feeBips","type":"uint256"},{"internalType":"address","name":"feeRecipient","type":"address"}],"name":"sweepTokenWithFee","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"int256","name":"amount0Delta","type":"int256"},{"internalType":"int256","name":"amount1Delta","type":"int256"},{"internalType":"bytes","name":"_data","type":"bytes"}],"name":"uniswapV3SwapCallback","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"}],"name":"unwrapWETH9","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountMinimum","type":"uint256"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"feeBips","type":"uint256"},{"internalType":"address","name":"feeRecipient","type":"address"}],"name":"unwrapWETH9WithFee","outputs":[],"stateMutability":"payable","type":"function"},{"stateMutab
ility":"payable","type":"receive"}])
@pytest.fixture(scope="session")
def USDC():
return Contract.from_abi("USDC", "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", [{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"owner","type":"address"},{"indexed":True,"internalType":"address","name":"spender","type":"address"},{"indexed":False,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"authorizer","type":"address"},{"indexed":True,"internalType":"bytes32","name":"nonce","type":"bytes32"}],"name":"AuthorizationCanceled","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"authorizer","type":"address"},{"indexed":True,"internalType":"bytes32","name":"nonce","type":"bytes32"}],"name":"AuthorizationUsed","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"_account","type":"address"}],"name":"Blacklisted","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"newBlacklister","type":"address"}],"name":"BlacklisterChanged","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"burner","type":"address"},{"indexed":False,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"Burn","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"newMasterMinter","type":"address"}],"name":"MasterMinterChanged","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"minter","type":"address"},{"indexed":True,"internalType":"address","name":"to","type":"address"},{"indexed":False,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"Mint","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"minter","type":"address"},{"indexed":False,"internalType":"uint256","name":"minterAllowedAmount","type":"uint256"}],"name":"MinterConfigured","t
ype":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"oldMinter","type":"address"}],"name":"MinterRemoved","type":"event"},{"anonymous":False,"inputs":[{"indexed":False,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":False,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":False,"inputs":[],"name":"Pause","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"newAddress","type":"address"}],"name":"PauserChanged","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"newRescuer","type":"address"}],"name":"RescuerChanged","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"from","type":"address"},{"indexed":True,"internalType":"address","name":"to","type":"address"},{"indexed":False,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"_account","type":"address"}],"name":"UnBlacklisted","type":"event"},{"anonymous":False,"inputs":[],"name":"Unpause","type":"event"},{"inputs":[],"name":"APPROVE_WITH_AUTHORIZATION_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"CANCEL_AUTHORIZATION_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"DECREASE_ALLOWANCE_WITH_AUTHORIZATION_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"INCREASE_ALLOWANCE_WITH_AUTHORIZATION_TYPEHASH","outputs":[{"internalTy
pe":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"TRANSFER_WITH_AUTHORIZATION_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"validAfter","type":"uint256"},{"internalType":"uint256","name":"validBefore","type":"uint256"},{"internalType":"bytes32","name":"nonce","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"approveWithAuthorization","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"authorizer","type":"address"},{"internalType":"bytes32","name":"nonce","type":"bytes32"}],"name":"authorizationState","outputs":[{"internalType":"enum 
GasAbstraction.AuthorizationState","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_account","type":"address"}],"name":"blacklist","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"blacklister","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"burn","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"authorizer","type":"address"},{"internalType":"bytes32","name":"nonce","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"cancelAuthorization","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"minter","type":"address"},{"internalType":"uint256","name":"minterAllowedAmount","type":"uint256"}],"name":"configureMinter","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"currency","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"decrement","type":"uint256"}],"name":"decreaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType
":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"decrement","type":"uint256"},{"internalType":"uint256","name":"validAfter","type":"uint256"},{"internalType":"uint256","name":"validBefore","type":"uint256"},{"internalType":"bytes32","name":"nonce","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"decreaseAllowanceWithAuthorization","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"increment","type":"uint256"}],"name":"increaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"increment","type":"uint256"},{"internalType":"uint256","name":"validAfter","type":"uint256"},{"internalType":"uint256","name":"validBefore","type":"uint256"},{"internalType":"bytes32","name":"nonce","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"increaseAllowanceWithAuthorization","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"string","name":"tokenName","type":"string"},{"internalType":"string","name":"tokenSymbol","type":"string"},{"internalType":"string","name":"tokenCurrency","type":"string"},{"internalType":"uint8","name":"tokenDecimals","type":"uint8"},{"internalType":"address","name":"newMasterMinter","type":"address"},{"internalType":"address","name":"newPauser","type":"address"},{"internalType":"address","name":"newBlacklister","type
":"address"},{"internalType":"address","name":"newOwner","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"string","name":"newName","type":"string"}],"name":"initializeV2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_account","type":"address"}],"name":"isBlacklisted","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"isMinter","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"masterMinter","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"mint","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"minter","type":"address"}],"name":"minterAllowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"paused","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"
function"},{"inputs":[],"name":"pauser","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"minter","type":"address"}],"name":"removeMinter","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract IERC20","name":"tokenContract","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"rescueERC20","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"rescuer","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name
":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"validAfter","type":"uint256"},{"internalType":"uint256","name":"validBefore","type":"uint256"},{"internalType":"bytes32","name":"nonce","type":"bytes32"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"transferWithAuthorization","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_account","type":"address"}],"name":"unBlacklist","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"unpause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newBlacklister","type":"address"}],"name":"updateBlacklister","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newMasterMinter","type":"address"}],"name":"updateMasterMinter","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newPauser","type":"address"}],"name":"updatePauser","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newRescuer","type":"address"}],"name":"updateRescuer","outputs":[],"stateMutability":"nonpayable","type":"function"}])
@pytest.fixture(scope="session")
def WETH():
return Contract.from_abi("WETH", "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", [{"constant":True,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"guy","type":"address"},{"name":"wad","type":"uint256"}],"name":"approve","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"src","type":"address"},{"name":"dst","type":"address"},{"name":"wad","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"name":"wad","type":"uint256"}],"name":"withdraw","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"","type":"address"}],"name":"balanceOf","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"dst","type":"address"},{"name":"wad","type":"uint256"}],"name":"transfer","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[],"name":"deposit","outputs":[],"payable":True,"stateMutability":"payable","type":"function"},{"constant":True,"inputs":[{"name":"","type":"address"},{"name":"","type":"address"}],"name":"allowance","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"fun
ction"},{"payable":True,"stateMutability":"payable","type":"fallback"},{"anonymous":False,"inputs":[{"indexed":True,"name":"src","type":"address"},{"indexed":True,"name":"guy","type":"address"},{"indexed":False,"name":"wad","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"name":"src","type":"address"},{"indexed":True,"name":"dst","type":"address"},{"indexed":False,"name":"wad","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"name":"dst","type":"address"},{"indexed":False,"name":"wad","type":"uint256"}],"name":"Deposit","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"name":"src","type":"address"},{"indexed":False,"name":"wad","type":"uint256"}],"name":"Withdrawal","type":"event"}])
| 962.333333
| 15,603
| 0.668899
| 2,519
| 25,983
| 6.885669
| 0.088527
| 0.077371
| 0.116691
| 0.072643
| 0.864226
| 0.828193
| 0.808706
| 0.75774
| 0.751398
| 0.712424
| 0
| 0.029629
| 0.005003
| 25,983
| 26
| 15,604
| 999.346154
| 0.64128
| 0.002309
| 0
| 0.157895
| 0
| 0
| 0.639211
| 0.026426
| 0
| 0
| 0.004861
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.105263
| 0.157895
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 12
|
2d7b9917aec98f84c2ce25f9d2571dfc65bbd9ed
| 6,309
|
py
|
Python
|
contrib/wydget/wydget/widgets/drawer.py
|
bitcraft/pyglet
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
[
"BSD-3-Clause"
] | 15
|
2015-01-21T12:29:01.000Z
|
2018-12-09T09:17:33.000Z
|
contrib/wydget/wydget/widgets/drawer.py
|
bitcraft/pyglet
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/wydget/wydget/widgets/drawer.py
|
bitcraft/pyglet
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
[
"BSD-3-Clause"
] | 9
|
2015-12-12T09:12:46.000Z
|
2021-12-26T13:29:14.000Z
|
from wydget import anim
from wydget.widgets.frame import Frame
class Drawer(Frame):
    """A *transparent container* that may hide and expose its contents."""
    name = 'drawer'

    # Visibility states.
    HIDDEN = 'hidden'
    EXPOSED = 'exposed'

    # Sides the drawer may slide in from.
    LEFT = 'left'
    RIGHT = 'right'
    TOP = 'top'
    BOTTOM = 'bottom'

    # Currently-running show/hide animation, if any.
    _anim = None

    def __init__(self, parent, state=HIDDEN, side=LEFT,
            is_transparent=True, **kw):
        super().__init__(parent, is_transparent=is_transparent, **kw)
        self.state = state
        self.side = side
        if state == self.HIDDEN:
            self.setVisible(False)

    def toggle_state(self):
        """Expose the drawer when hidden, hide it when exposed."""
        if self.state == self.EXPOSED:
            self.hide()
        else:
            self.expose()

    def expose(self):
        """Animate the drawer into view. No-op when already exposed."""
        if self.state == self.EXPOSED:
            return
        running = self._anim
        if running is not None and running.is_running:
            # Abort any in-flight animation before starting a new one.
            running.cancel()
        self._anim = ExposeAnimation(self)
        self.setVisible(True)
        self.state = self.EXPOSED

    def hide(self):
        """Animate the drawer out of view. No-op when already hidden."""
        if self.state == self.HIDDEN:
            return
        running = self._anim
        if running is not None and running.is_running:
            # Abort any in-flight animation before starting a new one.
            running.cancel()
        self._anim = HideAnimation(self)
        self.state = self.HIDDEN
class HideAnimation(anim.Animation):
    """Slide a Drawer out of view along its configured side.

    Tweens the drawer's position and view clip from fully exposed down to
    zero extent over `duration` seconds, shaped by the easing `function`.
    """
    def __init__(self, drawer, duration=.25, function=anim.cosine90):
        self.drawer = drawer
        self.duration = duration
        self.function = function
        # Pre-compute start (s*) and end (e*) values for the slide axis:
        # horizontal sides animate x and clip width, vertical sides animate
        # y and clip height. The hidden end state always has zero extent.
        if drawer.side == Drawer.LEFT:
            self.sx = int(drawer.x)
            self.ex = int(drawer.x - drawer.width)
            self.sw = int(drawer.width)
            self.ew = 0
        elif drawer.side == Drawer.RIGHT:
            self.sx = int(drawer.x)
            self.ex = int(drawer.x + drawer.width)
            self.sw = int(drawer.width)
            self.ew = 0
        elif drawer.side == Drawer.TOP:
            self.sy = int(drawer.y)
            self.ey = int(drawer.y - drawer.height)
            self.sh = int(drawer.height)
            self.eh = 0
        elif drawer.side == Drawer.BOTTOM:
            self.sy = int(drawer.y)
            self.ey = int(drawer.y + drawer.height)
            self.sh = int(drawer.height)
            self.eh = 0
        super().__init__()
    def cancel(self):
        # Jump straight to the fully-hidden end state and stop animating.
        self.drawer.setVisible(False)
        if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
            # NOTE(review): uses self.sx for the clip origin where
            # ExposeAnimation.cancel uses 0 — presumably intentional, but
            # worth confirming against setViewClip's coordinate space.
            self.drawer.setViewClip((self.sx, 0, self.ew,
                self.drawer.height))
            self.drawer.x = self.ex
        else:
            self.drawer.setViewClip((0, self.sy, self.drawer.width,
                self.eh))
            self.drawer.y = self.ey
        super().cancel()
    def animate(self, dt):
        self.anim_time += dt
        if self.anim_time >= self.duration:
            # Time's up — snap to the final hidden state.
            self.cancel()
        else:
            # Interpolation parameter in [0, 1).
            t = self.anim_time / self.duration
            if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
                x = anim.tween(self.sx, self.ex, t, self.function)
                w = anim.tween(self.sw, self.ew, t, self.function)
                if self.drawer.side == Drawer.LEFT:
                    # Clip from the leading edge as the drawer retreats.
                    vcx = self.sw - w
                elif self.drawer.side == Drawer.RIGHT:
                    vcx = 0
                self.drawer.setViewClip((vcx, 0, w, self.drawer.height))
                self.drawer.x = x
            else:
                y = anim.tween(self.sy, self.ey, t, self.function)
                h = anim.tween(self.sh, self.eh, t, self.function)
                if self.drawer.side == Drawer.TOP:
                    # Clip from the leading edge as the drawer retreats.
                    vcy = self.sh - h
                elif self.drawer.side == Drawer.BOTTOM:
                    vcy = 0
                self.drawer.setViewClip((0, vcy, self.drawer.width, h))
                self.drawer.y = y
class ExposeAnimation(anim.Animation):
    """Slide a Drawer into view along its configured side.

    Tweens the drawer's position and view clip from zero extent up to the
    drawer's full size over `duration` seconds, shaped by the easing
    `function`. Mirror image of HideAnimation.
    """
    def __init__(self, drawer, duration=.25, function=anim.cosine90):
        self.drawer = drawer
        self.duration = duration
        self.function = function
        # Pre-compute start (s*) and end (e*) values for the slide axis:
        # horizontal sides animate x and clip width, vertical sides animate
        # y and clip height. The start state always has zero extent.
        if drawer.side == Drawer.LEFT:
            self.sx = int(drawer.x)
            self.ex = int(drawer.x + drawer.width)
            self.sw = 0
            self.ew = int(drawer.width)
        elif drawer.side == Drawer.RIGHT:
            self.sx = int(drawer.x)
            self.ex = int(drawer.x - drawer.width)
            self.sw = 0
            self.ew = int(drawer.width)
        elif drawer.side == Drawer.TOP:
            self.sy = int(drawer.y)
            self.ey = int(drawer.y + drawer.height)
            self.sh = 0
            self.eh = int(drawer.height)
        elif drawer.side == Drawer.BOTTOM:
            self.sy = int(drawer.y)
            self.ey = int(drawer.y - drawer.height)
            self.sh = 0
            self.eh = int(drawer.height)
        super().__init__()
    def cancel(self):
        # Jump straight to the fully-exposed end state and stop animating.
        if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
            self.drawer.setViewClip((0, 0, self.ew, self.drawer.height))
            self.drawer.x = self.ex
        else:
            self.drawer.setViewClip((0, 0, self.drawer.width, self.eh))
            self.drawer.y = self.ey
        super().cancel()
    def animate(self, dt):
        self.anim_time += dt
        if self.anim_time >= self.duration:
            # Time's up — snap to the final exposed state.
            self.cancel()
        else:
            # Interpolation parameter in [0, 1).
            t = self.anim_time / self.duration
            if self.drawer.side in (Drawer.LEFT, Drawer.RIGHT):
                x = anim.tween(self.sx, self.ex, t, self.function)
                w = anim.tween(self.sw, self.ew, t, self.function)
                if self.drawer.side == Drawer.LEFT:
                    # Reveal from the leading edge as the drawer advances.
                    vcx = self.ew - w
                elif self.drawer.side == Drawer.RIGHT:
                    vcx = 0
                self.drawer.setViewClip((vcx, 0, w, self.drawer.height))
                self.drawer.x = x
            else:
                y = anim.tween(self.sy, self.ey, t, self.function)
                h = anim.tween(self.sh, self.eh, t, self.function)
                if self.drawer.side == Drawer.TOP:
                    # Reveal from the leading edge as the drawer advances.
                    vcy = self.eh - h
                elif self.drawer.side == Drawer.BOTTOM:
                    vcy = 0
                self.drawer.setViewClip((0, vcy, self.drawer.width, h))
                self.drawer.y = y
| 35.05
| 72
| 0.518466
| 765
| 6,309
| 4.216993
| 0.101961
| 0.127092
| 0.079355
| 0.039678
| 0.815561
| 0.796032
| 0.779293
| 0.779293
| 0.779293
| 0.779293
| 0
| 0.007483
| 0.364559
| 6,309
| 179
| 73
| 35.24581
| 0.797206
| 0.010144
| 0
| 0.727848
| 0
| 0
| 0.005935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0
| 0.012658
| 0
| 0.158228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2dd69a9a4545cb95b50291a577dec4fd615bb04c
| 10,440
|
py
|
Python
|
tests/integration_tests/python/test_propagation.py
|
scitran/nimsapi
|
a4203cf6c6d29aa15d33011250ee69ff929fcb0d
|
[
"MIT"
] | 13
|
2016-05-31T14:32:58.000Z
|
2021-09-17T07:18:11.000Z
|
tests/integration_tests/python/test_propagation.py
|
scitran/core
|
a4203cf6c6d29aa15d33011250ee69ff929fcb0d
|
[
"MIT"
] | 911
|
2016-02-16T18:40:27.000Z
|
2018-08-07T17:50:29.000Z
|
tests/integration_tests/python/test_propagation.py
|
scitran/nimsapi
|
a4203cf6c6d29aa15d33011250ee69ff929fcb0d
|
[
"MIT"
] | 16
|
2016-02-17T15:54:34.000Z
|
2021-04-07T05:30:34.000Z
|
# Test changing propagated properties
def test_public_propagation_from_project(data_builder, as_admin):
    """
    Tests:
      - 'public' is a propagated property
    """
    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()

    # Flip the flag at the project level only.
    r = as_admin.put('/projects/' + project, json={'public': False})
    assert r.ok

    # The change must be visible at every level of the hierarchy.
    for url in ('/projects/' + project,
                '/sessions/' + session,
                '/acquisitions/' + acquisition):
        r = as_admin.get(url)
        assert r.ok and not r.json()['public']
def test_public_propagation_from_session(data_builder, as_admin):
    """
    Tests:
      - propagation works from a session level
    """
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()

    # Flip the flag at the session level only.
    r = as_admin.put('/sessions/' + session, json={'public': True})
    assert r.ok

    # Both the session and its child acquisition must reflect the change.
    for url in ('/sessions/' + session, '/acquisitions/' + acquisition):
        r = as_admin.get(url)
        assert r.ok and r.json()['public']
def test_set_public_acquisition(data_builder, as_admin):
    """
    Tests:
      - setting a propagated property on an acquisition does not attempt to propagate (would hit Exception)
    """
    acquisition = data_builder.create_acquisition()

    # An acquisition is a leaf node; the PUT must succeed without
    # triggering any downstream propagation.
    r = as_admin.put('/acquisitions/' + acquisition,
                     json={'public': True})
    assert r.ok
# Test propagation of project permission changes
def test_add_and_remove_user_for_project_permissions(data_builder, as_admin):
    """
    Tests:
      - changing permissions at a project level triggers propagation
      - additive change to list propagates properly
      - change to list propagates properly
      - removal from list propagates properly
    """
    def get_user_in_perms(perms, uid):
        # Return the permission entry for uid, or None when absent.
        for perm in perms:
            if perm['_id'] == uid:
                return perm
        return None

    def fetch_user_perm(url):
        # GET a container and return (response, user_id's permission entry).
        r = as_admin.get(url)
        return r, get_user_in_perms(r.json()['permissions'], user_id)

    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()
    user_id = 'propagation@user.com'
    # Every level of the hierarchy that the change must propagate to.
    urls = ('/projects/' + project, '/sessions/' + session,
            '/acquisitions/' + acquisition)

    # Add user to project permissions
    payload = {'_id': user_id, 'access': 'admin'}
    r = as_admin.post('/projects/' + project + '/permissions', json=payload)
    assert r.ok
    for url in urls:
        r, user = fetch_user_perm(url)
        assert r.ok and user

    # Modify user permissions
    payload = {'access': 'rw', '_id': user_id}
    r = as_admin.put('/projects/' + project + '/permissions/' + user_id,
                     json=payload)
    assert r.ok
    for url in urls:
        r, user = fetch_user_perm(url)
        assert r.ok and user and user['access'] == 'rw'

    # Remove user from project permissions
    r = as_admin.delete('/projects/' + project + '/permissions/' + user_id,
                        json=payload)
    assert r.ok
    for url in urls:
        r, user = fetch_user_perm(url)
        assert r.ok and user is None
# Test group permission propagation
def test_add_and_remove_user_group_permission(data_builder, as_admin):
    """
    Tests:
      - changing permissions at a group level with flag triggers propagation
      - additive change to list propagates properly
      - change to list propagates properly
      - removal from list propagates properly
    """
    def get_user_in_perms(perms, uid):
        # Return the permission entry for uid, or None when absent.
        for perm in perms:
            if perm['_id'] == uid:
                return perm
        return None

    def fetch_user_perm(url):
        # GET a container and return (response, user_id's permission entry).
        r = as_admin.get(url)
        return r, get_user_in_perms(r.json()['permissions'], user_id)

    group = data_builder.create_group()
    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()
    user_id = 'propagation@user.com'

    # Add user to group permissions
    payload = {'_id': user_id, 'access': 'admin'}
    r = as_admin.post('/groups/' + group + '/permissions', json=payload,
                      params={'propagate': 'true'})
    assert r.ok

    # Add project without default group perms
    r = as_admin.post('/projects', params={'inherit': 'false'},
                      json={'label': 'project2', 'group': group})
    assert r.ok
    project2 = r.json()['_id']

    r, user = fetch_user_perm('/groups/' + group)
    assert r.ok and user
    r, user = fetch_user_perm('/projects/' + project)
    # The propagated-to project must still belong to the group.
    assert r.json()['group'] == group
    assert r.ok and user
    for url in ('/sessions/' + session, '/acquisitions/' + acquisition):
        r, user = fetch_user_perm(url)
        assert r.ok and user

    # Modify user permissions
    payload = {'access': 'rw', '_id': user_id}
    r = as_admin.put('/groups/' + group + '/permissions/' + user_id,
                     json=payload, params={'propagate': 'true'})
    assert r.ok
    for url in ('/groups/' + group, '/projects/' + project,
                '/projects/' + project2, '/sessions/' + session,
                '/acquisitions/' + acquisition):
        r, user = fetch_user_perm(url)
        assert r.ok and user and user['access'] == 'rw'

    # Remove user from group permissions
    r = as_admin.delete('/groups/' + group + '/permissions/' + user_id,
                        json=payload, params={'propagate': 'true'})
    assert r.ok
    # NOTE(review): project2 is not re-checked after removal — presumably
    # intentional (it was checked in the modify stage), but worth confirming
    # removal propagates to non-inheriting projects too.
    for url in ('/groups/' + group, '/projects/' + project,
                '/sessions/' + session, '/acquisitions/' + acquisition):
        r, user = fetch_user_perm(url)
        assert r.ok and user is None

    # Delete empty project 2
    r = as_admin.delete('/projects/' + project2)
    assert r.ok
# Test tag pool renaming and deletion
def test_add_rename_remove_group_tag(data_builder, as_admin):
    """
    Tests:
      - tag propagation from the group level
      - renaming a tag at the group level renames it throughout the hierarchy
      - deleting a tag at the group level removes it throughout the hierarchy
    """
    group = data_builder.create_group()
    project = data_builder.create_project()
    session = data_builder.create_session()
    acquisition = data_builder.create_acquisition()

    # Containers in hierarchy order, paired with their API collection names.
    hierarchy = [
        ('groups', group),
        ('projects', project),
        ('sessions', session),
        ('acquisitions', acquisition),
    ]
    tag = 'test tag'
    tag_renamed = 'test tag please ignore'

    # Add the tag to every container in the hierarchy, then verify it shows up
    for cont_name, cont_id in hierarchy:
        r = as_admin.post('/' + cont_name + '/' + cont_id + '/tags', json={'value': tag})
        assert r.ok
    for cont_name, cont_id in hierarchy:
        r = as_admin.get('/' + cont_name + '/' + cont_id)
        assert r.ok and tag in r.json()['tags']

    # Rename the tag at the group level; the rename should propagate downward
    r = as_admin.put('/groups/' + group + '/tags/' + tag, json={'value': tag_renamed})
    assert r.ok
    for cont_name, cont_id in hierarchy:
        r = as_admin.get('/' + cont_name + '/' + cont_id)
        assert r.ok and tag_renamed in r.json()['tags']

    # Delete the tag at the group level; the deletion should propagate downward
    r = as_admin.delete('/groups/' + group + '/tags/' + tag_renamed)
    assert r.ok
    for cont_name, cont_id in hierarchy:
        r = as_admin.get('/' + cont_name + '/' + cont_id)
        assert r.ok and tag_renamed not in r.json()['tags']
| 33.354633
| 115
| 0.646648
| 1,457
| 10,440
| 4.459849
| 0.070693
| 0.06679
| 0.068944
| 0.06602
| 0.875808
| 0.845029
| 0.806402
| 0.804709
| 0.785165
| 0.765928
| 0
| 0.00061
| 0.215038
| 10,440
| 312
| 116
| 33.461538
| 0.792312
| 0.116762
| 0
| 0.848039
| 0
| 0
| 0.144592
| 0
| 0
| 0
| 0
| 0
| 0.279412
| 1
| 0.039216
| false
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ddfb7aad8002c9fa30cae84859622223cb7810d
| 95
|
py
|
Python
|
family_task_queue/db/__init__.py
|
HitmanBobina47/family-task-queue
|
d9db82065b88ff7aa85963245dfb5448735464c9
|
[
"MIT"
] | null | null | null |
family_task_queue/db/__init__.py
|
HitmanBobina47/family-task-queue
|
d9db82065b88ff7aa85963245dfb5448735464c9
|
[
"MIT"
] | null | null | null |
family_task_queue/db/__init__.py
|
HitmanBobina47/family-task-queue
|
d9db82065b88ff7aa85963245dfb5448735464c9
|
[
"MIT"
] | null | null | null |
from . import task, user, db as _db
from .db import *
def init_app(app):
    """Initialize the database layer for the given application object.

    Delegates to ``db.init_app`` -- ``db`` is brought into scope by the
    ``from .db import *`` at the top of this module. NOTE(review): this looks
    like a Flask-extension-style ``init_app`` pattern (e.g. Flask-SQLAlchemy),
    but the ``.db`` module is not visible here -- confirm.
    """
    db.init_app(app)
| 15.833333
| 35
| 0.673684
| 18
| 95
| 3.388889
| 0.5
| 0.229508
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 95
| 5
| 36
| 19
| 0.813333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
936c119f00a532271de1f3993a4a84c7e0d227ea
| 837
|
py
|
Python
|
src/encoded/tests/fixtures/schemas/correlation_quality_metric.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | 102
|
2015-05-20T01:17:43.000Z
|
2022-03-07T06:03:55.000Z
|
src/encoded/tests/fixtures/schemas/correlation_quality_metric.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | 901
|
2015-01-07T23:11:57.000Z
|
2022-03-18T13:56:12.000Z
|
src/encoded/tests/fixtures/schemas/correlation_quality_metric.py
|
procha2/encoded
|
e9f122362b71f3b8641023b8d2d5ad531d3484b7
|
[
"MIT"
] | 65
|
2015-02-06T23:00:26.000Z
|
2022-01-22T07:58:44.000Z
|
import pytest
@pytest.fixture
def correlation_quality_metric(testapp, analysis_step_run_bam, file_tsv_1_2, award, lab):
    """Create a correlation_quality_metric item (Pearson correlation 0.1)
    via the test app and return the newly created object."""
    payload = {
        'step_run': analysis_step_run_bam['@id'],
        'quality_metric_of': [file_tsv_1_2['@id']],
        'Pearson correlation': 0.1,
        'award': award['@id'],
        'lab': lab['@id'],
    }
    response = testapp.post_json('/correlation_quality_metric', payload)
    return response.json['@graph'][0]
@pytest.fixture
def spearman_correlation_quality_metric(testapp, analysis_step_run_bam, file_tsv_1_2, award, lab):
    """Create a correlation_quality_metric item (Spearman correlation 0.7)
    via the test app and return the newly created object."""
    payload = {
        'step_run': analysis_step_run_bam['@id'],
        'quality_metric_of': [file_tsv_1_2['@id']],
        'Spearman correlation': 0.7,
        'award': award['@id'],
        'lab': lab['@id'],
    }
    response = testapp.post_json('/correlation_quality_metric', payload)
    return response.json['@graph'][0]
| 31
| 98
| 0.642772
| 111
| 837
| 4.477477
| 0.261261
| 0.156942
| 0.193159
| 0.144869
| 0.812877
| 0.812877
| 0.812877
| 0.812877
| 0.812877
| 0.812877
| 0
| 0.02071
| 0.192354
| 837
| 26
| 99
| 32.192308
| 0.714497
| 0
| 0
| 0.666667
| 0
| 0
| 0.232975
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35822ea97b82e59603fb219e1409394d0461c4d2
| 57,741
|
py
|
Python
|
PicoCTF/2017/Level 1/worldchat.py
|
bernardosequeir/CTFfiles
|
36a6ceba49d9a9019056d3669c5e8f84aa83b618
|
[
"MIT"
] | null | null | null |
PicoCTF/2017/Level 1/worldchat.py
|
bernardosequeir/CTFfiles
|
36a6ceba49d9a9019056d3669c5e8f84aa83b618
|
[
"MIT"
] | 5
|
2020-09-23T18:28:25.000Z
|
2020-09-23T18:28:41.000Z
|
PicoCTF/2017/Level 1/worldchat.py
|
bernardosequeir/CTFSolutions
|
503944617cb18826d12ab98fa33fd761e791328a
|
[
"MIT"
] | null | null | null |
text = "20:31:27 clinton: my parents , in my opinion, are our best chance for what, I do not know
20:31:27 hildegard: My sworn enemy totally understands me and my pet sloth to generate fusion power
20:31:27 jazmin: You is our best chance to drink your milkshake
20:31:27 dede: A huge moose is our best chance to create a self driving car
20:31:27 clinton: Scary pandas , in my well-educated opinion, are our best chance to help me spell 'raspberry' correctly
20:31:27 noihazflag: that girl from that movie will never understand me for what, I do not know
20:31:27 roselyn: A dog with a cape will never understand me for the future of humanity
20:31:27 dede: You gives me hope to generate fusion power
20:31:27 corrie: Cats with hats need to meet up for what, I do not know
20:31:28 denver: Hungry jackolanterns have demanded my presence for what, I do not know
20:31:28 lula: We want to see me to drink your milkshake
20:31:28 kelvin: We will never be able for what, I do not know
20:31:28 karine: that guy from that movie will never understand me to understand me
20:31:28 nathalie: Hungry jackolanterns will never be able to understand me
20:31:28 emiko: Hungry jackolanterns have demanded my presence to generate fusion power
20:31:28 patience: We give me hope for the future of humanity
20:31:28 maynard: that girl from that movie would like to meet you for the future of humanity
20:31:28 tamela: Anyone but me is our best chance for what, I do not know
20:31:28 deandrea: A dog with a cape is our best chance to make a rasberry pie
20:31:28 nadia: Scary pandas give me hope for the future of humanity
20:31:28 kelvin: Scary pandas will never be able to generate fusion power
20:31:28 gregg: A dog with a cape wants to see me to understand me
20:31:28 tamela: A small moose would like to meet you to generate fusion power
20:31:28 sheilah: Hungry jackolanterns will never be able to make a rasberry pie
20:31:28 lindsey: A huge moose wants to steal my sloth for the future of humanity
20:31:28 noihazflag: Several heavily mustached dolphins are the best of friends for the future of humanity
20:31:28 raina: You would like to meet you to generate fusion power
20:31:28 noihazflag: Several heavily mustached dolphins , in my opinion, are our best chance to generate fusion power
20:31:28 sara: Hungry jackolanterns are the best of friends to make a rasberry pie
20:31:28 adelina: We need to meet up to help me spell 'raspberry' correctly
20:31:28 gregg: your dad has attacked my toes to generate fusion power
20:31:28 mariann: Several heavily mustached dolphins are the best of friends to make a rasberry pie
20:31:28 theresia: We will never be able for what, I do not know
20:31:28 ilene: my homie would like to meet you for the future of humanity
20:31:28 anabel: Hungry jackolanterns , in my well-educated opinion, are our best chance to help me spell 'raspberry' correctly
20:31:28 sonny: your dad has attacked my toes to understand me
20:31:28 karine: My sworn enemy is our best chance to generate fusion power
20:31:28 emiko: I has attacked my toes to generate fusion power
20:31:28 lilli: Scary pandas are the best of friends for the future of humanity
20:31:28 luann: a heavily bearded dolphin will never understand me for the future of humanity
20:31:28 leandro: My friend wants to see me to understand me
20:31:28 charity: A huge moose will never understand me to create a self driving car
20:31:29 whatisflag: I totally understands me and my pet sloth to understand me
20:31:29 lynwood: A huge moose totally understands me and my pet sloth to generate fusion power
20:31:29 diana: Scary pandas , in my opinion, are our best chance to understand me
20:31:29 adelina: my homegirlz give me hope to drink your milkshake
20:31:29 gregg: A silly panda would like to meet you for the future of humanity
20:31:29 ihazflag: Only us have demanded my presence to generate fusion power
20:31:29 kory: My sworn enemy will never understand me for the future of humanity
20:31:29 nikia: A dog with a cape would like to meet you to understand me
20:31:29 whatisflag: your dad totally understands me and my pet sloth to generate fusion power
20:31:29 jammie: that guy from that movie would like to meet you for what, I do not know
20:31:29 beau: Cats with hats , in my opinion, are our best chance to help me spell 'raspberry' correctly
20:31:29 shala: your dad will never understand me for what, I do not know
20:31:29 jammie: my homie will never understand me to drink your milkshake
20:31:29 karine: Scary pandas have demanded my presence for the future of humanity
20:31:29 anabel: We will never be able to drink your milkshake
20:31:29 gregg: Hungry jackolanterns need to meet up to help me spell 'raspberry' correctly
20:31:29 nikia: I wants to steal my sloth to generate fusion power
20:31:29 ihazflag: that guy from that movie is our best chance to make a rasberry pie
20:31:29 noihazflag: My sworn enemy wants to see me to create a self driving car
20:31:29 sharell: Anyone but me wants to steal my sloth to understand me
20:31:29 sheilah: You will never understand me to make a rasberry pie
20:31:29 personwithflag: my parents want to see me to understand me
20:31:29 clinton: my parents have demanded my presence to generate fusion power
20:31:29 ihazflag: my parents will never be able to generate fusion power
20:31:29 sonny: I would like to meet you to create a self driving car
20:31:29 christene: Anyone but me is our best chance for what, I do not know
20:31:29 gregg: My sworn enemy wants to steal my sloth for the future of humanity
20:31:29 sonny: Cats with hats have demanded my presence to make a rasberry pie
20:31:29 christene: Cats with hats are the best of friends to help me spell 'raspberry' correctly
20:31:29 jefferson: my parents are the best of friends for the future of humanity
20:31:29 mariann: A silly panda will never understand me to help me spell 'raspberry' correctly
20:31:29 jefferson: my homegirlz want to see me for what, I do not know
20:31:29 sheilah: my homegirlz want to see me for the future of humanity
20:31:30 corrie: my homegirlz will never be able to help me spell 'raspberry' correctly
20:31:30 diana: You will never understand me to make a rasberry pie
20:31:30 sheilah: A small moose wants to steal my sloth to generate fusion power
20:31:30 deandrea: Hungry jackolanterns have demanded my presence to drink your milkshake
20:31:30 sharell: I is our best chance to make a rasberry pie
20:31:30 jefferson: A silly panda totally understands me and my pet sloth to make a rasberry pie
20:31:30 sheilah: Several heavily mustached dolphins want to see me for the future of humanity
20:31:30 noihazflag: my homegirlz are the best of friends to drink your milkshake
20:31:30 rita: My friend wants to see me to create a self driving car
20:31:30 sara: that girl from that movie is our best chance for what, I do not know
20:31:30 lucretia: a heavily bearded dolphin is our best chance to make a rasberry pie
20:31:30 sharell: Cats with hats have demanded my presence for the future of humanity
20:31:30 luann: a heavily bearded dolphin totally understands me and my pet sloth for what, I do not know
20:31:30 emiko: my parents , in my well-educated opinion, are our best chance to drink your milkshake
20:31:30 jefferson: Hungry jackolanterns give me hope to help me spell 'raspberry' correctly
20:31:30 lilli: my homegirlz are the best of friends to drink your milkshake
20:31:30 shala: Anyone but me will never understand me for the future of humanity
20:31:30 rickie: My sworn enemy gives me hope for the future of humanity
20:31:30 nadia: Cats with hats are the best of friends for the future of humanity
20:31:30 karine: your dad wants to see me to create a self driving car
20:31:30 emiko: Anyone but me is our best chance to help me spell 'raspberry' correctly
20:31:30 nikia: Only us have demanded my presence to generate fusion power
20:31:30 corrie: A silly panda has attacked my toes for what, I do not know
20:31:30 mariann: Cats with hats want to see me for what, I do not know
20:31:30 roselyn: your dad wants to see me for the future of humanity
20:31:30 denver: Only us will never be able to help me spell 'raspberry' correctly
20:31:30 rita: Only us have demanded my presence to understand me
20:31:30 noihazflag: A silly panda totally understands me and my pet sloth for what, I do not know
20:31:30 denver: a heavily bearded dolphin would like to meet you to generate fusion power
20:31:30 roselyn: I totally understands me and my pet sloth for what, I do not know
20:31:30 hollie: Anyone but me wants to see me for the future of humanity
20:31:30 nathalie: A huge moose wants to steal my sloth to create a self driving car
20:31:30 christene: a heavily bearded dolphin would like to meet you for what, I do not know
20:31:30 jazmin: that girl from that movie has attacked my toes to understand me
20:31:31 karine: We give me hope to create a self driving car
20:31:31 hildegard: A huge moose has attacked my toes to make a rasberry pie
20:31:31 sheilah: my parents are the best of friends to create a self driving car
20:31:31 flagperson: this is part 1/8 of the flag - 2e5c
20:31:31 anabel: A silly panda has attacked my toes for what, I do not know
20:31:31 luann: a heavily bearded dolphin wants to steal my sloth to help me spell 'raspberry' correctly
20:31:31 charity: Hungry jackolanterns will never be able to create a self driving car
20:31:31 dede: my homie has attacked my toes to make a rasberry pie
20:31:31 adelina: We are the best of friends for the future of humanity
20:31:31 roselyn: My sworn enemy wants to see me to generate fusion power
20:31:31 ilene: your dad has attacked my toes to generate fusion power
20:31:31 raina: We , in my opinion, are our best chance for the future of humanity
20:31:31 lucretia: Scary pandas give me hope to drink your milkshake
20:31:31 adelina: Several heavily mustached dolphins have demanded my presence to generate fusion power
20:31:31 cordelia: that girl from that movie totally understands me and my pet sloth to help me spell 'raspberry' correctly
20:31:31 mariann: Only us give me hope to help me spell 'raspberry' correctly
20:31:31 karine: Cats with hats are the best of friends to make a rasberry pie
20:31:31 anabel: my homie totally understands me and my pet sloth for the future of humanity
20:31:31 myra: a heavily bearded dolphin will never understand me to help me spell 'raspberry' correctly
20:31:31 noihazflag: that girl from that movie totally understands me and my pet sloth to make a rasberry pie
20:31:31 shala: Anyone but me totally understands me and my pet sloth to help me spell 'raspberry' correctly
20:31:31 christene: a heavily bearded dolphin is our best chance to create a self driving car
20:31:31 kelvin: Only us will never be able to create a self driving car
20:31:31 theresia: Scary pandas give me hope for what, I do not know
20:31:31 gregg: A dog with a cape wants to steal my sloth to generate fusion power
20:31:31 lynwood: my parents have demanded my presence to help me spell 'raspberry' correctly
20:31:31 karine: Only us need to meet up to make a rasberry pie
20:31:31 deandrea: Cats with hats , in my opinion, are our best chance to understand me
20:31:31 lula: Several heavily mustached dolphins , in my well-educated opinion, are our best chance to help me spell 'raspberry' correctly
20:31:31 rickie: Anyone but me would like to meet you to drink your milkshake
20:31:31 tamela: Anyone but me is our best chance to create a self driving car
20:31:31 deandrea: my homegirlz will never be able to create a self driving car
20:31:31 hollie: Hungry jackolanterns want to see me for what, I do not know
20:31:31 lindsey: my homegirlz want to see me for the future of humanity
20:31:31 leandro: Cats with hats , in my well-educated opinion, are our best chance for the future of humanity
20:31:32 emiko: my homegirlz need to meet up to drink your milkshake
20:31:32 roselyn: Cats with hats , in my well-educated opinion, are our best chance to understand me
20:31:32 anabel: We will never be able for what, I do not know
20:31:32 flagperson: this is part 2/8 of the flag - 3014
20:31:32 leandro: my homegirlz will never be able for what, I do not know
20:31:32 leandro: Scary pandas , in my well-educated opinion, are our best chance for the future of humanity
20:31:32 anabel: I would like to meet you for the future of humanity
20:31:32 nathalie: A huge moose is our best chance to make a rasberry pie
20:31:32 kelvin: my homie will never understand me to generate fusion power
20:31:32 whatisflag: Hungry jackolanterns will never be able to drink your milkshake
20:31:32 janett: A small moose wants to see me to drink your milkshake
20:31:32 anabel: Several heavily mustached dolphins , in my opinion, are our best chance to drink your milkshake
20:31:32 jammie: A silly panda is our best chance to help me spell 'raspberry' correctly
20:31:32 sonny: Several heavily mustached dolphins , in my opinion, are our best chance to help me spell 'raspberry' correctly
20:31:32 corrie: your dad totally understands me and my pet sloth for what, I do not know
20:31:32 charity: my homegirlz give me hope for the future of humanity
20:31:32 deandrea: My friend wants to steal my sloth to generate fusion power
20:31:32 tamela: I is our best chance to drink your milkshake
20:31:32 ihazflag: a heavily bearded dolphin gives me hope for the future of humanity
20:31:32 myra: My sworn enemy has attacked my toes for what, I do not know
20:31:32 flagperson: this is part 3/8 of the flag - a9c5
20:31:32 lindsey: We want to see me to generate fusion power
20:31:32 janett: Several heavily mustached dolphins need to meet up to generate fusion power
20:31:32 sheilah: a heavily bearded dolphin gives me hope to generate fusion power
20:31:32 raina: Cats with hats are the best of friends for the future of humanity
20:31:32 jazmin: my homie has attacked my toes to drink your milkshake
20:31:32 kelvin: Several heavily mustached dolphins will never be able to generate fusion power
20:31:32 leandro: My sworn enemy wants to see me for what, I do not know
20:31:32 raina: I wants to steal my sloth for what, I do not know
20:31:32 emiko: my parents give me hope for the future of humanity
20:31:32 deandrea: Hungry jackolanterns want to see me to help me spell 'raspberry' correctly
20:31:33 lilli: Cats with hats have demanded my presence to help me spell 'raspberry' correctly
20:31:33 leandro: A silly panda would like to meet you to create a self driving car
20:31:33 leandro: my parents have demanded my presence for what, I do not know
20:31:33 christene: My friend gives me hope to drink your milkshake
20:31:33 luann: A dog with a cape will never understand me to help me spell 'raspberry' correctly
20:31:33 sara: My sworn enemy has attacked my toes to create a self driving car
20:31:33 hollie: Only us will never be able to help me spell 'raspberry' correctly
20:31:33 nikia: my homie gives me hope to understand me
20:31:33 gregg: A small moose would like to meet you to understand me
20:31:33 tamela: that guy from that movie wants to steal my sloth for the future of humanity
20:31:33 leandro: Only us will never be able to make a rasberry pie
20:31:33 sharell: Cats with hats give me hope to help me spell 'raspberry' correctly
20:31:33 lindsey: my homie gives me hope to understand me
20:31:33 nadia: Anyone but me wants to see me to create a self driving car
20:31:33 diana: that girl from that movie totally understands me and my pet sloth to generate fusion power
20:31:33 ihazflag: You has attacked my toes to help me spell 'raspberry' correctly
20:31:33 honey: We will never be able to drink your milkshake
20:31:33 sheilah: We want to see me to create a self driving car
20:31:33 raina: Cats with hats need to meet up to make a rasberry pie
20:31:33 sara: Scary pandas , in my well-educated opinion, are our best chance for the future of humanity
20:31:33 whatisflag: My friend gives me hope for what, I do not know
20:31:33 sonny: Only us give me hope for what, I do not know
20:31:33 patience: Hungry jackolanterns have demanded my presence to make a rasberry pie
20:31:33 beau: Scary pandas have demanded my presence to drink your milkshake
20:31:33 noihazflag: Only us , in my well-educated opinion, are our best chance to understand me
20:31:33 janett: You gives me hope for the future of humanity
20:31:33 rickie: A small moose would like to meet you for the future of humanity
20:31:33 roselyn: We have demanded my presence for the future of humanity
20:31:33 kelvin: that guy from that movie wants to steal my sloth to understand me
20:31:33 dede: that guy from that movie would like to meet you to understand me
20:31:33 beau: Cats with hats will never be able for the future of humanity
20:31:33 raina: Cats with hats need to meet up to create a self driving car
20:31:33 corrie: my parents are the best of friends to make a rasberry pie
20:31:33 emiko: A dog with a cape would like to meet you to generate fusion power
20:31:33 personwithflag: A huge moose wants to steal my sloth to generate fusion power
20:31:33 kory: A huge moose wants to steal my sloth to make a rasberry pie
20:31:34 roselyn: Cats with hats need to meet up to make a rasberry pie
20:31:34 beau: A small moose will never understand me to generate fusion power
20:31:34 nadia: Cats with hats , in my opinion, are our best chance to help me spell 'raspberry' correctly
20:31:34 denver: my homie wants to steal my sloth for what, I do not know
20:31:34 roselyn: Cats with hats are the best of friends to generate fusion power
20:31:34 sara: my homegirlz have demanded my presence to generate fusion power
20:31:34 beau: that girl from that movie wants to steal my sloth to understand me
20:31:34 roselyn: I wants to steal my sloth for the future of humanity
20:31:34 gregg: that girl from that movie wants to steal my sloth to make a rasberry pie
20:31:34 ihazflag: A silly panda will never understand me to generate fusion power
20:31:34 clinton: that girl from that movie will never understand me to understand me
20:31:34 adelina: Scary pandas want to see me to help me spell 'raspberry' correctly
20:31:34 lucretia: We , in my well-educated opinion, are our best chance to help me spell 'raspberry' correctly
20:31:34 anabel: my homie wants to see me to help me spell 'raspberry' correctly
20:31:34 emiko: Several heavily mustached dolphins have demanded my presence to generate fusion power
20:31:34 theresia: that girl from that movie totally understands me and my pet sloth to make a rasberry pie
20:31:34 lindsey: Scary pandas have demanded my presence for what, I do not know
20:31:34 personwithflag: Scary pandas , in my opinion, are our best chance to create a self driving car
20:31:34 gregg: Scary pandas have demanded my presence to create a self driving car
20:31:34 sharell: my homegirlz will never be able to drink your milkshake
20:31:34 myra: My sworn enemy totally understands me and my pet sloth to create a self driving car
20:31:34 sheilah: your dad will never understand me to generate fusion power
20:31:34 christene: Scary pandas will never be able to help me spell 'raspberry' correctly
20:31:34 janett: my parents , in my well-educated opinion, are our best chance to drink your milkshake
20:31:34 noihazflag: We , in my well-educated opinion, are our best chance to help me spell 'raspberry' correctly
20:31:34 corrie: that girl from that movie is our best chance to create a self driving car
20:31:34 anabel: a heavily bearded dolphin would like to meet you to understand me
20:31:34 nikia: my homie wants to steal my sloth to create a self driving car
20:31:34 anabel: Cats with hats have demanded my presence for what, I do not know
20:31:34 jazmin: A silly panda has attacked my toes to make a rasberry pie
20:31:34 maynard: I wants to see me to drink your milkshake
20:31:34 tamela: We , in my opinion, are our best chance to understand me
20:31:34 honey: that guy from that movie will never understand me to drink your milkshake
20:31:34 maynard: my parents need to meet up for the future of humanity
20:31:34 honey: Anyone but me will never understand me for the future of humanity
20:31:34 flagperson: this is part 4/8 of the flag - ff31
20:31:35 clinton: A small moose gives me hope for what, I do not know
20:31:35 dede: your dad will never understand me for what, I do not know
20:31:35 cordelia: We will never be able to create a self driving car
20:31:35 patience: your dad will never understand me to generate fusion power
20:31:35 sheilah: Scary pandas need to meet up to make a rasberry pie
20:31:35 personwithflag: A dog with a cape will never understand me to make a rasberry pie
20:31:35 sharell: A silly panda wants to steal my sloth for the future of humanity
20:31:35 hollie: my homegirlz want to see me for the future of humanity
20:31:35 theresia: I has attacked my toes to help me spell 'raspberry' correctly
20:31:35 ihazflag: You would like to meet you to make a rasberry pie
20:31:35 whatisflag: My friend totally understands me and my pet sloth to drink your milkshake
20:31:35 hollie: Cats with hats , in my opinion, are our best chance for what, I do not know
20:31:35 kory: Hungry jackolanterns , in my opinion, are our best chance for what, I do not know
20:31:35 clinton: A dog with a cape is our best chance to create a self driving car
20:31:35 karine: We , in my opinion, are our best chance for the future of humanity
20:31:35 charity: I wants to steal my sloth to make a rasberry pie
20:31:35 patience: Scary pandas are the best of friends for what, I do not know
20:31:35 dede: Cats with hats want to see me to create a self driving car
20:31:35 cordelia: Anyone but me has attacked my toes to help me spell 'raspberry' correctly
20:31:35 mariann: I has attacked my toes for the future of humanity
20:31:35 raina: my homie has attacked my toes to make a rasberry pie
20:31:35 lynwood: My sworn enemy totally understands me and my pet sloth to drink your milkshake
20:31:35 nadia: You has attacked my toes to understand me
20:31:35 lilli: my homie totally understands me and my pet sloth for the future of humanity
20:31:35 denver: Cats with hats give me hope to drink your milkshake
20:31:35 nathalie: a heavily bearded dolphin will never understand me to create a self driving car
20:31:35 corrie: A dog with a cape has attacked my toes to drink your milkshake
20:31:35 jammie: my homegirlz need to meet up for what, I do not know
20:31:35 deandrea: Cats with hats have demanded my presence to understand me
20:31:36 roselyn: Several heavily mustached dolphins , in my opinion, are our best chance for what, I do not know
20:31:36 janett: My friend totally understands me and my pet sloth to help me spell 'raspberry' correctly
20:31:36 lula: My friend wants to see me to understand me
20:31:36 dede: my homegirlz need to meet up to make a rasberry pie
20:31:36 nathalie: A huge moose has attacked my toes to drink your milkshake
20:31:36 patience: Cats with hats have demanded my presence for the future of humanity
20:31:36 ilene: Cats with hats will never be able for what, I do not know
20:31:36 maynard: You wants to steal my sloth to make a rasberry pie
20:31:36 theresia: Hungry jackolanterns give me hope to generate fusion power
20:31:36 roselyn: Cats with hats , in my well-educated opinion, are our best chance to understand me
20:31:36 sonny: My sworn enemy is our best chance for what, I do not know
20:31:36 whatisflag: Cats with hats , in my opinion, are our best chance to help me spell 'raspberry' correctly
20:31:36 sharell: Scary pandas have demanded my presence to create a self driving car
20:31:36 lilli: my parents want to see me for the future of humanity
20:31:36 theresia: A small moose wants to see me for the future of humanity
20:31:36 raina: A small moose wants to see me to drink your milkshake
20:31:36 sheilah: Hungry jackolanterns , in my opinion, are our best chance to generate fusion power
20:31:36 leandro: my homegirlz need to meet up to understand me
20:31:36 honey: Scary pandas want to see me to generate fusion power
20:31:36 adelina: Scary pandas want to see me for the future of humanity
20:31:36 leandro: my homie wants to see me to understand me
20:31:36 anabel: Hungry jackolanterns want to see me to make a rasberry pie
20:31:36 jammie: A dog with a cape is our best chance to help me spell 'raspberry' correctly
20:31:36 roselyn: my homegirlz are the best of friends to make a rasberry pie
20:31:36 theresia: Only us have demanded my presence to create a self driving car
20:31:36 leandro: my homegirlz give me hope for the future of humanity
20:31:36 flagperson: this is part 5/8 of the flag - 5a5c
20:31:36 emiko: A huge moose has attacked my toes to make a rasberry pie
20:31:36 lindsey: that girl from that movie gives me hope to generate fusion power
20:31:36 nikia: Hungry jackolanterns are the best of friends to generate fusion power
20:31:36 shala: a heavily bearded dolphin wants to see me to drink your milkshake
20:31:36 clinton: A huge moose gives me hope to generate fusion power
20:31:36 rickie: Hungry jackolanterns want to see me to understand me
20:31:36 personwithflag: My friend is our best chance to make a rasberry pie
20:31:36 sharell: Cats with hats , in my opinion, are our best chance to create a self driving car
20:31:36 rickie: I wants to steal my sloth to make a rasberry pie
20:31:37 diana: Hungry jackolanterns , in my opinion, are our best chance for what, I do not know
20:31:37 lilli: my parents , in my well-educated opinion, are our best chance to drink your milkshake
20:31:37 hollie: Several heavily mustached dolphins give me hope to make a rasberry pie
20:31:37 anabel: Hungry jackolanterns want to see me for the future of humanity
20:31:37 luann: that girl from that movie will never understand me to make a rasberry pie
20:31:37 honey: Only us want to see me for what, I do not know
20:31:37 shala: my parents have demanded my presence to generate fusion power
20:31:37 theresia: Several heavily mustached dolphins are the best of friends to drink your milkshake
20:31:37 luann: Cats with hats are the best of friends for what, I do not know
20:31:37 patience: A silly panda totally understands me and my pet sloth to create a self driving car
20:31:37 shala: We want to see me to generate fusion power
20:31:37 lilli: that girl from that movie has attacked my toes to understand me
20:31:37 denver: A silly panda gives me hope for the future of humanity
20:31:37 myra: My friend is our best chance to create a self driving car
20:31:37 deandrea: A small moose would like to meet you to generate fusion power
20:31:37 nathalie: my homegirlz need to meet up for the future of humanity
20:31:37 emiko: Hungry jackolanterns want to see me to help me spell 'raspberry' correctly
20:31:37 janett: my homie is our best chance to understand me
20:31:37 lilli: my homegirlz , in my well-educated opinion, are our best chance to create a self driving car
20:31:37 patience: my parents , in my well-educated opinion, are our best chance to understand me
20:31:37 noihazflag: We , in my opinion, are our best chance to understand me
20:31:37 gregg: my homegirlz want to see me to create a self driving car
20:31:37 maynard: Anyone but me is our best chance to understand me
20:31:37 anabel: Scary pandas need to meet up for what, I do not know
20:31:37 sheilah: Anyone but me wants to see me to understand me
20:31:37 mariann: My friend will never understand me to create a self driving car
20:31:37 gregg: Hungry jackolanterns , in my well-educated opinion, are our best chance for the future of humanity
20:31:37 kory: my parents need to meet up to make a rasberry pie
20:31:37 lucretia: Only us , in my well-educated opinion, are our best chance for the future of humanity
20:31:37 janett: I is our best chance to generate fusion power
20:31:37 dede: Only us have demanded my presence to drink your milkshake
20:31:37 kelvin: my parents give me hope to understand me
20:31:38 ihazflag: Anyone but me will never understand me to create a self driving car
20:31:38 ihazflag: Several heavily mustached dolphins give me hope to understand me
20:31:38 denver: a heavily bearded dolphin totally understands me and my pet sloth to generate fusion power
20:31:38 hildegard: My friend totally understands me and my pet sloth to help me spell 'raspberry' correctly
20:31:38 karine: my homegirlz , in my well-educated opinion, are our best chance to make a rasberry pie
20:31:38 beau: my parents will never be able to help me spell 'raspberry' correctly
20:31:38 dede: My friend has attacked my toes for the future of humanity
20:31:38 raina: my homegirlz have demanded my presence for what, I do not know
20:31:38 lilli: Cats with hats are the best of friends for what, I do not know
20:31:38 hollie: My friend gives me hope for the future of humanity
20:31:38 kelvin: my homie will never understand me for the future of humanity
20:31:38 diana: Cats with hats need to meet up for the future of humanity
20:31:38 sharell: Scary pandas want to see me for what, I do not know
20:31:38 christene: I wants to see me to understand me
20:31:38 luann: We have demanded my presence for what, I do not know
20:31:38 noihazflag: my homegirlz are the best of friends to generate fusion power
20:31:38 sheilah: Cats with hats are the best of friends to help me spell 'raspberry' correctly
20:31:38 charity: You would like to meet you for the future of humanity
20:31:38 luann: that girl from that movie is our best chance for the future of humanity
20:31:38 sonny: Hungry jackolanterns need to meet up to create a self driving car
20:31:38 ilene: Cats with hats want to see me for the future of humanity
20:31:38 ilene: Scary pandas have demanded my presence for what, I do not know
20:31:38 charity: that guy from that movie will never understand me to help me spell 'raspberry' correctly
20:31:38 sonny: I will never understand me to generate fusion power
20:31:38 lynwood: A silly panda will never understand me to make a rasberry pie
20:31:38 personwithflag: my homegirlz want to see me to help me spell 'raspberry' correctly
20:31:38 hollie: my homie has attacked my toes to help me spell 'raspberry' correctly
20:31:38 honey: Hungry jackolanterns , in my well-educated opinion, are our best chance for the future of humanity
20:31:38 christene: my homegirlz , in my well-educated opinion, are our best chance for the future of humanity
20:31:38 janett: A huge moose gives me hope to drink your milkshake
20:31:38 honey: My friend would like to meet you for what, I do not know
20:31:38 myra: that guy from that movie totally understands me and my pet sloth to drink your milkshake
20:31:38 charity: We , in my well-educated opinion, are our best chance to generate fusion power
20:31:38 beau: We want to see me for what, I do not know
20:31:38 personwithflag: A small moose would like to meet you to help me spell 'raspberry' correctly
20:31:38 lindsey: my homie has attacked my toes to help me spell 'raspberry' correctly
20:31:39 diana: Anyone but me gives me hope for what, I do not know
20:31:39 flagperson: this is part 6/8 of the flag - b6db
20:31:39 roselyn: A silly panda is our best chance to create a self driving car
20:31:39 raina: Anyone but me wants to steal my sloth for the future of humanity
20:31:39 ilene: Hungry jackolanterns have demanded my presence to understand me
20:31:39 jazmin: my homegirlz need to meet up to make a rasberry pie
20:31:39 luann: A silly panda gives me hope for the future of humanity
20:31:39 corrie: my parents give me hope for the future of humanity
20:31:39 noihazflag: I gives me hope to drink your milkshake
20:31:39 sonny: your dad wants to see me to help me spell 'raspberry' correctly
20:31:39 gregg: My friend gives me hope to understand me
20:31:39 flagperson: this is part 7/8 of the flag - 97bc
20:31:39 maynard: Only us need to meet up to generate fusion power
20:31:39 kelvin: your dad wants to steal my sloth to generate fusion power
20:31:39 janett: A small moose will never understand me to generate fusion power
20:31:39 denver: my parents are the best of friends to drink your milkshake
20:31:39 roselyn: I gives me hope to understand me
20:31:39 lucretia: that girl from that movie has attacked my toes to generate fusion power
20:31:39 rickie: that girl from that movie has attacked my toes for the future of humanity
20:31:39 jazmin: Cats with hats will never be able for what, I do not know
20:31:39 shala: Cats with hats need to meet up to understand me
20:31:39 mariann: my homegirlz want to see me to generate fusion power
20:31:39 christene: my homegirlz will never be able to generate fusion power
20:31:39 nadia: your dad wants to steal my sloth for what, I do not know
20:31:39 nikia: You would like to meet you to make a rasberry pie
20:31:39 charity: a heavily bearded dolphin wants to see me to create a self driving car
20:31:39 beau: A small moose would like to meet you to help me spell 'raspberry' correctly
20:31:39 leandro: I is our best chance to create a self driving car
20:31:39 kelvin: my parents need to meet up to create a self driving car
20:31:39 sara: Several heavily mustached dolphins have demanded my presence to understand me
20:31:39 honey: My sworn enemy would like to meet you to drink your milkshake
20:31:39 denver: Hungry jackolanterns give me hope to create a self driving car
20:31:39 christene: my parents , in my opinion, are our best chance to understand me
20:31:39 dede: My friend totally understands me and my pet sloth to understand me
20:31:39 theresia: A huge moose will never understand me to help me spell 'raspberry' correctly
20:31:39 mariann: A huge moose is our best chance to make a rasberry pie
20:31:39 rickie: your dad totally understands me and my pet sloth for what, I do not know
20:31:40 dede: my homie totally understands me and my pet sloth for the future of humanity
20:31:40 luann: Several heavily mustached dolphins will never be able to help me spell 'raspberry' correctly
20:31:40 myra: I has attacked my toes to create a self driving car
20:31:40 emiko: Cats with hats will never be able for the future of humanity
20:31:40 nikia: my homegirlz give me hope to drink your milkshake
20:31:40 lindsey: A small moose totally understands me and my pet sloth to make a rasberry pie
20:31:40 lynwood: my parents want to see me to help me spell 'raspberry' correctly
20:31:40 sharell: We will never be able to understand me
20:31:40 kory: A small moose would like to meet you to understand me
20:31:40 honey: that girl from that movie will never understand me to understand me
20:31:40 nikia: I will never understand me for what, I do not know
20:31:40 adelina: your dad will never understand me for the future of humanity
20:31:40 theresia: Cats with hats want to see me for the future of humanity
20:31:40 janett: Several heavily mustached dolphins , in my well-educated opinion, are our best chance to understand me
20:31:40 rita: We will never be able for the future of humanity
20:31:40 rita: Cats with hats want to see me to understand me
20:31:40 raina: A dog with a cape totally understands me and my pet sloth to drink your milkshake
20:31:40 whatisflag: Only us , in my well-educated opinion, are our best chance to generate fusion power
20:31:40 whatisflag: Cats with hats are the best of friends to help me spell 'raspberry' correctly
20:31:40 hildegard: Anyone but me gives me hope for the future of humanity
20:31:40 roselyn: Scary pandas , in my opinion, are our best chance to generate fusion power
20:31:40 charity: My friend is our best chance for what, I do not know
20:31:40 janett: My friend will never understand me to make a rasberry pie
20:31:40 beau: Several heavily mustached dolphins have demanded my presence for what, I do not know
20:31:40 sharell: You gives me hope for what, I do not know
20:31:40 rita: Anyone but me will never understand me to make a rasberry pie
20:31:40 corrie: a heavily bearded dolphin wants to see me to help me spell 'raspberry' correctly
20:31:40 lilli: Several heavily mustached dolphins need to meet up to create a self driving car
20:31:40 sharell: A small moose totally understands me and my pet sloth to generate fusion power
20:31:40 sara: Anyone but me wants to see me to make a rasberry pie
20:31:40 nikia: Hungry jackolanterns are the best of friends for the future of humanity
20:31:41 beau: Scary pandas , in my opinion, are our best chance to understand me
20:31:41 kelvin: Cats with hats are the best of friends for what, I do not know
20:31:41 lucretia: My sworn enemy has attacked my toes to understand me
20:31:41 mariann: your dad wants to steal my sloth to make a rasberry pie
20:31:41 luann: that girl from that movie would like to meet you to drink your milkshake
20:31:41 sheilah: my parents will never be able to make a rasberry pie
20:31:41 whatisflag: Scary pandas give me hope to create a self driving car
20:31:41 kelvin: Anyone but me totally understands me and my pet sloth to understand me
20:31:41 noihazflag: My sworn enemy would like to meet you for the future of humanity
20:31:41 lula: My sworn enemy has attacked my toes to generate fusion power
20:31:41 luann: my parents , in my opinion, are our best chance to understand me
20:31:41 jazmin: Anyone but me has attacked my toes for what, I do not know
20:31:41 kelvin: my homie wants to see me for the future of humanity
20:31:41 adelina: A dog with a cape is our best chance to help me spell 'raspberry' correctly
20:31:41 noihazflag: I totally understands me and my pet sloth to create a self driving car
20:31:41 sonny: My sworn enemy wants to steal my sloth to understand me
20:31:41 mariann: Hungry jackolanterns give me hope to help me spell 'raspberry' correctly
20:31:41 deandrea: my parents give me hope to create a self driving car
20:31:41 kory: A huge moose is our best chance to drink your milkshake
20:31:41 jazmin: A dog with a cape is our best chance to help me spell 'raspberry' correctly
20:31:41 dede: a heavily bearded dolphin will never understand me to generate fusion power
20:31:41 rickie: My sworn enemy totally understands me and my pet sloth to make a rasberry pie
20:31:41 maynard: Only us , in my well-educated opinion, are our best chance to generate fusion power
20:31:41 theresia: Scary pandas , in my well-educated opinion, are our best chance to create a self driving car
20:31:41 mariann: my parents give me hope to help me spell 'raspberry' correctly
20:31:41 noihazflag: A small moose will never understand me to understand me
20:31:41 emiko: My friend wants to see me to make a rasberry pie
20:31:41 gregg: a heavily bearded dolphin will never understand me to create a self driving car
20:31:41 karine: my homegirlz , in my opinion, are our best chance to help me spell 'raspberry' correctly
20:31:42 denver: Only us need to meet up to understand me
20:31:42 dede: Scary pandas give me hope for the future of humanity
20:31:42 sara: Cats with hats give me hope for the future of humanity
20:31:42 lilli: Scary pandas , in my opinion, are our best chance to make a rasberry pie
20:31:42 lucretia: A dog with a cape gives me hope to create a self driving car
20:31:42 sharell: Hungry jackolanterns have demanded my presence to create a self driving car
20:31:42 denver: Several heavily mustached dolphins want to see me to make a rasberry pie
20:31:42 lilli: I would like to meet you to drink your milkshake
20:31:42 jammie: Only us are the best of friends to generate fusion power
20:31:42 sonny: a heavily bearded dolphin totally understands me and my pet sloth to understand me
20:31:42 janett: We will never be able to drink your milkshake
20:31:42 dede: My sworn enemy is our best chance to help me spell 'raspberry' correctly
20:31:42 kelvin: My friend gives me hope to generate fusion power
20:31:42 cordelia: my homegirlz , in my opinion, are our best chance for the future of humanity
20:31:42 corrie: Anyone but me will never understand me to make a rasberry pie
20:31:42 jammie: Cats with hats will never be able for what, I do not know
20:31:42 denver: A small moose has attacked my toes for what, I do not know
20:31:42 jammie: Only us , in my well-educated opinion, are our best chance to drink your milkshake
20:31:42 sheilah: A huge moose has attacked my toes for the future of humanity
20:31:42 lindsey: We , in my opinion, are our best chance for what, I do not know
20:31:42 personwithflag: I will never understand me to generate fusion power
20:31:42 sara: Only us want to see me to drink your milkshake
20:31:42 nadia: Hungry jackolanterns are the best of friends for what, I do not know
20:31:42 tamela: Several heavily mustached dolphins , in my well-educated opinion, are our best chance to generate fusion power
20:31:42 dede: A huge moose gives me hope to drink your milkshake
20:31:42 sharell: that girl from that movie would like to meet you for the future of humanity
20:31:42 personwithflag: my homegirlz , in my opinion, are our best chance to help me spell 'raspberry' correctly
20:31:42 hollie: We , in my opinion, are our best chance for what, I do not know
20:31:42 nathalie: Anyone but me gives me hope to create a self driving car
20:31:42 kelvin: Scary pandas will never be able to understand me
20:31:42 charity: A silly panda gives me hope for the future of humanity
20:31:42 nadia: my homie is our best chance to understand me
20:31:42 nadia: Several heavily mustached dolphins have demanded my presence to drink your milkshake
20:31:43 clinton: We are the best of friends for what, I do not know
20:31:43 jazmin: my homie is our best chance to make a rasberry pie
20:31:43 christene: my parents are the best of friends to understand me
20:31:43 nathalie: Only us have demanded my presence to help me spell 'raspberry' correctly
20:31:43 christene: A silly panda totally understands me and my pet sloth to help me spell 'raspberry' correctly
20:31:43 tamela: that guy from that movie totally understands me and my pet sloth to drink your milkshake
20:31:43 lula: Scary pandas will never be able to make a rasberry pie
20:31:43 jazmin: Only us give me hope to help me spell 'raspberry' correctly
20:31:43 roselyn: Anyone but me will never understand me to generate fusion power
20:31:43 lula: my homegirlz will never be able to create a self driving car
20:31:43 diana: A small moose gives me hope to make a rasberry pie
20:31:43 jefferson: Hungry jackolanterns have demanded my presence to generate fusion power
20:31:43 jammie: my parents , in my opinion, are our best chance to create a self driving car
20:31:43 gregg: Scary pandas , in my well-educated opinion, are our best chance to make a rasberry pie
20:31:43 personwithflag: Only us are the best of friends to drink your milkshake
20:31:43 lilli: Scary pandas want to see me for what, I do not know
20:31:43 corrie: A small moose gives me hope to create a self driving car
20:31:43 nikia: Only us need to meet up to make a rasberry pie
20:31:43 dede: A dog with a cape would like to meet you to help me spell 'raspberry' correctly
20:31:43 jefferson: Anyone but me is our best chance to create a self driving car
20:31:43 karine: Anyone but me gives me hope to make a rasberry pie
20:31:43 sheilah: Cats with hats , in my well-educated opinion, are our best chance to understand me
20:31:43 lula: Hungry jackolanterns are the best of friends to understand me
20:31:43 jazmin: Anyone but me has attacked my toes to help me spell 'raspberry' correctly
20:31:43 corrie: Anyone but me wants to steal my sloth to drink your milkshake
20:31:43 kory: A dog with a cape wants to see me to make a rasberry pie
20:31:43 mariann: Only us , in my opinion, are our best chance to create a self driving car
20:31:43 kory: A small moose wants to steal my sloth to create a self driving car
20:31:43 shala: your dad wants to steal my sloth to generate fusion power
20:31:43 noihazflag: Only us need to meet up for the future of humanity
20:31:43 dede: A silly panda would like to meet you for the future of humanity
20:31:43 luann: My sworn enemy has attacked my toes to make a rasberry pie
20:31:43 diana: A small moose wants to see me to understand me
20:31:44 nikia: my homegirlz are the best of friends to help me spell 'raspberry' correctly
20:31:44 rickie: a heavily bearded dolphin totally understands me and my pet sloth to create a self driving car
20:31:44 rita: You gives me hope to help me spell 'raspberry' correctly
20:31:44 kory: my homegirlz need to meet up for what, I do not know
20:31:44 karine: Only us , in my opinion, are our best chance to understand me
20:31:44 jazmin: Several heavily mustached dolphins want to see me to create a self driving car
20:31:44 diana: My friend will never understand me to help me spell 'raspberry' correctly
20:31:44 charity: My sworn enemy gives me hope to generate fusion power
20:31:44 rickie: my parents have demanded my presence to generate fusion power
20:31:44 jammie: Hungry jackolanterns need to meet up to understand me
20:31:44 dede: Anyone but me would like to meet you for the future of humanity
20:31:44 rita: Cats with hats want to see me for the future of humanity
20:31:44 jammie: A dog with a cape totally understands me and my pet sloth to drink your milkshake
20:31:44 patience: a heavily bearded dolphin totally understands me and my pet sloth for the future of humanity
20:31:44 sonny: Only us are the best of friends to drink your milkshake
20:31:44 myra: A huge moose would like to meet you to create a self driving car
20:31:44 lucretia: I will never understand me for the future of humanity
20:31:44 denver: my homie would like to meet you for what, I do not know
20:31:44 lula: my homie wants to steal my sloth for the future of humanity
20:31:44 lucretia: my homegirlz will never be able to understand me
20:31:44 roselyn: A dog with a cape wants to steal my sloth to help me spell 'raspberry' correctly
20:31:44 charity: Cats with hats are the best of friends to understand me
20:31:44 denver: my homegirlz have demanded my presence to make a rasberry pie
20:31:44 mariann: Scary pandas will never be able to drink your milkshake
20:31:44 nathalie: a heavily bearded dolphin would like to meet you to drink your milkshake
20:31:44 sheilah: My friend gives me hope for what, I do not know
20:31:44 karine: Several heavily mustached dolphins have demanded my presence to make a rasberry pie
20:31:44 luann: I gives me hope to create a self driving car
20:31:44 lynwood: my parents give me hope to drink your milkshake
20:31:44 sonny: that guy from that movie wants to steal my sloth to understand me
20:31:44 maynard: my parents are the best of friends to understand me
20:31:45 noihazflag: Only us need to meet up to create a self driving car
20:31:45 dede: Scary pandas , in my well-educated opinion, are our best chance to make a rasberry pie
20:31:45 gregg: that guy from that movie would like to meet you to create a self driving car
20:31:45 christene: that guy from that movie gives me hope to understand me
20:31:45 clinton: Cats with hats give me hope to understand me
20:31:45 noihazflag: a heavily bearded dolphin wants to see me to understand me
20:31:45 whatisflag: that girl from that movie will never understand me to generate fusion power
20:31:45 flagperson: this is part 8/8 of the flag - addf
20:31:45 ilene: Several heavily mustached dolphins need to meet up to create a self driving car
20:31:45 sharell: your dad is our best chance to drink your milkshake
20:31:45 jammie: My sworn enemy wants to see me to create a self driving car
20:31:45 beau: my homie wants to see me to drink your milkshake
20:31:45 lula: We give me hope to make a rasberry pie
20:31:45 whatisflag: a heavily bearded dolphin is our best chance for the future of humanity
20:31:45 nadia: Scary pandas are the best of friends to create a self driving car
20:31:45 anabel: my parents , in my well-educated opinion, are our best chance to generate fusion power
20:31:45 rita: that guy from that movie wants to steal my sloth to generate fusion power
20:31:45 anabel: We , in my opinion, are our best chance to create a self driving car
20:31:45 clinton: Cats with hats , in my opinion, are our best chance to generate fusion power
20:31:45 lynwood: my parents , in my well-educated opinion, are our best chance to create a self driving car
20:31:45 rita: You wants to see me for what, I do not know
20:31:45 honey: A silly panda is our best chance to drink your milkshake
20:31:45 hildegard: my parents give me hope for the future of humanity
20:31:45 gregg: Cats with hats want to see me to drink your milkshake
20:31:45 rita: A huge moose will never understand me to generate fusion power
20:31:45 maynard: My sworn enemy gives me hope to drink your milkshake
20:31:45 dede: Several heavily mustached dolphins need to meet up to generate fusion power
20:31:45 sheilah: My sworn enemy totally understands me and my pet sloth to help me spell 'raspberry' correctly
20:31:45 lula: my parents want to see me for the future of humanity
20:31:46 christene: Cats with hats want to see me for the future of humanity
20:31:46 tamela: Anyone but me wants to steal my sloth to create a self driving car
20:31:46 cordelia: A dog with a cape gives me hope to make a rasberry pie
20:31:46 kelvin: your dad totally understands me and my pet sloth to generate fusion power
20:31:46 beau: Hungry jackolanterns want to see me for the future of humanity
20:31:46 kelvin: a heavily bearded dolphin has attacked my toes to drink your milkshake
20:31:46 emiko: My sworn enemy wants to see me to make a rasberry pie
20:31:46 noihazflag: A dog with a cape has attacked my toes for the future of humanity
20:31:46 jammie: A small moose gives me hope to drink your milkshake
20:31:46 myra: that guy from that movie wants to see me to drink your milkshake
20:31:46 karine: A huge moose gives me hope to understand me
20:31:46 jazmin: A dog with a cape is our best chance to create a self driving car
20:31:46 sharell: Cats with hats give me hope for the future of humanity
20:31:46 kelvin: A dog with a cape totally understands me and my pet sloth for the future of humanity
20:31:46 ilene: Anyone but me gives me hope to make a rasberry pie
20:31:46 charity: that guy from that movie will never understand me for the future of humanity
20:31:46 raina: Anyone but me gives me hope to understand me
20:31:46 charity: Only us , in my well-educated opinion, are our best chance to create a self driving car
20:31:46 corrie: Hungry jackolanterns , in my opinion, are our best chance for what, I do not know
20:31:46 adelina: a heavily bearded dolphin totally understands me and my pet sloth to create a self driving car
20:31:46 sara: my parents give me hope to help me spell 'raspberry' correctly
20:31:46 deandrea: My friend will never understand me for what, I do not know
20:31:46 ilene: Hungry jackolanterns , in my opinion, are our best chance to create a self driving car
20:31:46 hildegard: Anyone but me will never understand me to understand me
20:31:46 lynwood: Scary pandas have demanded my presence to create a self driving car
20:31:46 kelvin: A small moose will never understand me for what, I do not know
20:31:46 janett: Scary pandas will never be able for what, I do not know
20:31:46 rita: that guy from that movie wants to steal my sloth to drink your milkshake
20:31:46 personwithflag: A dog with a cape has attacked my toes to make a rasberry pie
20:31:46 beau: I is our best chance to make a rasberry pie
20:31:46 sharell: Anyone but me has attacked my toes to drink your milkshake
20:31:46 jazmin: Only us will never be able to help me spell 'raspberry' correctly
20:31:46 theresia: You would like to meet you to understand me
20:31:46 kory: that girl from that movie is our best chance to make a rasberry pie
20:31:46 hildegard: Several heavily mustached dolphins , in my well-educated opinion, are our best chance to create a self driving car
20:31:47 lindsey: We will never be able to create a self driving car
20:31:47 nathalie: A huge moose wants to see me to drink your milkshake
20:31:47 corrie: Cats with hats , in my opinion, are our best chance to create a self driving car
20:31:47 anabel: A silly panda is our best chance to make a rasberry pie
20:31:47 kelvin: Several heavily mustached dolphins have demanded my presence to understand me
20:31:47 cordelia: My sworn enemy has attacked my toes to drink your milkshake
20:31:47 dede: We will never be able to make a rasberry pie
20:31:47 luann: Scary pandas need to meet up to understand me
20:31:47 rita: Several heavily mustached dolphins want to see me for what, I do not know
20:31:47 lula: We are the best of friends for the future of humanity
20:31:47 christene: My sworn enemy gives me hope to understand me
20:31:47 shala: Several heavily mustached dolphins , in my opinion, are our best chance to drink your milkshake
20:31:47 kory: your dad wants to steal my sloth for what, I do not know
20:31:47 karine: My sworn enemy will never understand me to make a rasberry pie
20:31:47 lindsey: A dog with a cape wants to steal my sloth to make a rasberry pie
20:31:47 karine: You gives me hope to make a rasberry pie
20:31:47 noihazflag: I will never understand me to generate fusion power
20:31:47 diana: that girl from that movie has attacked my toes to create a self driving car
20:31:47 cordelia: my parents will never be able to understand me
20:31:47 ihazflag: that girl from that movie wants to steal my sloth to create a self driving car
20:31:47 rickie: Scary pandas need to meet up to make a rasberry pie
20:31:47 raina: Hungry jackolanterns need to meet up to make a rasberry pie
20:31:47 rickie: A huge moose wants to steal my sloth to make a rasberry pie
20:31:47 ihazflag: I is our best chance for the future of humanity
20:31:47 rita: We want to see me to drink your milkshake
20:31:47 ihazflag: my homie has attacked my toes to make a rasberry pie
20:31:47 lynwood: Scary pandas will never be able to create a self driving car
20:31:47 christene: You is our best chance to make a rasberry pie
20:31:47 beau: a heavily bearded dolphin has attacked my toes to understand me
20:31:47 hildegard: A silly panda has attacked my toes to make a rasberry pie
20:31:48 luann: My friend wants to see me to help me spell 'raspberry' correctly
20:31:48 emiko: Scary pandas want to see me to help me spell 'raspberry' correctly
20:31:48 corrie: my homie has attacked my toes to generate fusion power
20:31:48 lynwood: Hungry jackolanterns , in my opinion, are our best chance to drink your milkshake
20:31:48 leandro: I is our best chance to help me spell 'raspberry' correctly
20:31:48 gregg: that girl from that movie will never understand me to make a rasberry pie
20:31:48 raina: my homegirlz have demanded my presence to generate fusion power
20:31:48 sheilah: Only us have demanded my presence to drink your milkshake
20:31:48 theresia: Cats with hats , in my opinion, are our best chance to make a rasberry pie
20:31:48 whatisflag: Only us are the best of friends to understand me
20:31:48 adelina: Anyone but me would like to meet you to drink your milkshake
20:31:48 nadia: You wants to see me to understand me
20:31:48 nathalie: that guy from that movie wants to steal my sloth to make a rasberry pie
20:31:48 cordelia: My friend wants to steal my sloth to generate fusion power
20:31:48 emiko: I gives me hope to understand me
20:31:48 nikia: My sworn enemy totally understands me and my pet sloth to generate fusion power
20:31:48 jazmin: Only us , in my opinion, are our best chance to generate fusion power
20:31:48 hildegard: your dad has attacked my toes to create a self driving car
20:31:48 lucretia: Cats with hats need to meet up to help me spell 'raspberry' correctly
20:31:48 denver: my parents are the best of friends to drink your milkshake
20:31:48 christene: Hungry jackolanterns are the best of friends to understand me
20:31:48 sheilah: We are the best of friends to make a rasberry pie
20:31:48 charity: a heavily bearded dolphin wants to see me for the future of humanity
20:31:48 nikia: A silly panda wants to steal my sloth for what, I do not know
20:31:48 corrie: A small moose wants to steal my sloth to understand me
20:31:48 christene: Hungry jackolanterns need to meet up for what, I do not know
20:31:48 patience: Anyone but me wants to see me to create a self driving car
20:31:48 roselyn: a heavily bearded dolphin would like to meet you to help me spell 'raspberry' correctly
20:31:48 tamela: My friend would like to meet you for what, I do not know
20:31:48 whatisflag: that girl from that movie wants to steal my sloth to create a self driving car
20:31:48 flagperson: this is part 1/8 of the flag - 2e5c
20:31:48 nadia: Scary pandas have demanded my presence to make a rasberry pie
20:31:48 mariann: Hungry jackolanterns are the best of friends for what, I do not know
20:31:48 clinton: A huge moose gives me hope to drink your milkshake
20:31:48 gregg: Cats with hats are the best of friends to make a rasberry pie
20:31:48 ihazflag: Several heavily mustached dolphins , in my opinion, are our best chance to make a rasberry pie
20:31:48 maynard: You gives me hope to generate fusion power
20:31:48 theresia: A huge moose wants to see me to create a self driving car
20:31:49 beau: Cats with hats have demanded my presence to generate fusion power
20:31:49 sharell: A dog with a cape would like to meet you to understand me
20:31:49 lilli: my parents , in my well-educated opinion, are our best chance to create a self driving car
20:31:49 lynwood: My sworn enemy would like to meet you to create a self driving car
20:31:49 leandro: I wants to steal my sloth to drink your milkshake
20:31:49 lindsey: A small moose will never understand me to create a self driving car"
| 80.643855
| 139
| 0.779723
| 11,507
| 57,741
| 3.912575
| 0.017902
| 0.063258
| 0.036382
| 0.03234
| 0.916063
| 0.909044
| 0.894141
| 0.856692
| 0.825108
| 0.767003
| 0
| 0.090462
| 0.175439
| 57,741
| 715
| 140
| 80.756643
| 0.85516
| 0
| 0
| 0
| 1
| 0.345506
| 0.012002
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
35ac3a79b2fe5d17700cef0d553a62c5f8265bbe
| 139
|
py
|
Python
|
my_feed/__init__.py
|
SimoneABNto/My-Code-Py
|
47276c1d69a92aa284685c9f148c1bd960147f7f
|
[
"MIT"
] | null | null | null |
my_feed/__init__.py
|
SimoneABNto/My-Code-Py
|
47276c1d69a92aa284685c9f148c1bd960147f7f
|
[
"MIT"
] | null | null | null |
my_feed/__init__.py
|
SimoneABNto/My-Code-Py
|
47276c1d69a92aa284685c9f148c1bd960147f7f
|
[
"MIT"
] | null | null | null |
from my_feed.my_feed import Updater
from my_feed.modules.post import PostModel, MediaModel
from my_feed.modules.types import PostType
| 27.8
| 55
| 0.827338
| 22
| 139
| 5.045455
| 0.5
| 0.216216
| 0.27027
| 0.306306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129496
| 139
| 4
| 56
| 34.75
| 0.917355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
35d15ab910f95d264d0b96e081e31de86168761d
| 8,794
|
py
|
Python
|
Rosalind.info Problems/Reverse complement.py
|
peterforgacs/bioinformatics-scripts
|
f4a497b84bc3cf0c295b7e6b506b9dd9c88e7881
|
[
"MIT"
] | null | null | null |
Rosalind.info Problems/Reverse complement.py
|
peterforgacs/bioinformatics-scripts
|
f4a497b84bc3cf0c295b7e6b506b9dd9c88e7881
|
[
"MIT"
] | null | null | null |
Rosalind.info Problems/Reverse complement.py
|
peterforgacs/bioinformatics-scripts
|
f4a497b84bc3cf0c295b7e6b506b9dd9c88e7881
|
[
"MIT"
] | null | null | null |
__author__ = 'Peter'
t = "ATATTGGCTTGGTGACCATATGGTCTAACCCCGTCCTCGTATGCCTGGAAGAACGAGACGGTTGCCGTTGCTCGGCTAGTCTCTCGACGTGAGCCAAATGCAGGGATCCTTTTATTTTTCAGTAAAGTCAGAGATGTGTATTTGGAGTATCAGAACGATGTGAAACTTGCAAGCAGACTGGCGCTGGATAGGGAAACAGTGCTGGCTGCGACAATCCATGAAATCTCGAATTAATGAAGTCTGCTCACCTAACCAGCTTTGGAGATGTGCGTATATGTCTGAGTAGAGGAAACTGAATATCAGATAAATCTCCGTTCGTGTGGATGGTGAACAGAGCGCTCTTAGGTAAACCTGGCGTTGATAGAACGTGAGCACGACAGCGTGCTCGACAACAGGGCTTGGACAGAAACGCAGCTTCTTCGTCACGAACCAGTTGCGGGATAAGGGCATGCCTCTGAGTGACTTAGCAAACACACAGGACTAGGGCGATCCTACGACTATTGTCAAAGTTAAGACCGCGAAGGCTTATAAGACGCCATATAGCAGCCATGATGCGGATTGCGGGTACCCTATGCCGGATTACCTTTCAGCTCATCATGTCGTTTGAACTCGGACAATCCTTATTAATCCCATCTATAATTCTCTTCGATGCATGGCACAAGACTGACAGCTACGAGTCGCTACTGAGCGGGTACCGGCTGAGAATAGAACTGCGGAGCCTGGGACAGCAAGCCATAGCAACTTTACTCAATTTCCATCGCCATAGATGTCATAGGTTCTGCCGGAGCCCCTGGGTGCTCGCTATAAACATGGTTGTGGGTCTACGCAGCACCCAAGACCAGTCGTTATGGTCTCAACATTAGTCCAATGAAATGCGACTACCCAGACGTCCCTCGCGCTCGTCCGGTGATTATTCGTCTGATGCGACCGGATCGGAGCGTTGGACCACAGACATCAGGGTTAAGGGAAGCACGGAGCAGTTCCGAACGGTGTGCCACACCTATCGATAAACCTCTGTTCCCACCGCACTTATGGAATATGACTGCACAATCGACATACTCCGGAGCCTTAGGTTTAGACGAACCTTCTGCCCCTATACACACTAAAGGGACCGACAAGGGGTAGTTCATTAAGTTCCCTCTACTTGTAGTAGCCGCAGTTCACCAAAGCCATAGTAACGTTCTCCTCTGTTTGTCCAACAGACTCAGGTCAATAGCAAAACAGTCGCGGTATTTCTACACAGGATCTAATGCCACGTGGATCGCCTCGTACGCCCTGGGGGTTTTGAACTGCAGTTTTTTCTCCTCCTCCTATACTTAGCGTACTAGAAATAATGTTGAATAGTAGAGCTCTACTGTATTTCCTTTGCGCCGGGGTATTCAGGACCAATCGAGTCTCGACGAGGGAAAAAGCAGTTTCAATGCCGCGGGGGTTAATCGGAAACACGGTGTCCCTGGGCAAATGTTCGAGTAAAATAGGATACCGACATACGTTGTTCAAGACAAGTCGTAGGGGATCAAATTGAACTTTAGGCCACGCCCATTTCTCGTAAACTACTAGCAACTAGGCCTCAACCCCTGTGCCGAAGATAACAGACCTCCAGCCAGGCGGAAGAGGAAAACAGACTGTGTTGGTGGTGCCACAATTCAGGAAACCTCATCGGGGCTACGGATTAATTCGGGCAGGAACATTGACTCGGTTTTGTAACCCCCCAGACACATAGGGCCCACCCATTACGCTTATGAGCGCCGAATGAAGTAGCTAGCCGCCAGACTAACAGACGACCAAGCCGAGGTGAGCAGACCCTGCACAGCGATCGGTTTGCGAGTTACATCACTTGTTCGTCACGTTAAGGGCTGCTACTTAAACGGATCTAATGTTCGGCACCCCTACCATGCCTTCATTAATACCACTCTCCGCTTCGAGGCAGGGTGATGACAACCACCACATTACTCAACAATGTGATACCTTATCCTTGGCATTCCTGGGGGCAGC
TCCTCCCCTTTATGTTAAAGGAAGCAGTGCTGCACACCTGAGGGCAAAACACAGGTTCGCATTACCTTCCGGTACGCCTCCCGATGATCAACGTAGGAACACAAGCGCCATACTAAATGCAAAATTGGCTCGCTGAGCGAATCCGCTTCACCCTGTCTCGCTCAGCTTGAAGCATACCTGCGCAAACGTAGTCTGGACGGGTAGCAGGTTTACCAGCGCCCTGTTCTATGCAATCTACCTTGGGCAAATTAAGGAGAAAGAGAACCGTTGGCTGTATCAAAGTTTGCAGGCGATACTGTAGCAAGTAGAAACCTAGAGCATCGAGTTTTAGGATCATTCCCCCAAAGCTAACCTGCCCTGGGTCATTTGGTTCTGTGGAACGGCTATAGGACTAATCCTTGCAAGAGGATGTCACACGTTCTTAGCTCTCCATGTAAGACGTGTCATACCCCTTCCATTCTCGACCGACGAGGTCTTGGGTGAAGCCGGAGAGGTCCGCGCCTTGCCGATGCCTGGCGCGTCATGGCGTGCTTTTCGGGGGCTTGCTCGTTGCGCCTCTTCAGCTATCCCGGGACCATCAACGAGCAGTAGCTCAAAAAGCCGCCTACACCAACTCATACATCAGTATATTAATTACGAACTAACCCACCAACGTTGATGACTCATGACGACAGTAAAGCTTGCTGTGCATGGTGCACCCTCGTAGTTTCAGTCTAGTTATCAGCTCTATAGCGTGCTAACGGGGATGGAGCAGGGGCGGTCTGCCGTGACTTGCCGCCTCAAAACTGCGCTGGCCAGGCATCTGCCCTCGTAGCATACACGCCTGCGCAGAATGCTCTTCGTGTTAGAGTCTCCGACCGTACGGTATTCCGGCATGCCAGACAATACTGGGCGGGGGGTGCTAGGCGGTAAAGGGATCAATCCGATAGTCGAACTCGCAGTCGCCAAGCTCTTATGAGCCGTATCGCATGGTTTCATGGGCCTCATCTACAGTACATCGTGGGGGAGGGTCTGCACCATCTGGCTCCCTCGAAGGACAGGTCTCCACGGTCTAACGCTCTGTGCTACTCACTGCCCCACGAAACCAGGTCTGCGGGGTATGCGTGTGAGAATATATGTACCAACGAAGAGGCCGGACAACTAACCGAAGCGACGCTAAGCGAACGTATAGTCCAAGATTAAGCCTCTCTTATTCTCCCTCGACCCTCTTCGCATTGTTTCGCGTACCGCCTCGGAGTACTGACCCCACTCAATTGTTTAATTGTAAAACGACTCTGTTTGCCAATTATCTCTCATCCATATCCCAAAAAGCGAGCAGCGCTGAGTGGGCGCTATCCAACTCTTAATAGCCGGGATAGCGTGCCAACACTAGACTAGTTTGCACATATACCGTTCCCAGCGAACCTGTATGAAATCCGGTCGTGGTCGCGCAGACGGCAGGGTGATGTCTATTTCGTCGCCATGTGTAACAGGAGTAACCCAATTACCCCAAAATCAGGTGTGAAGACATCCTATCTTCTCGCGACCAACAGGAATCAGCCGTAGTCTCACTCGGCCTTGGTAGCTAATGTCCGTTGCGTGTAGGATGGGTGTCGCCAACGTAATCGTATAGCGGAATGCGCCGGTTCCACATTCGCAGCAGAACGCCAGCAGGAGTCGGCGATCATTCGTATACCAGTCTGTATGTCCGCAACCACTCTTAAGCGCTAAGCAAGCTGGCTAGGATTTCGGTGGTAGGGCCATCGTTTTCAGATAAGTGATCCTGCTCTATCCGTCGTATAGGAGAGCACCGAGTAAAGACAAGAATAAGTGTTAAACCTTGAGGAGTAACTCGTTGACACGCTAAAGTGCTTTAAGCCACCAGTGGCCCTCATATTCTTTCAGAAATTTCGGCACAGGGTTTCCCACCTCTCCAAGCGACGTCAGGCATTTAAGCCCCAACCTACGCGATGCCTGTCGCTCAAAGTCGGCCCGGCGCCAGACGATAGGCATAATTGCTC
TAAAGCTTGCACATATAGGAACGCCAAAGATAGAATGTAATTCGCTTCGGCCCCACCTCCGCCTGACCTGCATCGTTCTGTCTGTGACTTATGCCCCTTTTGTCAATCTCGGGTCATAGAGATGAGACCGACTGTAGAGTGTCTGCCCAAGTAACACAGTACGCGGGGGACGCGTCAGTCGCCGTAGTGCACCTTGGTGTGGCACCGACTGTTTAATCTGCCGGGGGGATTTACTCCTCTCGAGCAACTTTCAAAGCTTCGTGGCAAGCTATGTGTGACTTAGGGCGTCCTGGCGATTATAACCTTGTGCGCAGGTATTGGTTGTAATATGAAGATGACAATTTGCAACTCATAAGTTGAGGGATTAGTTGTACACTATGCCTGACTTCCACGGGTAAACTGACGTGCCTACCATCTGTACGGTACTTATACGCAGTCGGCGCGCAATATGACAGTGACCTCTCTCCCCGCAACGCAGTGTGGTAACGTGTTGGCCAAACAGACTGTACATATGGTTAATTGAGCCTGAGCATAAATGCACGTTCACCCGGATAAATTTTCCGTGCCAGCCGTCTACCAGGCCTGCACCTCCTGAGTATAAGCGCGGAATCACGTAACGAGTTCGACGGACTATGTGGTGAAGGTATCTCGTAAGGACCGTACTCGACCAATAAGCCCGTCACGACCCTAATATACCCTGTAAATTACCTAAAGTAAGTTGTCTATCACTACAGTACATCAAAGCCGCGATCTATCGGAAACGTCGTTTGATTAGCGAAAAGTCTCCGGGAACCGGTCTCCTTGACGCTGCCAAGGGGAAGGACGGCCAGAAGGTGAGCCCTTGTAGTACGTAATACTAGATCCACTAGAGTTACCGTACATTACTATACCGGGTTAATATAGGGACTACTGAACACGCCGGCTCTCTTACCTGTTACTGGTTTATATGTCGATCGAGAATTCCAGACTCGGAAACAATCCTCAAACTAGTAGGATAAATTAATGGTGAACGCCTAACCCCGCTATGCCGGCAAGTAGCGCGGCTCACCCCCCGGCGGGATAAGTCGCCAACCAGTAAGGAAATGGTTCTATTGGCCAGTGTGACTGAATCTCTCGCCAGCGGTTACCAATCGGGTAGCTATCATTAACGGTCGCGACACTCGATCATTTGAGGGATATGCTATCAAGTGACGTTGTTTACTACAGCGGTAGGACAAGAGCCGGCTGAGGTCTTAACGACTTTTGGCCCGTGAAACAAGGGCTAGATCAGGCCGGACGTTGCGTTCAGCAGTTGCGGAGAACGCTAATCACAGTGCAAATTCTCTGGATTCACTGCCGAACCGAACGTATTGTTATTTAGCTACGGTTCCGAAGTCGTTACACCAAAATTATAGTTGCGTAACGAGGCTAGATGTTAGGGTAATGTCTGTGCCCCGTACGGAGACTCACCCAATCCTTTAGTCATTACTAAGTTTCGATTAGTCCCCCGGTAATGAGCACGGGCGTGAGGATGCAGCCAATCAGTATGGTCCTGCGTTGTACTATTATACACGCGGGTCTAGAAGAAGGTGCGCTCGGTTTTTTAAAAGCTGATAACCCTTGAACTGCGAAGCCCCAATGGCTCTCCCTGCGTTGCCCCAGAACTCACAGGCTGCACTAATAGAAGACGCGCGACGGCTATAGATTCCGTAAGTCGTGGTTCCGCATTAGAGGTCGGCGGTACTACGACGCACGTCGGTCTACTGCATTGAATCGCCCTTGATCACACGGGAGGATCAATTGTACAAATGCCTTCCCGTGCACCGTTCACGACAAGCTAGGGCGGGGTAGACCCATGACATAGATAAAGCGATAGTGCTAATCAGGGACGCTGAAATTTTCCGTGAAAGCTAGTGCCATTTATGTCCGCAATCACTTTCGTCTTGCATTTCTCCGCCGCTCTATTCCTCTCGCTGTCCCCGGATTGGCAACGTAATTGGGTATTAAATACTATATTAACC
CTGCGGGGAGTTTCTCGACCTTAGCCCGAATCTCTACACATGAACATCTTAGGATCAACTTTATAGACGCTTCTTTGGTACCAACGTGTGCCGGATCTCAATTAGGTCTCTCCATGTGAGCGAGTCCAACAAGAGCCAGGGGAGATTAGCTCACGCAGGCTGTTAACACGACTTGTTCGTTGTTTATGGGCCGTCACCGGGGGTCAAAGTAAAGCGCATAGAGCGTTCGTACTATCCGGTCAAGAGCCCTTATCCTCTTTAAGTGATTATGCGGAGGCATGGGGTTCTGTGTGGTCTCGTTTAAACTTGAGCGGCGTTCGCTATTACCAGTTTATAAAATCCGTCAGGTGGGGCACGTTCCTCGAGTGTTAGTAATGCAAGTTGCTTGCAGGTAAATCACTGTGCTTTCCGGGGCAGAGCATGTTAAGACGTGTTATTATCTGGTTGATCTGGAGGGTCCGATTTACGGTGTCGCTAGTCGCTGATATGTTTAAGTAAATCTTCCATTGATCTGCGTTAATCTTCCCAATTCGTCTCACTACCCTACTTAGCTATAGGGAGCGTACGCATTTAACAACTGTTTACAGATTCGGAGCTTCGGGGCCATACTCACACGGTGGCAGTGTAAATGGGATCTCTCGGACATTATTCGAGTCTAACATCATCCAATTTCAAGGAGGAGGAGGGTCCTAAGTGCTCGTTAATATGGTCAGAGATAATCTCAAGTGCGTCGTGGCGTGGAGATTACGGGGCCTGCATAGCTCGACAAAGCTAGTCCATGACTCTAAGGTGAACGCAAGGTCGGGGGATAGGCATTCTTCGAGTTTGGCCGCTCGTATTATCCCCTCGAAGGGGTTTATTAACAAACATAACTCGATGCACGACTAAGCCTTGTTCCACGGTGTTCCTTCGAAACGTCTGGCGGTCGTTATCTTTAGATTTTATACGTCCGTGGTGTTGCGGTGGGAGGTTGTGCTCCCAGGCCGAAGTCTGCGTCATGTGATGCTAGTTAGCGTGGAGTTGTCCCCGCTCTGCGCTGATTATTAAAATCGAGCTGGGGCCGGACTGATAATTTCCTCGGGGTATTGCTTACAACGCGTATTATGCGATGGTGCAGAGCCAATTGGAAGATCGTCGGTTTTTTTACTTCTACAGTGGAGACTTTCATGGGGTTGATAAAAATTACGTAACTCTGGGCCCTTTCAGGTGAGTACTTCCCGGCACTGCTGTGCATGTGCTCGCCGTGCTAATCCAACGCATTGTTCAGAGATCAGAAACTCACTATACTAGACTGTCAAAGGAGGTCCGGGGGTCGGTCGATCAGGAAGTTCCGGAGCTTTGAATGTACGAACAGCTACCTAGGATGCGATTGCACTTCTAGTCACGTACACAAGTGCGTTTTTTCCAATAAGTTAAATGTAAGCAAATCGTCACACAAAATCTCTCACCACGGCTCGATAGCCCTAGATACTATACAGAACTCGGCCGATCTTGTTTTACGAGAGCAGAAGTGAATAATCTCAACTGTCTAGGTCCGGGTCTATGCTACCCTTTTTGATAGGAACAGAAATAAACTCCCCCAGTGGTTTAAGGCACGAATCTAGCGGGGAAGTTATTTCCGTCAAGCGATATACCCTCTATGTACAATTACGAATCTGATCAAAAGTTGAATTCCCGGGCAGGTGGAATCCACCCGTCACTTTTATGAGCCGTACCTGCTTGTATTTGGACAAAACAGTGGATTCGCAACAGATCCGACTGAAAGACGTCGGGCAAGTCATTTCTAATCATCCACGCAATGCTCACCCAAGCTCGGGTCCTACTATGGGTGTGTACGGAATCCACAGCATGGTTGCTTGGTACAGCCGGAGGTACCTATATCGTAATACAAAGTCAGTACTACCCTATCGGAGCTTGCTACTCATATAGGTTGTGGCAGGAGGCCATGTTAGCGCAGCGCAGGCATCAATGCTTTCTAACTTCCGGAAGGTACACCGTTA
CGGGAGTATGACCTAAAGCCCCCTATATCCTCGTGTTTATCCCCCTGGCACTGTCAAATATCAGAGTTAACGACACAGAATTTGGCAGACGCCTATTGCGACCAAGTACTCACGACGTGCTCAACTCATGGCTATAGAGAACGCCGATACGTAATTCGCCAAGGCGGCCAAGGTGCCGTAATTTCTTGCGTCTAGTGGAGCGTGCATTTTCACAATGGTACCAGTCGATGGATGGGACCACTTAGGCAGTTCTTCCGCAAAGCGCTGTGTCGTGACGTTGTCGCAACTGCAAAGCCGGCTAGCGTATTTGGCCTAGTGGAACATGAGAACAAAGCTCGCTCTCAAGAGGCACATTCCTAGCACGCATGTACGTTTTACATATTCACTTTACGACTGTTTTATGGACCACGACGAGGCGACCAGCTAAACGGTCGAGCCTAGTAGCTTCAGGCCCGCAAAGCCGTCCACACATCCCTGAACGATGGCCA"
y = ""
i = len(t)
while i != 0 :
if t[i-1] == "A":
y = y + "T"
i += -1
elif t[i-1] == "G":
y = y + "C"
i += -1
elif t[i-1] == "T":
y = y + "A"
i += -1
elif t[i-1] == "C":
y = y + "G"
i += -1
print y
| 462.842105
| 8,489
| 0.974301
| 53
| 8,794
| 161.584906
| 0.301887
| 0.001868
| 0.001752
| 0.002452
| 0.003153
| 0.003153
| 0
| 0
| 0
| 0
| 0
| 0.001041
| 0.017171
| 8,794
| 19
| 8,490
| 462.842105
| 0.989818
| 0
| 0
| 0.222222
| 0
| 0
| 0.966003
| 0.964525
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.055556
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
35e540dc3c381ff4b46e2a9e9c3ec9227da2f9e0
| 19,304
|
py
|
Python
|
old training/1. Finding the learning-rate/3.Weight_decay_finder.py
|
marctimjen/Artefact-Rejection
|
4e850d172fa8c08ba1776c46e760484673d7e7ad
|
[
"MIT"
] | null | null | null |
old training/1. Finding the learning-rate/3.Weight_decay_finder.py
|
marctimjen/Artefact-Rejection
|
4e850d172fa8c08ba1776c46e760484673d7e7ad
|
[
"MIT"
] | null | null | null |
old training/1. Finding the learning-rate/3.Weight_decay_finder.py
|
marctimjen/Artefact-Rejection
|
4e850d172fa8c08ba1776c46e760484673d7e7ad
|
[
"MIT"
] | null | null | null |
import neptune.new as neptune
import os
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader, random_split
from torch.optim.lr_scheduler import CyclicLR
import torch.multiprocessing as mp
import numpy as np
import random
import sys
sys.path.append("..") # adds higher directory to python modules path
from LoaderPACK.Unet import Unet
from LoaderPACK.Loader import load_whole_data, load_shuffle_5_min
from LoaderPACK.Accuarcy_finder import Accuarcy_find
from LoaderPACK.Accuarcy_upload import Accuarcy_upload
from multiprocessing import Process
from LoaderPACK.trainer import net_train
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
def net_SGD1(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=0.6, weight_decay=0)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net1"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 0.6, "optimizer_weight_decay": 0,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD2(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=0.6, weight_decay=0.01)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net2"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 0.6, "optimizer_weight_decay": 0.01,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD3(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=0.6, weight_decay=0.00001)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net3"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 0.6, "optimizer_weight_decay": 0.00001,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD4(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=1, weight_decay=0)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net4"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 1, "optimizer_weight_decay": 0,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD5(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=1, weight_decay=0.01)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net5"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 1, "optimizer_weight_decay": 0.01,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD6(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=1, weight_decay=0.00001)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net6"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 1, "optimizer_weight_decay": 0.00001,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD7(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=2, weight_decay=0)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net7"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 2, "optimizer_weight_decay": 0,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD8(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=2, weight_decay=0.01)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net8"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 2, "optimizer_weight_decay": 0.01,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_SGD9(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = SGD(model.parameters(), lr=2, weight_decay=0.00001)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "SGD_net9"
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": 2, "optimizer_weight_decay": 0.00001,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_ADAM1(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = Adam(model.parameters(), lr=0.37, weight_decay=0)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "ADAM_net1"
params = {"optimizer":"ADAM", "batch_size":batch_size,
"optimizer_learning_rate": 0.37, "optimizer_weight_decay": 0,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_ADAM2(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = Adam(model.parameters(), lr=0.37, weight_decay=0.01)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "ADAM_net2"
params = {"optimizer":"ADAM", "batch_size":batch_size,
"optimizer_learning_rate": 0.37, "optimizer_weight_decay": 0.01,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_ADAM3(device, train_file_loader, val_file_loader):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
model = Unet(n_channels=1, n_classes=2).to(device)
optimizer = Adam(model.parameters(), lr=0.37, weight_decay=0.00001)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
nEpoch = 100
batch_size = 20
net_name = "ADAM_net3"
params = {"optimizer":"ADAM", "batch_size":batch_size,
"optimizer_learning_rate": 0.37, "optimizer_weight_decay": 0.00001,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet"}
run[f"{net_name}/parameters"] = params
net_train(device = device,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
batch_size = batch_size,
train_file_loader = train_file_loader,
val_file_loader = val_file_loader,
run = run,
path = "/home/tyson/networks/",
scheduler = None)
def net_starter(nets, device, train_file_loader, val_file_loader):
for net in nets:
pr1 = mp.Process(target=net, args = (device,
train_file_loader,
val_file_loader,))
pr1.start()
pr1.join()
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Set up the datasets
np.random.seed(42)
#val_set, train_set = torch.utils.data.random_split(
# random.sample(range(1, 226 + 1), 200), [26, 200],
# generator=torch.Generator().manual_seed(42))
val_set, train_set = torch.utils.data.random_split(
random.sample(range(1, 226 + 1), 50), [10, 40],
generator=torch.Generator().manual_seed(42))
train_load_file = load_whole_data(path = "/home/tyson/model_data",
ind = train_set)
#train_load_file = load_whole_data(path = "C:/Users/Marc/Desktop/model_data",
# ind = train_set)
train_file_loader = torch.utils.data.DataLoader(train_load_file,
batch_size=1,
shuffle=True,
num_workers=0)
val_load_file = load_whole_data(path = "/home/tyson/model_data",
ind = val_set)
#val_load_file = load_whole_data(path = "C:/Users/Marc/Desktop/model_data",
# ind = val_set)
val_file_loader = torch.utils.data.DataLoader(val_load_file,
batch_size=1,
shuffle=True,
num_workers=0)
core = torch.cuda.device_count()
networks = [net_SGD1, net_SGD2, net_SGD3, net_SGD4, net_SGD5, net_SGD6,
net_SGD7, net_SGD8, net_SGD9, net_ADAM1, net_ADAM2, net_ADAM3]
cuda_dict = dict()
for i in range(core):
cuda_dict[i] = []
for i in range(len(networks)):
cuda_dict[i % core].append(networks[i])
pres = []
for i in range(core):
pres.append(mp.Process(target=net_starter, args = (cuda_dict.get(i),
f"cuda:{i}",
train_file_loader,
val_file_loader,)))
for process in pres:
process.start()
for process in pres:
process.join()
| 32.663283
| 82
| 0.558226
| 2,126
| 19,304
| 4.80809
| 0.082314
| 0.078263
| 0.058697
| 0.06486
| 0.887889
| 0.880747
| 0.864312
| 0.852769
| 0.852769
| 0.852769
| 0
| 0.025379
| 0.330501
| 19,304
| 590
| 83
| 32.718644
| 0.765552
| 0.026419
| 0
| 0.745011
| 0
| 0
| 0.158859
| 0.101363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028825
| false
| 0.002217
| 0.039911
| 0
| 0.068736
| 0.002217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
577e5419d40e6373e39f8b7f01e95910412bb05c
| 87,105
|
py
|
Python
|
phone_gen/patterns.py
|
tolstislon/phone_gen
|
3e85fa87ac706715f4dd0489774d8f5f618defdb
|
[
"MIT"
] | null | null | null |
phone_gen/patterns.py
|
tolstislon/phone_gen
|
3e85fa87ac706715f4dd0489774d8f5f618defdb
|
[
"MIT"
] | null | null | null |
phone_gen/patterns.py
|
tolstislon/phone_gen
|
3e85fa87ac706715f4dd0489774d8f5f618defdb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Auto-generated file 2022-04-22 09:25:28 UTC
Resource: https://github.com/google/libphonenumber v8.12.47
"""
PATTERNS = {
"info": "libphonenumber v8.12.47",
"data": {
"AC": {
"code": "247",
"pattern": "((6[2-467][\\d]{3}))",
"mobile": "((4[\\d]{4}))",
},
"AD": {
"code": "376",
"pattern": "(([78][\\d]{5}))",
"mobile": "((690[\\d]{6})|([356][\\d]{5}))",
},
"AE": {
"code": "971",
"pattern": "(([2-4679][2-8][\\d]{6}))",
"mobile": "((5[024-68][\\d]{7}))",
},
"AF": {
"code": "93",
"pattern": "((([25][0-8])|([34][0-4])|(6[0-5])[2-9][\\d]{6}))",
"mobile": "((7[\\d]{8}))",
},
"AG": {
"code": "1",
"pattern": "((268(4(6[0-38])|(84))|(56[0-2])[\\d]{4}))",
"mobile": "((268(464)|(7(1[3-9])|([28][\\d])|(3[0246])|(64)|(7[0-689]))[\\d]{4}))",
},
"AI": {
"code": "1",
"pattern": "((264(292)|(4(6[12])|(9[78]))[\\d]{4}))",
"mobile": "((264(235)|(4(69)|(76))|(5(3[6-9])|(8[1-4]))|(7(29)|(72))[\\d]{4}))",
},
"AL": {
"code": "355",
"pattern": "((4505[0-2][\\d]{3})|(([2358][16-9][\\d][2-9])|(4410)[\\d]{4})|(([2358][2-5][2-9])|(4([2-57-9][2-9])|(6[\\d]))[\\d]{5}))",
"mobile": "((6([78][2-9])|(9[\\d])[\\d]{6}))",
},
"AM": {
"code": "374",
"pattern": "((((1[0-25])|(47)[\\d])|(2(2[2-46])|(3[1-8])|(4[2-69])|(5[2-7])|(6[1-9])|(8[1-7]))|(3[12]2)[\\d]{5}))",
"mobile": "(((33)|(4[1349])|(55)|(77)|(88)|(9[13-9])[\\d]{6}))",
},
"AO": {
"code": "244",
"pattern": "((2[\\d]([0134][25-9])|([25-9][\\d])[\\d]{5}))",
"mobile": "((9[1-59][\\d]{7}))",
},
"AR": {
"code": "54",
"pattern": "((3888[013-9][\\d]{5})|((29(54)|(66))|(3(777)|(865))[2-8][\\d]{5})|(3(7(1[15])|(81))|(8(21)|(4[16])|(69)|(9[12]))[46][\\d]{5})|((2(2(2[59])|(44)|(52))|(3(26)|(44))|(473)|(9([07]2)|(2[26])|(34)|(46)))|(3327)[45][\\d]{5})|((2(284)|(302)|(657)|(920))|(3(4(8[27])|(92))|(541)|(755)|(878))[2-7][\\d]{5})|((2((26)|(62)2)|(32[03])|(477)|(9(42)|(83)))|(3(329)|(4([47]6)|(62)|(89))|(564))[2-6][\\d]{5})|(((11[1-8])|(670)[\\d])|(2(2(0[45])|(1[2-6])|(3[3-6]))|(3([06]4)|(7[45]))|(494)|(6(04)|(1[2-8])|([36][45])|(4[3-6]))|(80[45])|(9([17][4-6])|([48][45])|(9[3-6])))|(3(364)|(4(1[2-7])|([235][4-6])|(84))|(5(1[2-8])|([38][4-6]))|(6(2[45])|(44))|(7[069][45])|(8([03][45])|([17][2-6])|([58][3-6])))[\\d]{6})|(2(2(21)|(4[23])|(6[145])|(7[1-4])|(8[356])|(9[267]))|(3(16)|(3[13-8])|(43)|(5[346-8])|(9[3-5]))|(475)|(6(2[46])|(4[78])|(5[1568]))|(9(03)|(2[1457-9])|(3[1356])|(4[08])|([56][23])|(82))4[\\d]{5})|((2(2(57)|(81))|(3(24)|(46)|(92))|(9(01)|(23)|(64)))|(3(4(42)|(71))|(5(25)|(37)|(4[347])|(71))|(7(18)|(5[17])))[3-6][\\d]{5})|((2(2(02)|(2[3467])|(4[156])|(5[45])|(6[6-8])|(91))|(3(1[47])|(25)|([45][25])|(96))|(47[48])|(625)|(932))|(3(38[2578])|(4(0[0-24-9])|(3[78])|(4[457])|(58)|(6[03-9])|(72)|(83)|(9[136-8]))|(5(2[124])|([368][23])|(4[2689])|(7[2-6]))|(7(16)|(2[15])|(3[145])|(4[13])|(5[468])|(7[2-5])|(8[26]))|(8(2[5-7])|(3[278])|(4[3-5])|(5[78])|(6[1-378])|([78]7)|(94)))[4-6][\\d]{5}))",
"mobile": "((93888[013-9][\\d]{5})|(9(29(54)|(66))|(3(777)|(865))[2-8][\\d]{5})|(93(7(1[15])|(81))|(8(21)|(4[16])|(69)|(9[12]))[46][\\d]{5})|(9(2(2(2[59])|(44)|(52))|(3(26)|(44))|(473)|(9([07]2)|(2[26])|(34)|(46)))|(3327)[45][\\d]{5})|(9(2(284)|(302)|(657)|(920))|(3(4(8[27])|(92))|(541)|(755)|(878))[2-7][\\d]{5})|(9(2((26)|(62)2)|(32[03])|(477)|(9(42)|(83)))|(3(329)|(4([47]6)|(62)|(89))|(564))[2-6][\\d]{5})|((675[\\d])|(9(11[1-8][\\d])|(2(2(0[45])|(1[2-6])|(3[3-6]))|(3([06]4)|(7[45]))|(494)|(6(04)|(1[2-8])|([36][45])|(4[3-6]))|(80[45])|(9([17][4-6])|([48][45])|(9[3-6])))|(3(364)|(4(1[2-7])|([235][4-6])|(84))|(5(1[2-8])|([38][4-6]))|(6(2[45])|(44))|(7[069][45])|(8([03][45])|([17][2-6])|([58][3-6]))))[\\d]{6})|(92(2(21)|(4[23])|(6[145])|(7[1-4])|(8[356])|(9[267]))|(3(16)|(3[13-8])|(43)|(5[346-8])|(9[3-5]))|(475)|(6(2[46])|(4[78])|(5[1568]))|(9(03)|(2[1457-9])|(3[1356])|(4[08])|([56][23])|(82))4[\\d]{5})|(9(2(2(57)|(81))|(3(24)|(46)|(92))|(9(01)|(23)|(64)))|(3(4(42)|(71))|(5(25)|(37)|(4[347])|(71))|(7(18)|(5[17])))[3-6][\\d]{5})|(9(2(2(02)|(2[3467])|(4[156])|(5[45])|(6[6-8])|(91))|(3(1[47])|(25)|([45][25])|(96))|(47[48])|(625)|(932))|(3(38[2578])|(4(0[0-24-9])|(3[78])|(4[457])|(58)|(6[03-9])|(72)|(83)|(9[136-8]))|(5(2[124])|([368][23])|(4[2689])|(7[2-6]))|(7(16)|(2[15])|(3[145])|(4[13])|(5[468])|(7[2-5])|(8[26]))|(8(2[5-7])|(3[278])|(4[3-5])|(5[78])|(6[1-378])|([78]7)|(94)))[4-6][\\d]{5}))",
},
"AS": {
"code": "1",
"pattern": "((6846(22)|(33)|(44)|(55)|(77)|(88)|(9[19])[\\d]{4}))",
"mobile": "((684(2(48)|(5[2468])|(72))|(7(3[13])|(70)|(82))[\\d]{4}))",
},
"AT": {
"code": "43",
"pattern": "((1(11[\\d])|([2-9][\\d]{3:11}))|((316)|(463)|((51)|(66)|(73)2)[\\d]{3:10})|((2(1[467])|(2[13-8])|(5[2357])|(6[1-46-8])|(7[1-8])|(8[124-7])|(9[1458]))|(3(1[1-578])|(3[23568])|(4[5-7])|(5[1378])|(6[1-38])|(8[3-68]))|(4(2[1-8])|(35)|(7[1368])|(8[2457]))|(5(2[1-8])|(3[357])|(4[147])|(5[12578])|(6[37]))|(6(13)|(2[1-47])|(4[135-8])|(5[468]))|(7(2[1-8])|(35)|(4[13478])|(5[68])|(6[16-8])|(7[1-6])|(9[45]))[\\d]{4:10}))",
"mobile": "((6(5[0-3579])|(6[013-9])|([7-9][\\d])[\\d]{4:10}))",
},
"AU": {
"code": "61",
"pattern": "((((2([0-26-9][\\d])|(3[0-8])|(4[02-9])|(5[0135-9]))|(3([0-3589][\\d])|(4[0-578])|(6[1-9])|(7[0-35-9]))|(7([013-57-9][\\d])|(2[0-8]))[\\d]{3})|(8(51(0(0[03-9])|([12479][\\d])|(3[2-9])|(5[0-8])|(6[1-9])|(8[0-7]))|(1([0235689][\\d])|(1[0-69])|(4[0-589])|(7[0-47-9]))|(2(0[0-79])|([18][13579])|(2[14-9])|(3[0-46-9])|([4-6][\\d])|(7[89])|(9[0-4])))|((6[0-8])|([78][\\d])[\\d]{3})|(9([02-9][\\d]{3})|(1(([0-58][\\d])|(6[0135-9])[\\d])|(7(0[0-24-9])|([1-9][\\d]))|(9([0-46-9][\\d])|(5[0-79])))))[\\d]{3}))",
"mobile": "((4(83[0-38])|(93[0-6])[\\d]{5})|(4([0-3][\\d])|(4[047-9])|(5[0-25-9])|(6[06-9])|(7[02-9])|(8[0-24-9])|(9[0-27-9])[\\d]{6}))",
},
"AW": {
"code": "297",
"pattern": "((5(2[\\d])|(8[1-9])[\\d]{4}))",
"mobile": "(((290)|(5[69][\\d])|(6([03]0)|(22)|(4[0-2])|([69][\\d]))|(7([34][\\d])|(7[07]))|(9(6[45])|(9[4-8]))[\\d]{4}))",
},
"AX": {
"code": "358",
"pattern": "((18[1-8][\\d]{3:6}))",
"mobile": "((4946[\\d]{2:6})|((4[0-8])|(50)[\\d]{4:8}))",
},
"AZ": {
"code": "994",
"pattern": "(((2[12]428)|(3655[02])[\\d]{4})|((2(22[0-79])|(63[0-28]))|(3654)[\\d]{5})|(((1[28])|(46)[\\d])|(2([014-6]2)|([23]3))[\\d]{6}))",
"mobile": "((36554[\\d]{4})|(([16]0)|(4[04])|(5[015])|(7[07])|(99)[\\d]{7}))",
},
"BA": {
"code": "387",
"pattern": "(((3([05-79][2-9])|(1[4579])|([23][24-9])|(4[2-4689])|(8[2457-9]))|(49[2-579])|(5(0[2-49])|([13][2-9])|([268][2-4679])|(4[4689])|(5[2-79])|(7[2-69])|(9[2-4689]))[\\d]{5}))",
"mobile": "((6040[\\d]{5})|(6(03)|([1-356])|(44)|(7[\\d])[\\d]{6}))",
},
"BB": {
"code": "1",
"pattern": "((246521[0369][\\d]{3})|(246(2(2[78])|(7[0-4]))|(4(1[024-6])|(2[\\d])|(3[2-9]))|(5(20)|([34][\\d])|(54)|(7[1-3]))|(6(2[\\d])|(38))|(7[35]7)|(9(1[89])|(63))[\\d]{4}))",
"mobile": "((246((2([3568][\\d])|(4[0-57-9]))|(3(5[2-9])|(6[0-6]))|(4(46)|(5[\\d]))|(69[5-7])|(8([2-5][\\d])|(83))[\\d])|(52(1[147])|(20))[\\d]{3}))",
},
"BD": {
"code": "880",
"pattern": "(((4(31[\\d][\\d])|(423))|(5222)[\\d]{3}([\\d]{2})?)|(8332[6-9][\\d][\\d])|((3(03[56])|(224))|(4(22[25])|(653))[\\d]{3:4})|((3(42[47])|(529)|(823))|(4(027)|(525)|(65(28)|(8)))|(562)|(6257)|(7(1(5[3-5])|(6[12])|(7[156])|(89))|(22[589]56)|(32)|(42675)|(52([25689](56)|(8))|([347]8))|(71(6[1267])|(75)|(89))|(92374))|(82(2[59])|(32)56)|(9(03[23]56)|(23(256)|(373))|(31)|(5(1)|(2[4589]56)))[\\d]{3})|((3(02[348])|(22[35])|(324)|(422))|(4(22[67])|(32[236-9])|(6(2[46])|(5[57]))|(953))|(5526)|(6(024)|(6655))|(81)[\\d]{4:5})|((2(7(1[0-267])|(2[0-289])|(3[0-29])|(4[01])|(5[1-3])|(6[013])|(7[0178])|(91))|(8(0[125])|(1[1-6])|(2[0157-9])|(3[1-69])|(41)|(6[1-35])|(7[1-5])|(8[1-8])|(9[0-6]))|(9(0[0-2])|(1[0-4])|(2[568])|(3[3-6])|(5[5-7])|(6[0136-9])|(7[0-7])|(8[014-9])))|(3(0(2[025-79])|(3[2-4]))|(181)|(22[12])|(32[2356])|(824))|(4(02[09])|(22[348])|(32[045])|(523)|(6(27)|(54)))|(666(22)|(53))|(7(22[57-9])|(42[56])|(82[35])8)|(8(0[124-9])|(2(181)|(2[02-4679]8))|(4[12])|([5-7]2))|(9([04]2)|(2(2)|(328))|(81))[\\d]{4})|((2(222)|([45][\\d])[\\d])|(3(1(2[5-7])|([5-7]))|(425)|(822))|(4(033)|(1[\\d])|([257]1)|(332)|(4(2[246])|(5[25]))|(6(2[35])|(56)|(62))|(8(23)|(54))|(92[2-5]))|(5(02[03489])|(22[457])|(32[35-79])|(42[46])|(6([18])|(53))|(724)|(826))|(6(023)|(2(2[2-5])|(5[3-5])|(8))|(32[3478])|(42[34])|(52[47])|(6([18])|(6(2[34])|(5[24])))|([78]2[2-5])|(92[2-6]))|(7(02)|(21[\\d])|([3-589]1)|(6[12])|(72[24]))|(8(217)|(3[12])|([5-7]1))|(9[24]1)[\\d]{5})|(((3[2-8])|(5[2-57-9])|(6[03-589])1)|(4[4689][18])[\\d]{5})|([59]1[\\d]{5}))",
"mobile": "(((1[13-9][\\d])|(644)[\\d]{7})|((3[78])|(44)|(66)[02-9][\\d]{7}))",
},
"BE": {
"code": "32",
"pattern": "((80[2-8][\\d]{5})|((1[0-69])|([23][2-8])|(4[23])|(5[\\d])|(6[013-57-9])|(71)|(8[1-79])|(9[2-4])[\\d]{6}))",
"mobile": "((4[5-9][\\d]{7}))",
},
"BF": {
"code": "226",
"pattern": "((2(0(49)|(5[23])|(6[5-7])|(9[016-9]))|(4(4[569])|(5[4-6])|(6[5-7])|(7[0179]))|(5([34][\\d])|(50)|(6[5-7]))[\\d]{4}))",
"mobile": "(((0[125-7])|(5[1-8])|([67][\\d])[\\d]{6}))",
},
"BG": {
"code": "359",
"pattern": "((2[\\d]{5:7})|((43[1-6])|(70[1-9])[\\d]{4:5})|(([36][\\d])|(4[124-7])|([57][1-9])|(8[1-6])|(9[1-7])[\\d]{5:6}))",
"mobile": "(((43[07-9])|(99[69][\\d])[\\d]{5})|((8[7-9])|(98)[\\d]{7}))",
},
"BH": {
"code": "973",
"pattern": "(((1(3[1356])|(6[0156])|(7[\\d])[\\d])|(6(1[16][\\d])|(500)|(6(0[\\d])|(3[12])|(44)|(7[7-9])|(88))|(9[69][69]))|(7(1(11)|(78))|(7[\\d][\\d]))[\\d]{4}))",
"mobile": "(((3([1-79][\\d])|(8[0-47-9])[\\d])|(6(3(00)|(33)|(6[16]))|(6(3[03-9])|([69][\\d])|(7[0-6])))[\\d]{4}))",
},
"BI": {
"code": "257",
"pattern": "(((22)|(31)[\\d]{6}))",
"mobile": "(((29)|(6[1257-9])|(7[125-9])[\\d]{6}))",
},
"BJ": {
"code": "229",
"pattern": "((2(02)|(1[037])|(2[45])|(3[68])[\\d]{5}))",
"mobile": "(((40)|(5[1-8])|(6[\\d])|(9[013-9])[\\d]{6}))",
},
"BL": {
"code": "590",
"pattern": "((590(2[7-9])|(5[12])|(87)[\\d]{4}))",
"mobile": "((69(0[\\d][\\d])|(1(2[2-9])|(3[0-5]))[\\d]{4}))",
},
"BM": {
"code": "1",
"pattern": "((441([46][\\d][\\d])|(5(4[\\d])|(60)|(89))[\\d]{4}))",
"mobile": "((441([2378][\\d])|(5[0-39])[\\d]{5}))",
},
"BN": {
"code": "673",
"pattern": "((22[0-7][\\d]{4})|((2[013-9])|([34][\\d])|(5[0-25-9])[\\d]{5}))",
"mobile": "(((22[89])|([78][\\d][\\d])[\\d]{4}))",
},
"BO": {
"code": "591",
"pattern": "(((2(2[\\d][\\d])|(5(11)|([258][\\d])|(9[67]))|(6(12)|(2[\\d])|(9[34]))|(8(2[34])|(39)|(62)))|(3(3[\\d][\\d])|(4(6[\\d])|(8[24]))|(8(25)|(42)|(5[257])|(86)|(9[25]))|(9([27][\\d])|(3[2-4])|(4[248])|(5[24])|(6[2-6])))|(4(4[\\d][\\d])|(6(11)|([24689][\\d])|(72)))[\\d]{4}))",
"mobile": "(([67][\\d]{7}))",
},
"BQ": {
"code": "599",
"pattern": "(((318[023])|(41(6[023])|(70))|(7(1[578])|(2[05])|(50)[\\d])[\\d]{3}))",
"mobile": "(((31(8[14-8])|(9[14578]))|(416[14-9])|(7(0[01])|(7[07])|(8[\\d])|(9[056])[\\d])[\\d]{3}))",
},
"BR": {
"code": "55",
"pattern": "((([14689][1-9])|(2[12478])|(3[1-578])|(5[13-5])|(7[13-579])[2-5][\\d]{7}))",
"mobile": "((([14689][1-9])|(2[12478])|(3[1-578])|(5[13-5])|(7[13-579])(7)|(9[\\d])[\\d]{7}))",
},
"BS": {
"code": "1",
"pattern": "((242(3(02)|([236][1-9])|(4[0-24-9])|(5[0-68])|(7[347])|(8[0-4])|(9[2-467]))|(461)|(502)|(6(0[1-4])|(12)|(2[013])|([45]0)|(7[67])|(8[78])|(9[89]))|(7(02)|(88))[\\d]{4}))",
"mobile": "((242(3(5[79])|(7[56])|(95))|(4([23][1-9])|(4[1-35-9])|(5[1-8])|(6[2-8])|(7[\\d])|(81))|(5(2[45])|(3[35])|(44)|(5[1-46-9])|(65)|(77))|(6[34]6)|(7(27)|(38))|(8(0[1-9])|(1[02-9])|(2[\\d])|([89]9))[\\d]{4}))",
},
"BT": {
"code": "975",
"pattern": "(((2[3-6])|([34][5-7])|(5[236])|(6[2-46])|(7[246])|(8[2-4])[\\d]{5}))",
"mobile": "(((1[67])|(77)[\\d]{6}))",
},
"BW": {
"code": "267",
"pattern": "(((2(4[0-48])|(6[0-24])|(9[0578]))|(3(1[0-35-9])|(55)|([69][\\d])|(7[013]))|(4(6[03])|(7[1267])|(9[0-5]))|(5(3[03489])|(4[0489])|(7[1-47])|(88)|(9[0-49]))|(6(2[1-35])|(5[149])|(8[067]))[\\d]{4}))",
"mobile": "(((321)|(7([1-7][\\d])|(8[01]))[\\d]{5}))",
},
"BY": {
"code": "375",
"pattern": "(((1(5(1[1-5])|([24][\\d])|(6[2-4])|(9[1-7]))|(6([235][\\d])|(4[1-7]))|(7[\\d][\\d]))|(2(1([246][\\d])|(3[0-35-9])|(5[1-9]))|(2([235][\\d])|(4[0-8]))|(3([26][\\d])|(3[02-79])|(4[024-7])|(5[03-7])))[\\d]{5}))",
"mobile": "(((2(5[5-79])|(9[1-9]))|((33)|(44)[\\d])[\\d]{6}))",
},
"BZ": {
"code": "501",
"pattern": "(((2([02][\\d])|(36)|([68]0))|([3-58]([02][\\d])|([68]0))|(7([02][\\d])|(32)|([68]0))[\\d]{4}))",
"mobile": "((6[0-35-7][\\d]{5}))",
},
"CA": {
"code": "1",
"pattern": "(((2(04)|([23]6)|([48]9)|(50))|(3(06)|(43)|(6[578]))|(4(03)|(1[68])|(3[178])|(50)|(74))|(5(06)|(1[49])|(48)|(79)|(8[17]))|(6(04)|(13)|(39)|(47)|(72))|(7(0[59])|(78)|(8[02]))|(8([06]7)|(19)|(25)|(73))|(90[25])[2-9][\\d]{6}))",
"mobile": "(((2(04)|([23]6)|([48]9)|(50))|(3(06)|(43)|(6[578]))|(4(03)|(1[68])|(3[178])|(50)|(74))|(5(06)|(1[49])|(48)|(79)|(8[17]))|(6(04)|(13)|(39)|(47)|(72))|(7(0[59])|(78)|(8[02]))|(8([06]7)|(19)|(25)|(73))|(90[25])[2-9][\\d]{6}))",
},
"CC": {
"code": "61",
"pattern": "((8(51(0(02)|(31)|(60)|(89))|(1(18)|(76))|(223))|(91(0(1[0-2])|(29))|(1([28]2)|(50)|(79))|(2(10)|(64))|(3([06]8)|(22))|(4[29]8)|(62[\\d])|(70[23])|(959))[\\d]{3}))",
"mobile": "((4(83[0-38])|(93[0-6])[\\d]{5})|(4([0-3][\\d])|(4[047-9])|(5[0-25-9])|(6[06-9])|(7[02-9])|(8[0-24-9])|(9[0-27-9])[\\d]{6}))",
},
"CD": {
"code": "243",
"pattern": "((12[\\d]{7})|([1-6][\\d]{6}))",
"mobile": "((88[\\d]{5})|((8[0-59])|(9[017-9])[\\d]{7}))",
},
"CF": {
"code": "236",
"pattern": "((2[12][\\d]{6}))",
"mobile": "((7[02457][\\d]{6}))",
},
"CG": {
"code": "242",
"pattern": "((222[1-589][\\d]{5}))",
"mobile": "((026(1[0-5])|(6[6-9])[\\d]{4})|(0([14-6][\\d][\\d])|(2(40)|(5[5-8])|(6[07-9]))[\\d]{5}))",
},
"CH": {
"code": "41",
"pattern": "(((2[12467])|(3[1-4])|(4[134])|(5[256])|(6[12])|([7-9]1)[\\d]{7}))",
"mobile": "((7[35-9][\\d]{7}))",
},
"CI": {
"code": "225",
"pattern": "((2([15][\\d]{3})|(7(2(0[23])|(1[2357])|([23][45])|(4[3-5]))|(3(06)|(1[69])|([2-6]7)))[\\d]{5}))",
"mobile": "((0704[0-7][\\d]{5})|(0([15][\\d][\\d])|(7(0[0-37-9])|([4-9][7-9]))[\\d]{6}))",
},
"CK": {
"code": "682",
"pattern": "(((2[\\d])|(3[13-7])|(4[1-5])[\\d]{3}))",
"mobile": "(([578][\\d]{4}))",
},
"CL": {
"code": "56",
"pattern": "((2(1982[0-6])|(3314[05-9])[\\d]{3})|((2(1(160)|(962))|(3(2[\\d][\\d])|(3([034][\\d])|(1[0-35-9])|(2[1-9])|(5[0-2]))|(600))|(6469))|(80[1-9][\\d][\\d])|(9(3([0-57-9][\\d][\\d])|(6(0[02-9])|([1-9][\\d])))|(6([0-8][\\d][\\d])|(9([02-79][\\d])|(1[05-9])))|(7[1-9][\\d][\\d])|(9([03-9][\\d][\\d])|(1([0235-9][\\d])|(4[0-24-9]))|(2([0-79][\\d])|(8[0-46-9]))))[\\d]{4})|((22)|(3[2-5])|([47][1-35])|(5[1-3578])|(6[13-57])|(8[1-9])|(9[2458])[\\d]{7}))",
"mobile": "((2(1982[0-6])|(3314[05-9])[\\d]{3})|((2(1(160)|(962))|(3(2[\\d][\\d])|(3([034][\\d])|(1[0-35-9])|(2[1-9])|(5[0-2]))|(600))|(6469))|(80[1-9][\\d][\\d])|(9(3([0-57-9][\\d][\\d])|(6(0[02-9])|([1-9][\\d])))|(6([0-8][\\d][\\d])|(9([02-79][\\d])|(1[05-9])))|(7[1-9][\\d][\\d])|(9([03-9][\\d][\\d])|(1([0235-9][\\d])|(4[0-24-9]))|(2([0-79][\\d])|(8[0-46-9]))))[\\d]{4})|((22)|(3[2-5])|([47][1-35])|(5[1-3578])|(6[13-57])|(8[1-9])|(9[2458])[\\d]{7}))",
},
"CM": {
"code": "237",
"pattern": "((2(22)|(33)[\\d]{6}))",
"mobile": "(((24[23])|(6[5-9][\\d])[\\d]{6}))",
},
"CN": {
"code": "86",
"pattern": "(((10([02-79][\\d][\\d])|([18](0[1-9])|([1-9][\\d])))|(21([18](0[1-9])|([1-9][\\d]))|([2-79][\\d][\\d]))[\\d]{5})|((43[35])|(754)[\\d]{7:8})|(8(078[\\d]{7})|(51[\\d]{7:8}))|((10)|((2)|(85)1)|(43[35])|(754)(100[\\d][\\d])|(95[\\d]{3:4}))|((2[02-57-9])|(3(11)|(7[179]))|(4([15]1)|(3[12]))|(5(1[\\d])|(2[37])|(3[12])|(51)|(7[13-79])|(9[15]))|(7([39]1)|(5[57])|(6[09]))|(8(71)|(98))([02-8][\\d]{7})|(1(0(0[\\d][\\d]([\\d]{3})?)|([1-9][\\d]{5}))|([1-9][\\d]{6}))|(9([0-46-9][\\d]{6})|(5[\\d]{3}([\\d]([\\d]{2})?)?)))|((3(1[02-9])|(35)|(49)|(5[\\d])|(7[02-68])|(9[1-68]))|(4(1[02-9])|(2[179])|(3[46-9])|(5[2-9])|(6[47-9])|(7[\\d])|(8[23]))|(5(3[03-9])|(4[36])|(5[02-9])|(6[1-46])|(7[028])|(80)|(9[2-46-9]))|(6(3[1-5])|(6[0238])|(9[12]))|(7(01)|([17][\\d])|(2[248])|(3[04-9])|(4[3-6])|(5[0-3689])|(6[2368])|(9[02-9]))|(8(1[236-8])|(2[5-7])|(3[\\d])|(5[2-9])|(7[02-9])|(8[36-8])|(9[1-7]))|(9(0[1-3689])|(1[1-79])|([379][\\d])|(4[13])|(5[1-5]))([02-8][\\d]{6})|(1(0(0[\\d][\\d]([\\d]{2})?)|([1-9][\\d]{4}))|([1-9][\\d]{5}))|(9([0-46-9][\\d]{5})|(5[\\d]{3:5}))))",
"mobile": "((1740[0-5][\\d]{6})|(1([38][\\d])|(4[57])|(5[0-35-9])|(6[25-7])|(7[0-35-8])|(9[0135-9])[\\d]{8}))",
},
"CO": {
"code": "57",
"pattern": "((60[124-8][2-9][\\d]{6})|([124-8][2-9][\\d]{6}))",
"mobile": "((3333(0(0[\\d])|(1[0-5]))|([4-9][\\d][\\d])[\\d]{3})|((3(24[1-9])|(3(00)|(3[0-24-9])))|(9101)[\\d]{6})|(3(0[0-5])|(1[\\d])|(2[0-3])|(5[01])|(70)[\\d]{7}))",
},
"CR": {
"code": "506",
"pattern": "((210[7-9][\\d]{4})|(2([024-7][\\d])|(1[1-9])[\\d]{5}))",
"mobile": "(((3005[\\d])|(6500[01])[\\d]{3})|((5[07])|(6[0-4])|(7[0-3])|(8[3-9])[\\d]{6}))",
},
"CU": {
"code": "53",
"pattern": "(((3[23])|(48)[\\d]{4:6})|((31)|(4[36])|(8(0[25])|(78)[\\d])[\\d]{6})|((2[1-4])|(4[1257])|(7[\\d])[\\d]{5:6}))",
"mobile": "((5[\\d]{7}))",
},
"CV": {
"code": "238",
"pattern": "((2(2[1-7])|(3[0-8])|(4[12])|(5[1256])|(6[\\d])|(7[1-3])|(8[1-5])[\\d]{4}))",
"mobile": "(((36)|(5[1-389])|(9[\\d])[\\d]{5}))",
},
"CW": {
"code": "599",
"pattern": "((9(4(3[0-5])|(4[14])|(6[\\d]))|(50[\\d])|(7(2[014])|(3[02-9])|(4[4-9])|(6[357])|(77)|(8[7-9]))|(8(3[39])|([46][\\d])|(7[01])|(8[57-9]))[\\d]{4}))",
"mobile": "((953[01][\\d]{4})|(9(5[12467])|(6[5-9])[\\d]{5}))",
},
"CX": {
"code": "61",
"pattern": "((8(51(0(01)|(30)|(59)|(88))|(1(17)|(46)|(75))|(2(22)|(35)))|(91(00[6-9])|(1([28]1)|(49)|(78))|(2(09)|(63))|(3(12)|(26)|(75))|(4(56)|(97))|(64[\\d])|(7(0[01])|(1[0-2]))|(958))[\\d]{3}))",
"mobile": "((4(83[0-38])|(93[0-6])[\\d]{5})|(4([0-3][\\d])|(4[047-9])|(5[0-25-9])|(6[06-9])|(7[02-9])|(8[0-24-9])|(9[0-27-9])[\\d]{6}))",
},
"CY": {
"code": "357",
"pattern": "((2[2-6][\\d]{6}))",
"mobile": "((9[4-79][\\d]{6}))",
},
"CZ": {
"code": "420",
"pattern": "(((2[\\d])|(3[1257-9])|(4[16-9])|(5[13-9])[\\d]{7}))",
"mobile": "(((60[1-8])|(7(0[2-5])|([2379][\\d]))[\\d]{6}))",
},
"DE": {
"code": "49",
"pattern": "((32[\\d]{9:11})|(49[2-6][\\d]{10})|(49[0-7][\\d]{3:9})|(([34]0)|([68]9)[\\d]{3:13})|((2(0[1-689])|([1-3569][\\d])|(4[0-8])|(7[1-7])|(8[0-7]))|(3([3569][\\d])|(4[0-79])|(7[1-7])|(8[1-8]))|(4(1[02-9])|([2-48][\\d])|(5[0-6])|(6[0-8])|(7[0-79]))|(5(0[2-8])|([124-6][\\d])|([38][0-8])|([79][0-7]))|(6(0[02-9])|([1-358][\\d])|([47][0-8])|(6[1-9]))|(7(0[2-8])|(1[1-9])|([27][0-7])|(3[\\d])|([4-6][0-8])|(8[0-5])|(9[013-7]))|(8(0[2-9])|(1[0-79])|(2[\\d])|(3[0-46-9])|(4[0-6])|(5[013-9])|(6[1-8])|(7[0-8])|(8[0-24-6]))|(9(0[6-9])|([1-4][\\d])|([589][0-7])|(6[0-8])|(7[0-467]))[\\d]{3:12}))",
"mobile": "((15[0-25-9][\\d]{8})|(1(6[023])|(7[\\d])[\\d]{7:8}))",
},
"DJ": {
"code": "253",
"pattern": "((2(1[2-5])|(7[45])[\\d]{5}))",
"mobile": "((77[\\d]{6}))",
},
"DK": {
"code": "45",
"pattern": "((([2-7][\\d])|(8[126-9])|(9[1-46-9])[\\d]{6}))",
"mobile": "((([2-7][\\d])|(8[126-9])|(9[1-46-9])[\\d]{6}))",
},
"DM": {
"code": "1",
"pattern": "((767(2(55)|(66))|(4(2[01])|(4[0-25-9]))|(50[0-4])[\\d]{4}))",
"mobile": "((767(2([2-4689]5)|(7[5-7]))|(31[5-7])|(61[1-8])|(70[1-6])[\\d]{4}))",
},
"DO": {
"code": "1",
"pattern": "((8([04]9[2-9][\\d][\\d])|(29(2([0-59][\\d])|(6[04-9])|(7[0-27])|(8[0237-9]))|(3([0-35-9][\\d])|(4[7-9]))|([45][\\d][\\d])|(6([0-27-9][\\d])|([3-5][1-9])|(6[0135-8]))|(7(0[013-9])|([1-37][\\d])|(4[1-35689])|(5[1-4689])|(6[1-57-9])|(8[1-79])|(9[1-8]))|(8(0[146-9])|(1[0-48])|([248][\\d])|(3[1-79])|(5[01589])|(6[013-68])|(7[124-8])|(9[0-8]))|(9([0-24][\\d])|(3[02-46-9])|(5[0-79])|(60)|(7[0169])|(8[57-9])|(9[02-9])))[\\d]{4}))",
"mobile": "((8[024]9[2-9][\\d]{6}))",
},
"DZ": {
"code": "213",
"pattern": "((9619[\\d]{5})|((1[\\d])|(2[013-79])|(3[0-8])|(4[013-689])[\\d]{6}))",
"mobile": "(((5(4[0-29])|(5[\\d])|(6[0-2]))|(6([569][\\d])|(7[0-6]))|(7[7-9][\\d])[\\d]{6}))",
},
"EC": {
"code": "593",
"pattern": "(([2-7][2-7][\\d]{6}))",
"mobile": "((964[0-2][\\d]{5})|(9(39)|([57][89])|(6[0-36-9])|([89][\\d])[\\d]{6}))",
},
"EE": {
"code": "372",
"pattern": "(((3[23589])|(4[3-8])|(6[\\d])|(7[1-9])|(88)[\\d]{5}))",
"mobile": "(((5[\\d]{5})|(8(1(0(000)|([3-9][\\d][\\d]))|((1(0[236])|(1[\\d]))|((23)|([3-79][\\d])[\\d])[\\d]))|(2(0(000)|((19)|([2-7][\\d])[\\d]))|((([124-6][\\d])|(3[5-9])[\\d])|(7([679][\\d])|(8[13-9]))|(8([2-6][\\d])|(7[01]))[\\d]))|([349][\\d]{4}))[\\d][\\d])|(5(([02][\\d])|(5[0-478])[\\d])|(1([0-8][\\d])|(95))|(6(4[0-4])|(5[1-589]))[\\d]{3}))",
},
"EG": {
"code": "20",
"pattern": "((13[23][\\d]{6})|((15)|(57)[\\d]{6:7})|((2[2-4])|(3)|(4[05-8])|(5[05])|(6[24-689])|(8[2468])|(9[235-7])[\\d]{7}))",
"mobile": "((1[0-25][\\d]{8}))",
},
"EH": {
"code": "212",
"pattern": "((528[89][\\d]{5}))",
"mobile": "(((6([0-79][\\d])|(8[0-247-9]))|(7([017][\\d])|(6[0-367]))[\\d]{6}))",
},
"ER": {
"code": "291",
"pattern": "(((1(1[12568])|([24]0)|(55)|(6[146]))|(8[\\d][\\d])[\\d]{4}))",
"mobile": "(((17[1-3])|(7[\\d][\\d])[\\d]{4}))",
},
"ES": {
"code": "34",
"pattern": "((96906(0[0-8])|(1[1-9])|([2-9][\\d])[\\d][\\d])|(9(69(0[0-57-9])|([1-9][\\d]))|(73([0-8][\\d])|(9[1-9]))[\\d]{4})|((8([1356][\\d])|([28][0-8])|([47][1-9]))|(9([135][\\d])|([268][0-8])|(4[1-9])|(7[124-9]))[\\d]{6}))",
"mobile": "(((590[16]00[\\d])|(9(6906(09)|(10))|(7390[\\d][\\d]))[\\d][\\d])|((6[\\d])|(7[1-48])[\\d]{7}))",
},
"ET": {
"code": "251",
"pattern": "((11667[01][\\d]{3})|((11(1(1[124])|(2[2-7])|(3[1-5])|(5[5-8])|(8[6-8]))|(2(13)|(3[6-8])|(5[89])|(7[05-9])|(8[2-6]))|(3(2[01])|(3[0-289])|(4[1289])|(7[1-4])|(87))|(4(1[69])|(3[2-49])|(4[0-3])|(6[5-8]))|(5(1[578])|(44)|(5[0-4]))|(6(1[78])|(2[69])|(39)|(4[5-7])|(5[1-5])|(6[0-59])|(8[015-8])))|(2(2(11[1-9])|(22[0-7])|(33[\\d])|(44[1467])|(66[1-68]))|(5(11[124-6])|(33[2-8])|(44[1467])|(55[14])|(66[1-3679])|(77[124-79])|(880)))|(3(3(11[0-46-8])|((22)|(55)[0-6])|(33[0134689])|(44[04])|(66[01467]))|(4(44[0-8])|(55[0-69])|(66[0-3])|(77[1-5])))|(4(6(119)|(22[0-24-7])|(33[1-5])|(44[13-69])|(55[14-689])|(660)|(88[1-4]))|(7((11)|(22)[1-9])|(33[13-7])|(44[13-6])|(55[1-689])))|(5(7(227)|(55[05])|((66)|(77)[14-8]))|(8(11[149])|(22[013-79])|(33[0-68])|(44[013-8])|(550)|(66[1-5])|(77[\\d])))[\\d]{4}))",
"mobile": "((9[\\d]{8}))",
},
"FI": {
"code": "358",
"pattern": "(((1[3-79][1-8])|([235689][1-8][\\d])[\\d]{2:6}))",
"mobile": "((4946[\\d]{2:6})|((4[0-8])|(50)[\\d]{4:8}))",
},
"FJ": {
"code": "679",
"pattern": "((603[\\d]{4})|((3[0-5])|(6[25-7])|(8[58])[\\d]{5}))",
"mobile": "((([279][\\d])|(45)|(5[01568])|(8[034679])[\\d]{5}))",
},
"FK": {
"code": "500",
"pattern": "(([2-47][\\d]{4}))",
"mobile": "(([56][\\d]{4}))",
},
"FM": {
"code": "691",
"pattern": "((31(00[67])|(208)|(309)[\\d][\\d])|((3([2357]0[1-9])|(602)|(804)|(905))|((820)|(9[2-6][\\d])[\\d])[\\d]{3}))",
"mobile": "((31(00[67])|(208)|(309)[\\d][\\d])|((3([2357]0[1-9])|(602)|(804)|(905))|((820)|(9[2-7][\\d])[\\d])[\\d]{3}))",
},
"FO": {
"code": "298",
"pattern": "(((20)|([34][\\d])|(8[19])[\\d]{4}))",
"mobile": "((([27][1-9])|(5[\\d])|(91)[\\d]{4}))",
},
"FR": {
"code": "33",
"pattern": "((([1-35][\\d])|(4[1-9])[\\d]{7}))",
"mobile": "(((6([0-24-8][\\d])|(3[0-8])|(9[589]))|(7(00)|([3-9][\\d]))[\\d]{6}))",
},
"GA": {
"code": "241",
"pattern": "(([01]1[\\d]{6}))",
"mobile": "((((0[2-7])|(7[467])[\\d])|(6(0[0-4])|(10)|([256][\\d]))[\\d]{5})|([2-7][\\d]{6}))",
},
"GB": {
"code": "44",
"pattern": "(((1(1(3([0-58][\\d][\\d])|(73[0235]))|(4([0-5][\\d][\\d])|(69[7-9])|(70[0359]))|((5[0-26-9])|([78][0-49])[\\d][\\d])|(6([0-4][\\d][\\d])|(50[0-24-69])))|(2((0[024-9])|(2[3-9])|(3[3-79])|(4[1-689])|([58][02-9])|(6[0-47-9])|(7[013-9])|(9[\\d])[\\d][\\d])|(1([0-7][\\d][\\d])|(8([02][\\d])|(1[0-27-9]))))|((3(0[\\d])|(1[0-8])|([25][02-9])|(3[02-579])|([468][0-46-9])|(7[1-35-79])|(9[2-578]))|(4(0[03-9])|([137][\\d])|([28][02-57-9])|(4[02-69])|(5[0-8])|([69][0-79]))|(5(0[1-35-9])|([16][\\d])|(2[024-9])|(3[015689])|(4[02-9])|(5[03-9])|(7[0-35-9])|(8[0-468])|(9[0-57-9]))|(6(0[034689])|(1[\\d])|(2[0-35689])|([38][013-9])|(4[1-467])|(5[0-69])|(6[13-9])|(7[0-8])|(9[0-24578]))|(7(0[0246-9])|(2[\\d])|(3[0236-8])|(4[03-9])|(5[0-46-9])|(6[013-9])|(7[0-35-9])|(8[024-9])|(9[02-9]))|(8(0[35-9])|(2[1-57-9])|(3[02-578])|(4[0-578])|(5[124-9])|(6[2-69])|(7[\\d])|(8[02-9])|(9[02569]))|(9(0[02-589])|([18][\\d])|(2[02-689])|(3[1-57-9])|(4[2-9])|(5[0-579])|(6[2-47-9])|(7[0-24578])|(9[2-57]))[\\d][\\d]))|(2(0[013478])|(3[0189])|(4[017])|(8[0-46-9])|(9[0-2])[\\d]{3})[\\d]{4})|(1(2(0(46[1-4])|(87[2-9]))|(545[1-79])|(76(2[\\d])|(3[1-8])|(6[1-6]))|(9(7(2[0-4])|(3[2-5]))|(8(2[2-8])|(7[0-47-9])|(8[3-5]))))|(3(6(38[2-5])|(47[23]))|(8(47[04-9])|(64[0157-9])))|(4(044[1-7])|(20(2[23])|(8[\\d]))|(6(0(30)|(5[2-57])|(6[1-8])|(7[2-8]))|(140))|(8(052)|(87[1-3])))|(5(2(4(3[2-79])|(6[\\d]))|(76[\\d]))|(6(26[06-9])|(686)))|(6(06(4[\\d])|(7[4-79]))|(295[5-7])|(35[34][\\d])|(47(24)|(61))|(59(5[08])|(6[67])|(74))|(9(55[0-4])|(77[23])))|(7(26(6[13-9])|(7[0-7]))|((442)|(688)[\\d])|(50(2[0-3])|([3-68]2)|(76)))|(8(27[56][\\d])|(37(5[2-5])|(8[239]))|(843[2-58]))|(9(0(0(6[1-8])|(85))|(52[\\d]))|(3583)|(4(66[1-8])|(9(2[01])|(81)))|(63(23)|(3[1-4]))|(9561))[\\d]{3}))",
"mobile": "((7(457[0-57-9])|(700[01])|(911[028])[\\d]{5})|(7([1-3][\\d][\\d])|(4([0-46-9][\\d])|(5[0-689]))|(5(0[0-8])|([13-9][\\d])|(2[0-35-9]))|(7(0[1-9])|([1-7][\\d])|(8[02-9])|(9[0-689]))|(8([014-9][\\d])|([23][0-8]))|(9([024-9][\\d])|(1[02-9])|(3[0-689]))[\\d]{6}))",
},
"GD": {
"code": "1",
"pattern": "((473(2(3[0-2])|(69))|(3(2[89])|(86))|(4([06]8)|(3[5-9])|(4[0-49])|(5[5-79])|(73)|(90))|(63[68])|(7(58)|(84))|(800)|(938)[\\d]{4}))",
"mobile": "((473(4(0[2-79])|(1[04-9])|(2[0-5])|(58))|(5(2[01])|(3[3-8]))|(901)[\\d]{4}))",
},
"GE": {
"code": "995",
"pattern": "(((3([256][\\d])|(4[124-9])|(7[0-4]))|(4(1[\\d])|(2[2-7])|(3[1-79])|(4[2-8])|(7[239])|(9[1-7]))[\\d]{6}))",
"mobile": "((5((0555)|(1177)[5-9])|(757(7[7-9])|(8[01]))[\\d]{3})|(5(00(0[\\d])|(50))|(11(00)|(1[\\d])|(2[0-4])|(3[01]))|(5200)|(75(00)|([57]5))|(8(0([01][\\d])|(2[0-4]))|(58[89])|(8(55)|(88)))[\\d]{4})|(5(0070)|(11(33)|(51))|([25]222)|(3333)[0-4][\\d]{3})|((5([14]4)|(5[0157-9])|(68)|(7[0147-9])|(9[1-35-9]))|(790)[\\d]{6}))",
},
"GF": {
"code": "594",
"pattern": "((594([023][\\d])|(1[01])|(4[03-9])|(5[6-9])|(6[0-3])|(80)|(9[0-6])[\\d]{4}))",
"mobile": "((694([0-249][\\d])|(3[0-48])[\\d]{4}))",
},
"GG": {
"code": "44",
"pattern": "((1481[25-9][\\d]{5}))",
"mobile": "((7((781)|(839)[\\d])|(911[17])[\\d]{5}))",
},
"GH": {
"code": "233",
"pattern": "((3082[0-5][\\d]{4})|(3(0([237][\\d])|(8[01]))|([167](2[0-6])|(7[\\d])|(80))|(2(2[0-5])|(7[\\d])|(80))|(3(2[0-3])|(7[\\d])|(80))|(4(2[013-9])|(3[01])|(7[\\d])|(80))|(5(2[0-7])|(7[\\d])|(80))|(8(2[0-2])|(7[\\d])|(80))|(9([28]0)|(7[\\d]))[\\d]{5}))",
"mobile": "(((2([0346-8][\\d])|(5[67]))|(5([0457][\\d])|(6[01])|(9[1-9]))[\\d]{6}))",
},
"GI": {
"code": "350",
"pattern": "((21(6[24-7][\\d])|(90[0-2])[\\d]{3})|(2(00)|(2[25])[\\d]{5}))",
"mobile": "(((5[146-8][\\d])|(606)[\\d]{5}))",
},
"GL": {
"code": "299",
"pattern": "(((19)|(3[1-7])|(6[14689])|(70)|(8[14-79])|(9[\\d])[\\d]{4}))",
"mobile": "(([245][\\d]{5}))",
},
"GM": {
"code": "220",
"pattern": "(((4([23][\\d][\\d])|(4(1[024679])|([6-9][\\d])))|(5(5(3[\\d])|(4[0-7]))|(6[67][\\d])|(7(1[04])|(2[035])|(3[58])|(48)))|(8[\\d]{3})[\\d]{3}))",
"mobile": "((([23679][\\d])|(5[0-389])[\\d]{5}))",
},
"GN": {
"code": "224",
"pattern": "((3(0(24)|(3[12])|(4[1-35-7])|(5[13])|(6[189])|([78]1)|(9[1478]))|(1[\\d][\\d])[\\d]{4}))",
"mobile": "((6[0-356][\\d]{7}))",
},
"GP": {
"code": "590",
"pattern": "((590(0[1-68])|(1[0-24-7])|(2[0-68])|(3[1289])|(4[0-24-9])|(5[3-579])|(6[0189])|(7[08])|(8[0-689])|(9[\\d])[\\d]{4}))",
"mobile": "((69(0[\\d][\\d])|(1(2[2-9])|(3[0-5]))[\\d]{4}))",
},
"GQ": {
"code": "240",
"pattern": "((33[0-24-9][\\d][46][\\d]{4})|(3(33)|(5[\\d])[\\d][7-9][\\d]{4}))",
"mobile": "(((222)|(55[\\d])[\\d]{6}))",
},
"GR": {
"code": "30",
"pattern": "((2(1[\\d][\\d])|(2(2[1-46-9])|([36][1-8])|(4[1-7])|(5[1-4])|(7[1-5])|([89][1-9]))|(3(1[\\d])|(2[1-57])|([35][1-3])|(4[13])|(7[1-7])|(8[124-6])|(9[1-79]))|(4(1[\\d])|(2[1-8])|(3[1-4])|(4[13-5])|(6[1-578])|(9[1-5]))|(5(1[\\d])|([29][1-4])|(3[1-5])|(4[124])|(5[1-6]))|(6(1[\\d])|([269][1-6])|(3[1245])|(4[1-7])|(5[13-9])|(7[14])|(8[1-5]))|(7(1[\\d])|(2[1-5])|(3[1-6])|(4[1-7])|(5[1-57])|(6[135])|(9[125-7]))|(8(1[\\d])|(2[1-5])|([34][1-4])|(9[1-57]))[\\d]{6}))",
"mobile": "((68[57-9][\\d]{7})|((69)|(94)[\\d]{8}))",
},
"GT": {
"code": "502",
"pattern": "(([267][2-9][\\d]{6}))",
"mobile": "(([3-5][\\d]{7}))",
},
"GU": {
"code": "1",
"pattern": "((671(3(00)|(3[39])|(4[349])|(55)|(6[26]))|(4(00)|(56)|(7[1-9])|(8[0236-9]))|(5(55)|(6[2-5])|(88))|(6(3[2-578])|(4[24-9])|(5[34])|(78)|(8[235-9]))|(7([0479]7)|(2[0167])|(3[45])|(8[7-9]))|(8([2-57-9]8)|(6[48]))|(9(2[29])|(6[79])|(7[1279])|(8[7-9])|(9[78]))[\\d]{4}))",
"mobile": "((671(3(00)|(3[39])|(4[349])|(55)|(6[26]))|(4(00)|(56)|(7[1-9])|(8[0236-9]))|(5(55)|(6[2-5])|(88))|(6(3[2-578])|(4[24-9])|(5[34])|(78)|(8[235-9]))|(7([0479]7)|(2[0167])|(3[45])|(8[7-9]))|(8([2-57-9]8)|(6[48]))|(9(2[29])|(6[79])|(7[1279])|(8[7-9])|(9[78]))[\\d]{4}))",
},
"GW": {
"code": "245",
"pattern": "((443[\\d]{6}))",
"mobile": "((9(5[\\d])|(6[569])|(77)[\\d]{6}))",
},
"GY": {
"code": "592",
"pattern": "(((2(1[6-9])|(2[0-35-9])|(3[1-4])|(5[3-9])|(6[\\d])|(7[0-24-79]))|(3(2[25-9])|(3[\\d]))|(4(4[0-24])|(5[56]))|(77[1-57])[\\d]{4}))",
"mobile": "(((6[\\d][\\d])|(70[015-7])[\\d]{4}))",
},
"HK": {
"code": "852",
"pattern": "(((2([13-9][\\d])|(2[013-9])[\\d])|(3(([1569][0-24-9])|(4[0-246-9])|(7[0-24-69])[\\d])|(8(4[0-8])|(5[0-5])|(9[\\d])))|(58(0[1-8])|(1[2-9]))[\\d]{4}))",
"mobile": "(((46(0[0-7])|(1[0-6])|(4[0-57-9])|(6[0-4])|(7[0-8]))|(573[0-6])|(6(26[013-8])|(66[0-3]))|(70(7[1-5])|(8[0-4]))|(848[015-9])|(929[013-9])[\\d]{4})|((4(40)|(6[2358]))|(5([1-59][0-46-9])|(6[0-4689])|(7[0-24679]))|(6(0[1-9])|([13-59][\\d])|([268][0-57-9])|(7[0-79]))|(84[09])|(9(0[1-9])|(1[02-9])|([2358][0-8])|([467][\\d]))[\\d]{5}))",
},
"HN": {
"code": "504",
"pattern": "((2(2(0[0-39])|(1[1-367])|([23][\\d])|(4[03-6])|(5[57])|(6[245])|(7[0135689])|(8[01346-9])|(9[0-2]))|(4(0[78])|(2[3-59])|(3[13-9])|(4[0-68])|(5[1-35]))|(5(0[7-9])|(16)|(4[03-5])|(5[\\d])|(6[014-6])|(7[04])|(80))|(6([056][\\d])|(17)|(2[067])|(3[04])|(4[0-378])|([78][0-8])|(9[01]))|(7(6[46-9])|(7[02-9])|(8[034])|(91))|(8(79)|(8[0-357-9])|(9[1-57-9]))[\\d]{4}))",
"mobile": "(([37-9][\\d]{7}))",
},
"HR": {
"code": "385",
"pattern": "((1[\\d]{7})|((2[0-3])|(3[1-5])|(4[02-47-9])|(5[1-3])[\\d]{6:7}))",
"mobile": "((98[\\d]{6:7})|(975(1[\\d])|(96)[\\d]{4})|(9(0[1-9])|([1259][\\d])|(7[0679])[\\d]{6}))",
},
"HT": {
"code": "509",
"pattern": "((2(2[\\d])|(5[1-5])|(81)|(9[149])[\\d]{5}))",
"mobile": "(([34][\\d]{7}))",
},
"HU": {
"code": "36",
"pattern": "(((1[\\d])|([27][2-9])|(3[2-7])|(4[24-9])|(5[2-79])|(6[23689])|(8[2-57-9])|(9[2-69])[\\d]{6}))",
"mobile": "((([257]0)|(3[01])[\\d]{7}))",
},
"ID": {
"code": "62",
"pattern": "((2[124][\\d]{7:8})|(619[\\d]{8})|(2(1(14)|(500))|(2[\\d]{3})[\\d]{3})|(61[\\d]{5:8})|((2([35][1-4])|(6[0-8])|(7[1-6])|(8[\\d])|(9[1-8]))|(3(1)|([25][1-8])|(3[1-68])|(4[1-3])|(6[1-3568])|(7[0-469])|(8[\\d]))|(4(0[1-589])|(1[01347-9])|(2[0-36-8])|(3[0-24-68])|(43)|(5[1-378])|(6[1-5])|(7[134])|(8[1245]))|(5(1[1-35-9])|(2[25-8])|(3[124-9])|(4[1-3589])|(5[1-46])|(6[1-8]))|(6([25][\\d])|(3[1-69])|(4[1-6]))|(7(02)|([125][1-9])|([36][\\d])|(4[1-8])|(7[0-36-9]))|(9(0[12])|(1[013-8])|(2[0-479])|(5[125-8])|(6[23679])|(7[159])|(8[01346]))[\\d]{5:8}))",
"mobile": "((8[1-35-9][\\d]{7:10}))",
},
"IE": {
"code": "353",
"pattern": "(((1[\\d])|(21)[\\d]{6:7})|((2[24-9])|(4(0[24])|(5[\\d])|(7))|(5(0[45])|(1[\\d])|(8))|(6(1[\\d])|([237-9]))|(9(1[\\d])|([35-9]))[\\d]{5})|((23)|(4([1-469])|(8[\\d]))|(5[23679])|(6[4-6])|(7[14])|(9[04])[\\d]{7}))",
"mobile": "((8(22)|([35-9][\\d])[\\d]{6}))",
},
"IL": {
"code": "972",
"pattern": "((153[\\d]{8:9})|(29[1-9][\\d]{5})|((2[0-8])|([3489][\\d])[\\d]{6}))",
"mobile": "((5(([02368][\\d])|([19][2-9])|(4[1-9])[\\d])|(5(01)|(1[79])|(2[2-9])|(3[0-3])|(4[34])|(5[015689])|(6[6-8])|(7[0-267])|(8[7-9])|(9[1-9]))[\\d]{5}))",
},
"IM": {
"code": "44",
"pattern": "((1624(230)|([5-8][\\d][\\d])[\\d]{3}))",
"mobile": "((76245[06][\\d]{4})|(7(4576)|([59]24[\\d])|(624[0-4689])[\\d]{5}))",
},
"IN": {
"code": "91",
"pattern": "((2717([2-7][\\d])|(95)[\\d]{4})|((271[0-689])|(782[0-6])[2-7][\\d]{5})|((170[24])|(2(([02][2-79])|(90)[\\d])|(80[13468]))|((3(23)|(80))|(683)|(79[1-7])[\\d])|(4(20[24])|(72[2-8]))|(552[1-7])[\\d]{6})|((11)|(33)|(4[04])|(80)[2-7][\\d]{7})|((342)|(674)|(788)([0189][2-7])|([2-7][\\d])[\\d]{5})|((1(2[0-249])|(3[0-25])|(4[145])|([59][14])|(6[014])|(7[1257])|(8[01346]))|(2(1[257])|(3[013])|(4[01])|(5[0137])|(6[0158])|(78)|(8[1568])|(9[14]))|(3(26)|(4[13])|(5[34])|(6[01489])|(7[02-46])|(8[159]))|(4(1[36])|(2[1-47])|(3[15])|(5[12])|(6[0-26-9])|(7[014-9])|(8[013-57])|(9[014-7]))|(5(1[025])|(22)|([36][25])|(4[28])|([578]1)|(9[15]))|(6(12)|([2-47]1)|(5[17])|(6[13])|(80))|(7(12)|(2[14])|(3[134])|(4[47])|(5[15])|([67]1))|(8(16)|(2[014])|(3[126])|(6[136])|(7[078])|(8[34])|(91))[2-7][\\d]{6})|((1(2[35-8])|(3[346-9])|(4[236-9])|([59][0235-9])|(6[235-9])|(7[34689])|(8[257-9]))|(2(1[134689])|(3[24-8])|(4[2-8])|(5[25689])|(6[2-4679])|(7[3-79])|(8[2-479])|(9[235-9]))|(3(01)|(1[79])|(2[1245])|(4[5-8])|(5[125689])|(6[235-7])|(7[157-9])|(8[2-46-8]))|(4(1[14578])|(2[5689])|(3[2-467])|(5[4-7])|(6[35])|(73)|(8[2689])|(9[2389]))|(5([16][146-9])|(2[14-8])|(3[1346])|(4[14-69])|(5[46])|(7[2-4])|(8[2-8])|(9[246]))|(6(1[1358])|(2[2457])|(3[2-4])|(4[235-7])|(5[2-689])|(6[24578])|(7[235689])|(8[124-6]))|(7(1[013-9])|(2[0235-9])|(3[2679])|(4[1-35689])|(5[2-46-9])|([67][02-9])|(8[013-7])|(9[089]))|(8(1[1357-9])|(2[235-8])|(3[03-57-9])|(4[0-24-9])|(5[\\d])|(6[2457-9])|(7[1-6])|(8[1256])|(9[2-4]))[\\d][2-7][\\d]{5}))",
"mobile": "(((61279)|(7(887[02-9])|(9(313)|(79[07-9])))|(8(079[04-9])|((84)|(91)7[02-8]))[\\d]{5})|((6(12)|([2-47]1)|(5[17])|(6[13])|(80)[0189])|(7(1(2[0189])|(9[0-5]))|(2([14][017-9])|(8[0-59]))|(3(2[5-8])|([34][017-9])|(9[016-9]))|(4(1[015-9])|([29][89])|(39)|(8[389]))|(5([15][017-9])|(2[04-9])|(9[7-9]))|(6(0[0-47])|(1[0-257-9])|(2[0-4])|(3[19])|(5[4589]))|(70[0289])|(88[089])|(97[02-8]))|(8(0(6[67])|(7[02-8]))|(70[017-9])|(84[01489])|(91[0-289]))[\\d]{6})|((7(31)|(4[47]))|(8(16)|(2[014])|(3[126])|(6[136])|(7[78])|(83))([0189][\\d])|(7[02-8])[\\d]{5})|((6([09][\\d])|(1[04679])|(2[03689])|(3[05-9])|(4[0489])|(50)|(6[069])|(7[07])|(8[7-9]))|(7(0[\\d])|(2[0235-79])|(3[05-8])|(40)|(5[0346-8])|(6[6-9])|(7[1-9])|(8[0-79])|(9[089]))|(8(0[01589])|(1[0-57-9])|(2[235-9])|(3[03-57-9])|([45][\\d])|(6[02457-9])|(7[1-69])|(8[0-25-9])|(9[02-9]))|(9[\\d][\\d])[\\d]{7})|((6((1[1358])|(2[2457])|(3[2-4])|(4[235-7])|(5[2-689])|(6[24578])|(8[124-6])[\\d])|(7([235689][\\d])|(4[0189])))|(7(1([013-8][\\d])|(9[6-9]))|(28[6-8])|(3(2[0-49])|(9[2-5]))|(4(1[2-4])|([29][0-7])|(3[0-8])|([56][\\d])|(8[0-24-7]))|(5(2[1-3])|(9[0-6]))|(6(0[5689])|(2[5-9])|(3[02-8])|(4[\\d])|(5[0-367]))|(70[13-7])|(881))[0189][\\d]{5}))",
},
"IO": {"code": "246", "pattern": "((37[\\d]{5}))", "mobile": "((38[\\d]{5}))"},
"IQ": {
"code": "964",
"pattern": "((1[\\d]{7})|((2[13-5])|(3[02367])|(4[023])|(5[03])|(6[026])[\\d]{6:7}))",
"mobile": "((7[3-9][\\d]{8}))",
},
"IR": {
"code": "98",
"pattern": "(((1[137])|(2[13-68])|(3[1458])|(4[145])|(5[1468])|(6[16])|(7[1467])|(8[13467])([03-57][\\d]{7})|([16][\\d]{3}([\\d]{4})?)|([289][\\d]{3}([\\d]([\\d]{3})?)?))|(94(000[09])|(2(121)|([2689]0[\\d]))|(30[0-2][\\d])|(4(111)|(40[\\d]))[\\d]{4}))",
"mobile": "((9((0([0-35][\\d])|(4[4-6]))|(([13][\\d])|(2[0-3])[\\d])[\\d])|(9([0-46][\\d][\\d])|(5[15]0)|(8(1[\\d])|(88))|(9(0[013])|([19][\\d])|(21)|(77)|(8[7-9])))[\\d]{5}))",
},
"IS": {
"code": "354",
"pattern": "(((4(1[0-24-69])|(2[0-7])|([37][0-8])|(4[0-24589])|(5[0-68])|(6[\\d])|(8[0-36-8]))|(5(05)|([156][\\d])|(2[02578])|(3[0-579])|(4[03-7])|(7[0-2578])|(8[0-35-9])|(9[013-689]))|(872)[\\d]{4}))",
"mobile": "(((38[589][\\d][\\d])|(6(1[1-8])|(2[0-6])|(3[026-9])|(4[014679])|(5[0159])|(6[0-69])|(70)|(8[06-8])|(9[\\d]))|(7(5[057])|([6-9][\\d]))|(8(2[0-59])|([3-69][\\d])|(8[28]))[\\d]{4}))",
},
"IT": {
"code": "39",
"pattern": "((0669[0-79][\\d]{1:6})|(0(1([0159][\\d])|([27][1-5])|(31)|(4[1-4])|(6[1356])|(8[2-57]))|(2[\\d][\\d])|(3([0159][\\d])|(2[1-4])|(3[12])|([48][1-6])|(6[2-59])|(7[1-7]))|(4([0159][\\d])|([23][1-9])|(4[245])|(6[1-5])|(7[1-4])|(81))|(5([0159][\\d])|(2[1-5])|(3[2-6])|(4[1-79])|(6[4-6])|(7[1-578])|(8[3-8]))|(6([0-57-9][\\d])|(6[0-8]))|(7([0159][\\d])|(2[12])|(3[1-7])|(4[2-46])|(6[13569])|(7[13-6])|(8[1-59]))|(8([0159][\\d])|(2[3-578])|(3[1-356])|([6-8][1-5]))|(9([0159][\\d])|([238][1-5])|(4[12])|(6[1-8])|(7[1-6]))[\\d]{2:7}))",
"mobile": "((3[1-9][\\d]{8})|(3[2-9][\\d]{7}))",
},
"JE": {
"code": "44",
"pattern": "((1534[0-24-8][\\d]{5}))",
"mobile": "((7(((50)|(82)9)|(937)[\\d])|(7(00[378])|(97[7-9]))[\\d]{5}))",
},
"JM": {
"code": "1",
"pattern": "((8766060[\\d]{3})|((658(2([0-8][\\d])|(9[0-46-9]))|([3-9][\\d][\\d]))|(876(52[35])|(6(0[1-3579])|(1[02357-9])|([23][\\d])|(40)|(5[06])|(6[2-589])|(7[0257])|(8[04])|(9[4-9]))|(7(0[2-689])|([1-6][\\d])|(8[056])|(9[45]))|(9(0[1-8])|(1[02378])|([2-8][\\d])|(9[2-468])))[\\d]{4}))",
"mobile": "(((658295)|(876(2(0[2-9])|([14-9][\\d])|(2[013-9])|(3[3-9]))|([348][\\d][\\d])|(5(0[1-9])|([1-9][\\d]))|(6(4[89])|(6[67]))|(7(0[07])|(7[\\d])|(8[1-47-9])|(9[0-36-9]))|(9([01]9)|(9[0579])))[\\d]{4}))",
},
"JO": {
"code": "962",
"pattern": "((87(000)|(90[01])[\\d]{3})|((2(6(2[0-35-9])|(3[0-578])|(4[24-7])|(5[0-24-8])|([6-8][023])|(9[0-3]))|(7(0[1-79])|(10)|(2[014-7])|(3[0-689])|(4[019])|(5[0-3578])))|(32(0[1-69])|(1[1-35-7])|(2[024-7])|(3[\\d])|(4[0-3])|([5-7][023]))|(53(0[0-3])|([13][023])|(2[0-59])|(49)|(5[0-35-9])|(6[15])|(7[45])|(8[1-6])|(9[0-36-9]))|(6(2([05]0)|(22))|(3(00)|(33))|(4(0[0-25])|(1[2-7])|(2[0569])|([38][07-9])|(4[025689])|(6[0-589])|(7[\\d])|(9[0-2]))|(5([01][056])|(2[034])|(3[0-57-9])|(4[178])|(5[0-69])|(6[0-35-9])|(7[1-379])|(8[0-68])|(9[0239])))|(87(20)|(7[078])|(99))[\\d]{4}))",
"mobile": "((7([78][0-25-9])|(9[\\d])[\\d]{6}))",
},
"JP": {
"code": "81",
"pattern": "(((1(1[235-8])|(2[3-6])|(3[3-9])|(4[2-6])|([58][2-8])|(6[2-7])|(7[2-9])|(9[1-9]))|((2[2-9])|([36][1-9])[\\d])|(4([2-578][\\d])|(6[02-8])|(9[2-59]))|(5([2-589][\\d])|(6[1-9])|(7[2-8]))|(7([25-9][\\d])|(3[4-9])|(4[02-9]))|(8([2679][\\d])|(3[2-9])|(4[5-9])|(5[1-9])|(8[03-9]))|(9([2-58][\\d])|([679][1-9]))[\\d]{6}))",
"mobile": "(([7-9]0[1-9][\\d]{7}))",
},
"KE": {
"code": "254",
"pattern": "(((4[245])|(5[1-79])|(6[01457-9])[\\d]{5:7})|((4[136])|(5[08])|(62)[\\d]{7})|(([24]0)|(66)[\\d]{6:7}))",
"mobile": "(((1(0[0-6])|(1[0-5])|(2[014]))|(7[\\d][\\d])[\\d]{6}))",
},
"KG": {
"code": "996",
"pattern": "((312(5[0-79][\\d])|(9([0-689][\\d])|(7[0-24-9]))[\\d]{3})|((3(1(2[0-46-8])|(3[1-9])|(47)|([56][\\d]))|(2(22)|(3[0-479])|(6[0-7]))|(4(22)|(5[6-9])|(6[\\d]))|(5(22)|(3[4-7])|(59)|(6[\\d]))|(6(22)|(5[35-7])|(6[\\d]))|(7(22)|(3[468])|(4[1-9])|(59)|([67][\\d]))|(9(22)|(4[1-8])|(6[\\d])))|(6(09)|(12)|(2[2-4])[\\d])[\\d]{5}))",
"mobile": "((312(58[\\d])|(973)[\\d]{3})|((2(0[0-35])|(2[\\d]))|(5[0-24-7][\\d])|(7([07][\\d])|(55))|(880)|(99[05-9])[\\d]{6}))",
},
"KH": {
"code": "855",
"pattern": "((23(4([2-4])|([56][\\d]))|([568][\\d][\\d])[\\d]{4})|(23[236-9][\\d]{5})|((2[4-6])|(3[2-6])|(4[2-4])|([5-7][2-5])(([237-9])|(4[56])|(5[\\d])[\\d]{5})|(6[\\d]{5:6})))",
"mobile": "((((1[28])|(3[18])|(9[67])[\\d])|(6[016-9])|(7([07-9])|([16][\\d]))|(8([013-79])|(8[\\d]))[\\d]{6})|((1[\\d])|(9[0-57-9])[\\d]{6})|((2[3-6])|(3[2-6])|(4[2-4])|([5-7][2-5])48[\\d]{5}))",
},
"KI": {
"code": "686",
"pattern": "((([24][\\d])|(3[1-9])|(50)|(65(02[12])|(12[56])|(22[89])|([3-5]00))|(7(27[\\d][\\d])|(3100)|(5(02[12])|(12[56])|(22[89])|([34](00)|(81))|(500)))|(8[0-5])[\\d]{3}))",
"mobile": "(((63[\\d]{3})|(73(0[0-5][\\d])|(140))[\\d]{3})|([67]200[01][\\d]{3}))",
},
"KM": {
"code": "269",
"pattern": "((7[4-7][\\d]{5}))",
"mobile": "(([34][\\d]{6}))",
},
"KN": {
"code": "1",
"pattern": "((869(2(29)|(36))|(302)|(4(6[015-9])|(70))|(56[5-7])[\\d]{4}))",
"mobile": "((869(48[89])|(55[6-8])|(66[\\d])|(76[02-7])[\\d]{4}))",
},
"KP": {
"code": "850",
"pattern": "((((195)|(2)[\\d])|(3[19])|(4[159])|(5[37])|(6[17])|(7[39])|(85)[\\d]{6}))",
"mobile": "((19[1-3][\\d]{7}))",
},
"KR": {
"code": "82",
"pattern": "(((2)|(3[1-3])|([46][1-4])|(5[1-5])[1-9][\\d]{6:7})|((3[1-3])|([46][1-4])|(5[1-5])1[\\d]{2:3}))",
"mobile": "((1(05([0-8][\\d])|(9[0-6]))|(22[13][\\d])[\\d]{4:5})|(1(0[1-46-9])|([16-9][\\d])|(2[013-9])[\\d]{6:7}))",
},
"KW": {
"code": "965",
"pattern": "((2([23][\\d][\\d])|(4([1-35-9][\\d])|(44))|(5(0[034])|([2-46][\\d])|(5[1-3])|(7[1-7]))[\\d]{4}))",
"mobile": "(((41[\\d][\\d])|(5(([05][\\d])|(1[0-7])|(6[56])[\\d])|(2(22)|(5[25]))|(7(55)|(77))|(88[58]))|(6((0[034679])|(5[015-9])|(6[\\d])[\\d])|(111)|(222)|(333)|(444)|(7(0[013-9])|([67][\\d]))|(888)|(9([069][\\d])|(3[039])))|(9((0[09])|(22)|([4679][\\d])|(8[057-9])[\\d])|(1(1[01])|(99))|(3(00)|(33))|(5(00)|(5[\\d])))[\\d]{4}))",
},
"KY": {
"code": "1",
"pattern": "((345(2(22)|(3[23])|(44)|(66))|(333)|(444)|(6(23)|(38)|(40))|(7(30)|(4[35-79])|(6[6-9])|(77))|(8(00)|(1[45])|([48]8))|(9(14)|(4[035-9]))[\\d]{4}))",
"mobile": "((345(32[1-9])|(42[0-4])|(5(1[67])|(2[5-79])|(4[6-9])|(50)|(76))|(649)|(82[56])|(9(1[679])|(2[2-9])|(3[06-9])|(90))[\\d]{4}))",
},
"KZ": {
"code": "7",
"pattern": "(((33622)|(7(1(0([23][\\d])|(4[0-3])|(59)|(63))|(1([23][\\d])|(4[0-79])|(59))|(2([23][\\d])|(59))|(3(2[\\d])|(3[0-79])|(4[0-35-9])|(59))|(4([24][\\d])|(3[013-9])|(5[1-9]))|(5(2[\\d])|(3[1-9])|(4[0-7])|(59))|(6([2-4][\\d])|(5[19])|(61))|(72[\\d])|(8([27][\\d])|(3[1-46-9])|(4[0-5])))|(2(1([23][\\d])|(4[46-9])|(5[3469]))|(2(2[\\d])|(3[0679])|(46)|(5[12679]))|(3([2-4][\\d])|(5[139]))|(4(2[\\d])|(3[1-35-9])|(59))|(5([23][\\d])|(4[0-246-8])|(59)|(61))|(6(2[\\d])|(3[1-9])|(4[0-4])|(59))|(7([2379][\\d])|(40)|(5[279]))|(8([23][\\d])|(4[0-3])|(59))|(9(2[\\d])|(3[124578])|(59))))[\\d]{5}))",
"mobile": "((7(0[0-25-8])|(47)|(6[0-4])|(7[15-8])|(85)[\\d]{7}))",
},
"LA": {
"code": "856",
"pattern": "(((2[13])|([35-7][14])|(41)|(8[1468])[\\d]{6}))",
"mobile": "(((20([239][\\d])|(5[24-9])|(7[6-8])|(88))|(302[\\d])[\\d]{6}))",
},
"LB": {
"code": "961",
"pattern": "((7(62)|(8[0-7])|(9[04-9])[\\d]{4})|(([14-69][\\d])|(2([14-69][\\d])|([78][1-9]))|(7[2-57])|(8[02-9])[\\d]{5}))",
"mobile": "((793([01][\\d])|(2[0-4])[\\d]{3})|(((3)|(81)[\\d])|(7([01][\\d])|(6[013-9])|(8[89])|(9[12]))[\\d]{5}))",
},
"LC": {
"code": "1",
"pattern": "((758(234)|(4(30)|(5[\\d])|(6[2-9])|(8[0-2]))|(57[0-2])|((63)|(75)8)[\\d]{4}))",
"mobile": "((758(28[4-7])|(384)|(4(6[01])|(8[4-9]))|(5(1[89])|(20)|(84))|(7(1[2-9])|(2[\\d])|(3[0-3]))|(812)[\\d]{4}))",
},
"LI": {
"code": "423",
"pattern": "(((2(01)|(1[27])|(2[02])|(3[\\d])|(6[02-578])|(96))|(3([24]0)|(33)|(7[0135-7])|(8[048])|(9[0269]))[\\d]{4}))",
"mobile": "(((6((4[5-9])|(5[0-4])[\\d])|(6([0245][\\d])|([17]0)|(3[7-9]))[\\d])|(7([37-9][\\d])|(42)|(56))[\\d]{4}))",
},
"LK": {
"code": "94",
"pattern": "(((12[2-9])|(602)|(8[12][\\d])|(9(1[\\d])|(22)|(9[245]))[\\d]{6})|((11)|(2[13-7])|(3[1-8])|(4[157])|(5[12457])|(6[35-7])[2-57][\\d]{6}))",
"mobile": "((7([0-25-8][\\d])|(4[0-4])[\\d]{6}))",
},
"LR": {
"code": "231",
"pattern": "(((2[\\d]{3})|(33333)[\\d]{4}))",
"mobile": "((((330)|(555)|((77)|(88)[\\d])[\\d])|(4[67])[\\d]{5})|([56][\\d]{6}))",
},
"LS": {"code": "266", "pattern": "((2[\\d]{7}))", "mobile": "(([56][\\d]{7}))"},
"LT": {
"code": "370",
"pattern": "(((3[1478])|(4[124-6])|(52)[\\d]{6}))",
"mobile": "((6[\\d]{7}))",
},
"LU": {
"code": "352",
"pattern": "(((35[013-9])|(80[2-9])|(90[89])[\\d]{1:8})|((2[2-9])|(3[0-46-9])|([457][\\d])|(8[13-9])|(9[2-579])[\\d]{2:9}))",
"mobile": "((6([269][18])|(5[1568])|(7[189])|(81)[\\d]{6}))",
},
"LV": {"code": "371", "pattern": "((6[\\d]{7}))", "mobile": "((2[\\d]{7}))"},
"LY": {
"code": "218",
"pattern": "(((2(0[56])|([1-6][\\d])|(7[124579])|(8[124]))|(3(1[\\d])|(2[2356]))|(4([17][\\d])|(2[1-357])|(5[2-4])|(8[124]))|(5([1347][\\d])|(2[1-469])|(5[13-5])|(8[1-4]))|(6([1-479][\\d])|(5[2-57])|(8[1-5]))|(7([13][\\d])|(2[13-79]))|(8([124][\\d])|(5[124])|(84))[\\d]{6}))",
"mobile": "((9[1-6][\\d]{7}))",
},
"MA": {
"code": "212",
"pattern": "((5(29([189][05])|(2[29])|(3[01]))|(38(8[057])|(9[05]))[\\d]{4})|(5(2([0-25-7][\\d])|(3[1-578])|(4[02-46-8])|(8[0235-7])|(90))|(3([0-47][\\d])|(5[02-9])|(6[02-8])|(80)|(9[3-9]))|((4[067])|(5[03])[\\d])[\\d]{5}))",
"mobile": "(((6([0-79][\\d])|(8[0-247-9]))|(7([017][\\d])|(6[0-367]))[\\d]{6}))",
},
"MC": {
"code": "377",
"pattern": "(((870)|(9[2-47-9][\\d])[\\d]{5}))",
"mobile": "((4([46][\\d])|(5[1-9])[\\d]{5})|((3)|(6[\\d])[\\d]{7}))",
},
"MD": {
"code": "373",
"pattern": "((((2[1-9])|(3[1-79])[\\d])|(5(33)|(5[257]))[\\d]{5}))",
"mobile": "((562[\\d]{5})|((6[\\d])|(7[16-9])[\\d]{6}))",
},
"ME": {
"code": "382",
"pattern": "(((20[2-8])|(3([0-2][2-7])|(3[24-7]))|(4(0[2-467])|(1[2467]))|(5(0[2467])|(1[24-7])|(2[2-467]))[\\d]{5}))",
"mobile": "((6([07-9][\\d])|(3[024])|(6[0-25])[\\d]{5}))",
},
"MF": {
"code": "590",
"pattern": "((590(0[079])|([14]3)|([27][79])|(30)|(5[0-268])|(87)[\\d]{4}))",
"mobile": "((69(0[\\d][\\d])|(1(2[2-9])|(3[0-5]))[\\d]{4}))",
},
"MG": {
"code": "261",
"pattern": "((2072[29][\\d]{4})|(20(2[\\d])|(4[47])|(5[3467])|(6[279])|(7[35])|(8[268])|(9[245])[\\d]{5}))",
"mobile": "((3[2-489][\\d]{7}))",
},
"MH": {
"code": "692",
"pattern": "(((247)|(528)|(625)[\\d]{4}))",
"mobile": "((((23)|(54)5)|(329)|(45[56])[\\d]{4}))",
},
"MK": {
"code": "389",
"pattern": "((((2(62)|(77)0)|(3444)[\\d])|(4[56]440)[\\d]{3})|((34)|(4[357])700[\\d]{3})|((2([23][\\d])|(5[0-578])|(6[01])|(82))|(3(1[3-68])|([23][2-68])|(4[23568]))|(4([23][2-68])|(4[3-68])|(5[2568])|(6[25-8])|(7[24-68])|(8[4-68]))[\\d]{5}))",
"mobile": "((7(3555)|(4(60[\\d])|(747))|(94([01][\\d])|(2[0-4]))[\\d]{3})|(7([0-25-8][\\d])|(3[1-4])|(42)|(9[23])[\\d]{5}))",
},
"ML": {
"code": "223",
"pattern": "((2(07[0-8])|(12[67])[\\d]{4})|((2(02)|(1[4-689]))|(4(0[0-4])|(4[1-39]))[\\d]{5}))",
"mobile": "((2(0(01)|(79))|(17[\\d])[\\d]{4})|((5[01])|([679][\\d])|(8[239])[\\d]{6}))",
},
"MM": {
"code": "95",
"pattern": "(((1((2[\\d])|(3[56])|([89][0-6])[\\d])|(4(2[2-469])|(39)|(46)|(6[25])|(7[0-3])|(83))|(6))|(2(2(00)|(8[34]))|(4(0[\\d])|(2[246])|(39)|(46)|(62)|(7[0-3])|(83))|(51[\\d][\\d]))|(4(2(2[\\d][\\d])|(48[0-3]))|(3(20[\\d])|(4(70)|(83))|(56))|(420[\\d])|(5470))|(6(0([23])|(88[\\d]))|((124)|([56]2[\\d])[\\d])|(247[23])|(3(20[\\d])|(470))|(4(2[04][\\d])|(47[23]))|(7((3[\\d])|(8[01459])[\\d])|(4(39)|(60)|(7[013]))))[\\d]{4})|(5(2(2[\\d]{5:6})|(47[023][\\d]{4}))|((347[23])|(4(2(1)|(86))|(470))|(522[\\d])|(6(20[\\d])|(483))|(7(20[\\d])|(48[0-2]))|(8(20[\\d])|(47[02]))|(9(20[\\d])|(47[01]))[\\d]{4}))|(7((0470)|(4(25[\\d])|(470))|(5(202)|(470)|(96[\\d]))[\\d]{4})|(1(20[\\d]{4:5})|(4(70)|(83)[\\d]{4})))|(8(1(2[\\d]{5:6})|(4(10)|(7[01][\\d])[\\d]{3}))|(2(2[\\d]{5:6})|((320)|(490[\\d])[\\d]{3}))|((3(2[\\d][\\d])|(470))|(4[24-7])|(5(2[\\d])|(4[1-9])|(51)[\\d])|(6[23])[\\d]{4}))|((1[2-6][\\d])|(4(2[24-8])|(3[2-7])|([46][2-6])|(5[3-5]))|(5([27][2-8])|(3[2-68])|(4[24-8])|(5[23])|(6[2-4])|(8[24-7])|(9[2-7]))|(6([19]20)|(42[03-6])|((52)|(7[45])[\\d]))|(7([04][24-8])|([15][2-7])|(22)|(3[2-4]))|(8(1[2-689])|(2[2-8])|([35]2[\\d]))[\\d]{4})|(25[\\d]{5:6})|((2[2-9])|(6(1[2356])|([24][2-6])|(3[24-6])|(5[2-4])|(6[2-8])|(7[235-7])|(8[245])|(9[24]))|(8(3[24])|(5[245]))[\\d]{4}))",
"mobile": "(((17[01])|(9(2([0-4])|([56][\\d][\\d]))|((3([0-36])|(4[\\d]))|((6[\\d])|(8[89])|(9[4-8])[\\d])|(7(3)|(40)|([5-9][\\d]))[\\d])|(4(([0245][\\d])|([1379])[\\d])|(88))|(5[0-6])[\\d])[\\d]{4})|(9[69]1[\\d]{6})|(9([68][\\d])|(9[089])[\\d]{5}))",
},
"MN": {
"code": "976",
"pattern": "(([12]2[1-3][\\d]{5:6})|(7(0[0-5][\\d])|(128)[\\d]{4})|(([12](1)|(27))|(5[368])[\\d]{6})|([12](3[2-8])|(4[2-68])|(5[1-4689])[\\d]{6:7}))",
"mobile": "(((83[01])|(920)[\\d]{5})|((5[05])|(8[05689])|(9[013-9])[\\d]{6}))",
},
"MO": {
"code": "853",
"pattern": "(((28[2-9])|(8(11)|([2-57-9][\\d]))[\\d]{5}))",
"mobile": "((6800[0-79][\\d]{3})|(6([235][\\d][\\d])|(6(0[0-5])|([1-9][\\d]))|(8(0[1-9])|([14-8][\\d])|(2[5-9])|([39][0-4]))[\\d]{4}))",
},
"MP": {
"code": "1",
"pattern": "((670(2(3[3-7])|(56)|(8[4-8]))|(32[1-38])|(4(33)|(8[348]))|(5(32)|(55)|(88))|(6(64)|(70)|(82))|(78[3589])|(8[3-9]8)|(989)[\\d]{4}))",
"mobile": "((670(2(3[3-7])|(56)|(8[4-8]))|(32[1-38])|(4(33)|(8[348]))|(5(32)|(55)|(88))|(6(64)|(70)|(82))|(78[3589])|(8[3-9]8)|(989)[\\d]{4}))",
},
"MQ": {
"code": "596",
"pattern": "((596([04-7][\\d])|(10)|(2[7-9])|(3[014-9])|(8[09])|(9[4-9])[\\d]{4}))",
"mobile": "((69(6([0-46-9][\\d])|(5[0-6]))|(727)[\\d]{4}))",
},
"MR": {
"code": "222",
"pattern": "(((25[08])|(35[\\d])|(45[1-7])[\\d]{5}))",
"mobile": "(([2-4][0-46-9][\\d]{6}))",
},
"MS": {
"code": "1",
"pattern": "((6644(1[0-3])|(91)[\\d]{4}))",
"mobile": "((664(3(49)|(9[1-6]))|(49[2-6])[\\d]{4}))",
},
"MT": {
"code": "356",
"pattern": "((20(3[1-4])|(6[059])[\\d]{4})|(2(0[19])|([1-357][\\d])|(60)[\\d]{5}))",
"mobile": "(((7(210)|([79][\\d][\\d]))|(9([29][\\d][\\d])|(69[67])|(8(1[1-3])|(89)|(97)))[\\d]{4}))",
},
"MU": {
"code": "230",
"pattern": "(((2([0346-8][\\d])|(1[0-7]))|(4([013568][\\d])|(2[4-7]))|(54([3-5][\\d])|(71))|(6[\\d][\\d])|(8(14)|(3[129]))[\\d]{4}))",
"mobile": "((5(4(2[1-389])|(7[1-9]))|(87[15-8])[\\d]{4})|(5(2[5-9])|(4[3-689])|([57][\\d])|(8[0-689])|(9[0-8])[\\d]{5}))",
},
"MV": {
"code": "960",
"pattern": "(((3(0[0-3])|(3[0-59]))|(6([57][02468])|(6[024-68])|(8[024689]))[\\d]{4}))",
"mobile": "((46[46][\\d]{4})|((7[\\d])|(9[13-9])[\\d]{5}))",
},
"MW": {
"code": "265",
"pattern": "(((1[2-9])|(2[12][\\d][\\d])[\\d]{5}))",
"mobile": "((111[\\d]{6})|((31)|(77)|(88)|(9[89])[\\d]{7}))",
},
"MX": {
"code": "52",
"pattern": "((6571[\\d]{6})|((2(0[01])|(2[1-9])|(3[1-35-8])|(4[13-9])|(7[1-689])|(8[1-578])|(9[467]))|(3(1[1-79])|([2458][1-9])|(3[\\d])|(7[1-8])|(9[1-5]))|(4(1[1-57-9])|([25-7][1-9])|(3[1-8])|(4[\\d])|(8[1-35-9])|(9[2-689]))|(5([56][\\d])|(88)|(9[1-79]))|(6(1[2-68])|([2-4][1-9])|(5[1-3689])|(6[1-57-9])|(7[1-7])|(8[67])|(9[4-8]))|(7([1-467][1-9])|(5[13-9])|(8[1-69])|(9[17]))|(8(1[\\d])|(2[13-689])|(3[1-6])|(4[124-6])|(6[1246-9])|(7[1-378])|(9[12479]))|(9(1[346-9])|(2[1-4])|(3[2-46-8])|(5[1348])|(6[1-9])|(7[12])|(8[1-8])|(9[\\d]))[\\d]{7}))",
"mobile": "((6571[\\d]{6})|((1(2(2[1-9])|(3[1-35-8])|(4[13-9])|(7[1-689])|(8[1-578])|(9[467]))|(3(1[1-79])|([2458][1-9])|(3[\\d])|(7[1-8])|(9[1-5]))|(4(1[1-57-9])|([24-7][1-9])|(3[1-8])|(8[1-35-9])|(9[2-689]))|(5([56][\\d])|(88)|(9[1-79]))|(6(1[2-68])|([2-4][1-9])|(5[1-3689])|(6[1-57-9])|(7[1-7])|(8[67])|(9[4-8]))|(7([1-467][1-9])|(5[13-9])|(8[1-69])|(9[17]))|(8(1[\\d])|(2[13-689])|(3[1-6])|(4[124-6])|(6[1246-9])|(7[1-378])|(9[12479]))|(9(1[346-9])|(2[1-4])|(3[2-46-8])|(5[1348])|([69][1-9])|(7[12])|(8[1-8])))|(2(2[1-9])|(3[1-35-8])|(4[13-9])|(7[1-689])|(8[1-578])|(9[467]))|(3(1[1-79])|([2458][1-9])|(3[\\d])|(7[1-8])|(9[1-5]))|(4(1[1-57-9])|([25-7][1-9])|(3[1-8])|(4[\\d])|(8[1-35-9])|(9[2-689]))|(5([56][\\d])|(88)|(9[1-79]))|(6(1[2-68])|([2-4][1-9])|(5[1-3689])|(6[1-57-9])|(7[1-7])|(8[67])|(9[4-8]))|(7([1-467][1-9])|(5[13-9])|(8[1-69])|(9[17]))|(8(1[\\d])|(2[13-689])|(3[1-6])|(4[124-6])|(6[1246-9])|(7[1-378])|(9[12479]))|(9(1[346-9])|(2[1-4])|(3[2-46-8])|(5[1348])|(6[1-9])|(7[12])|(8[1-8])|(9[\\d]))[\\d]{7}))",
},
"MY": {
"code": "60",
"pattern": "(((3(2[0-36-9])|(3[0-368])|(4[0-278])|(5[0-24-8])|(6[0-467])|(7[1246-9])|(8[\\d])|(9[0-57])[\\d])|(4(2[0-689])|([3-79][\\d])|(8[1-35689]))|(5(2[0-589])|([3468][\\d])|(5[0-489])|(7[1-9])|(9[23]))|(6(2[2-9])|(3[1357-9])|([46][\\d])|(5[0-6])|(7[0-35-9])|(85)|(9[015-8]))|(7([2579][\\d])|(3[03-68])|(4[0-8])|(6[5-9])|(8[0-35-9]))|(8([24][2-8])|(3[2-5])|(5[2-7])|(6[2-589])|(7[2-578])|([89][2-9]))|(9(0[57])|(13)|([25-7][\\d])|([3489][0-8]))[\\d]{5}))",
"mobile": "((1(1888[69])|(4400)|(8(47)|(8[27])[0-4])[\\d]{4})|(1(0([23568][\\d])|(4[0-6])|(7[016-9])|(9[0-8]))|(1([1-5][\\d][\\d])|(6(0[5-9])|([1-9][\\d]))|(7([0134][\\d])|(2[1-9])|(5[0-6])))|((([269])|(59)[\\d])|([37][1-9])|(4[235-9])[\\d])|(8(1[23])|([236][\\d])|(4[06])|(5[7-9])|(7[016-9])|(8[01])|(9[0-8]))[\\d]{5}))",
},
"MZ": {
"code": "258",
"pattern": "((2([1346][\\d])|(5[0-2])|([78][12])|(93)[\\d]{5}))",
"mobile": "((8[2-79][\\d]{7}))",
},
"NA": {
"code": "264",
"pattern": "((64426[\\d]{3})|(6(1(2[2-7])|(3[01378])|(4[0-4]))|(254)|(32[0237])|(4(27)|(41)|(5[25]))|(52[236-8])|(626)|(7(2[2-4])|(30))[\\d]{4:5})|(6(1((0[\\d])|(2[0189])|(3[24-69])|(4[5-9])[\\d])|(17)|(69)|(7[014]))|(2(17)|(5[0-36-8])|(69)|(70))|(3(17)|(2[14-689])|(34)|(6[289])|(7[01])|(81))|(4(17)|(2[0-2])|(4[06])|(5[0137])|(69)|(7[01]))|(5(17)|(2[0459])|(69)|(7[01]))|(6(17)|(25)|(38)|(42)|(69)|(7[01]))|(7(17)|(2[569])|(3[13])|(6[89])|(7[01]))[\\d]{4}))",
"mobile": "(((60)|(8[1245])[\\d]{7}))",
},
"NC": {
"code": "687",
"pattern": "(((2[03-9])|(3[0-5])|(4[1-7])|(88)[\\d]{4}))",
"mobile": "(((5[0-4])|([79][\\d])|(8[0-79])[\\d]{4}))",
},
"NE": {
"code": "227",
"pattern": "((2(0(20)|(3[1-8])|(4[13-5])|(5[14])|(6[14578])|(7[1-578]))|(1(4[145])|(5[14])|(6[14-68])|(7[169])|(88))[\\d]{4}))",
"mobile": "(((23)|(7[04])|([89][\\d])[\\d]{6}))",
},
"NF": {
"code": "672",
"pattern": "(((1(06)|(17)|(28)|(39))|(3[0-2][\\d])[\\d]{3}))",
"mobile": "(((14)|(3[58])[\\d]{4}))",
},
"NG": {
"code": "234",
"pattern": "(((([1-356][\\d])|(4[02-8])|(8[2-9])[\\d])|(9(0[3-9])|([1-9][\\d]))[\\d]{5})|(7(0([013-689][\\d])|(2[0-24-9])[\\d]{3:4})|([1-79][\\d]{6}))|(([12][\\d])|(4[147])|(5[14579])|(6[1578])|(7[1-3578])[\\d]{5}))",
"mobile": "(((702[0-24-9])|(8(01)|(19)[01])[\\d]{6})|((70[13-689])|(8(0[2-9])|(1[0-8]))|(9(0[1-9])|(1[2356]))[\\d]{7}))",
},
"NI": {
"code": "505",
"pattern": "((2[\\d]{7}))",
"mobile": "(((5(5[0-7])|([78][\\d]))|(6(20)|(3[035])|(4[045])|(5[05])|(77)|(8[1-9])|(9[059]))|((7[5-8])|(8[\\d])[\\d])[\\d]{5}))",
},
"NL": {
"code": "31",
"pattern": "(((1([035][\\d])|(1[13-578])|(6[124-8])|(7[24])|(8[0-467]))|(2([0346][\\d])|(2[2-46-9])|(5[125])|(9[479]))|(3([03568][\\d])|(1[3-8])|(2[01])|(4[1-8]))|(4([0356][\\d])|(1[1-368])|(7[58])|(8[15-8])|(9[23579]))|(5([0358][\\d])|([19][1-9])|(2[1-57-9])|(4[13-8])|(6[126])|(7[0-3578]))|(7[\\d][\\d])[\\d]{6}))",
"mobile": "((6[1-58][\\d]{7}))",
},
"NO": {
"code": "47",
"pattern": "(((2[1-4])|(3[1-3578])|(5[1-35-7])|(6[1-4679])|(7[0-8])[\\d]{6}))",
"mobile": "(((4[015-8])|(59)|(9[\\d])[\\d]{6}))",
},
"NP": {
"code": "977",
"pattern": "(((1[0-6][\\d])|(99[02-6])[\\d]{5})|((2[13-79])|(3[135-8])|(4[146-9])|(5[135-7])|(6[13-9])|(7[15-9])|(8[1-46-9])|(9[1-7])[2-6][\\d]{5}))",
"mobile": "((9(6[0-3])|(7[245])|(8[0-24-68])[\\d]{7}))",
},
"NR": {
"code": "674",
"pattern": "((444[\\d]{4}))",
"mobile": "(((55[3-9])|(666)|(8[\\d][\\d])[\\d]{4}))",
},
"NU": {
"code": "683",
"pattern": "(([47][\\d]{3}))",
"mobile": "((888[4-9][\\d]{3}))",
},
"NZ": {
"code": "64",
"pattern": "((24099[\\d]{3})|((3[2-79])|([49][2-9])|(6[235-9])|(7[2-57-9])[\\d]{6}))",
"mobile": "((2[0-27-9][\\d]{7:8})|(21[\\d]{6}))",
},
"OM": {
"code": "968",
"pattern": "((2[2-6][\\d]{6}))",
"mobile": "((1505[\\d]{4})|((7([1289][\\d])|(7[0-4]))|(9(0[1-9])|([1-9][\\d]))[\\d]{5}))",
},
"PA": {
"code": "507",
"pattern": "(((1(0[\\d])|(1[479])|(2[37])|(3[0137])|(4[17])|(5[05])|(6[58])|(7[0167])|(8[258])|(9[1389]))|(2([0235-79][\\d])|(1[0-7])|(4[013-9])|(8[02-9]))|(3([089][\\d])|(1[0-7])|(2[0-5])|(33)|(4[0-79])|(5[0-35])|(6[068])|(7[0-8]))|(4(00)|(3[0-579])|(4[\\d])|(7[0-57-9]))|(5([01][\\d])|(2[0-7])|([56]0)|(79))|(7(0[09])|(2[0-26-8])|(3[03])|(4[04])|(5[05-9])|(6[056])|(7[0-24-9])|(8[5-9])|(90))|(8(09)|(2[89])|(3[\\d])|(4[0-24-689])|(5[014])|(8[02]))|(9(0[5-9])|(1[0135-8])|(2[036-9])|(3[35-79])|(40)|(5[0457-9])|(6[05-9])|(7[04-9])|(8[35-8])|(9[\\d]))[\\d]{4}))",
"mobile": "(((1[16]1)|(21[89])|(6[\\d]{3})|(8(1[01])|(7[23]))[\\d]{4}))",
},
"PE": {
"code": "51",
"pattern": "((((4[34])|(5[14])[0-8][\\d])|(7(173)|(3[0-8][\\d]))|(8(10[05689])|(6(0[06-9])|(1[6-9])|(29))|(7(0[569])|([56]0)))[\\d]{4})|((1[0-8])|(4[12])|(5[236])|(6[1-7])|(7[246])|(8[2-4])[\\d]{6}))",
"mobile": "((9[\\d]{8}))",
},
"PF": {
"code": "689",
"pattern": "((4(0[4-689])|(9[4-68])[\\d]{5}))",
"mobile": "((8[7-9][\\d]{6}))",
},
"PG": {
"code": "675",
"pattern": "((((3[0-2])|(4[257])|(5[34])|(9[78])[\\d])|(64[1-9])|(85[02-46-9])[\\d]{4}))",
"mobile": "(((7[\\d])|(8[18])[\\d]{6}))",
},
"PH": {
"code": "63",
"pattern": "((((2[3-8])|(3[2-68])|(4[2-9])|(5[2-6])|(6[2-58])|(7[24578])[\\d]{3})|(88(22[\\d][\\d])|(42))[\\d]{4})|((2)|(8[2-8][\\d][\\d])[\\d]{5}))",
"mobile": "(((8(1[37])|(9[5-8]))|(9(0[5-9])|(1[0-24-9])|([235-7][\\d])|(4[2-9])|(8[135-9])|(9[1-9]))[\\d]{7}))",
},
"PK": {
"code": "92",
"pattern": "((((21)|(42)[2-9])|(58[126])[\\d]{7})|((2[25])|(4[0146-9])|(5[1-35-7])|(6[1-8])|(7[14])|(8[16])|(91)[2-9][\\d]{6:7})|((2(3[2358])|(4[2-4])|(9[2-8]))|(45[3479])|(54[2-467])|(60[468])|(72[236])|(8(2[2-689])|(3[23578])|(4[3478])|(5[2356]))|(9(2[2-8])|(3[27-9])|(4[2-6])|(6[3569])|(9[25-8]))[2-9][\\d]{5:6}))",
"mobile": "((3([0-24][\\d])|(3[0-7])|(55)|(64)[\\d]{7}))",
},
"PL": {
"code": "48",
"pattern": "((47[\\d]{7})|((1[2-8])|(2[2-69])|(3[2-4])|(4[1-468])|(5[24-689])|(6[1-3578])|(7[14-7])|(8[1-79])|(9[145])([02-9][\\d]{6})|(1([0-8][\\d]{5})|(9[\\d]{3}([\\d]{2})?))))",
"mobile": "((21(1([145][\\d])|(3[1-5]))|(2[0-4][\\d])[\\d]{4})|((45)|(5[0137])|(6[069])|(7[2389])|(88)[\\d]{7}))",
},
"PM": {
"code": "508",
"pattern": "(((4[1-3])|(50)[\\d]{4}))",
"mobile": "(((4[02-4])|(5[056])[\\d]{4}))",
},
"PR": {
"code": "1",
"pattern": "(((787)|(939)[2-9][\\d]{6}))",
"mobile": "(((787)|(939)[2-9][\\d]{6}))",
},
"PS": {
"code": "970",
"pattern": "(((22[2-47-9])|(42[45])|(82[014-68])|(92[3569])[\\d]{5}))",
"mobile": "((5[69][\\d]{7}))",
},
"PT": {
"code": "351",
"pattern": "((2([12][\\d])|([35][1-689])|(4[1-59])|(6[1-35689])|(7[1-9])|(8[1-69])|(9[1256])[\\d]{6}))",
"mobile": "((6[0356]92(30)|(9[\\d])[\\d]{3})|(((16)|(6[0356])93)|(9([1-36][\\d][\\d])|(480))[\\d]{5}))",
},
"PW": {
"code": "680",
"pattern": "(((2(55)|(77))|(345)|(488)|(5(35)|(44)|(87))|(6(22)|(54)|(79))|(7(33)|(47))|(8(24)|(55)|(76))|(900)[\\d]{4}))",
"mobile": "((((46)|(83)[0-5])|(6[2-4689]0)[\\d]{4})|((45)|(77)|(88)[\\d]{5}))",
},
"PY": {
"code": "595",
"pattern": "((([26]1)|(3[289])|(4[1246-8])|(7[1-3])|(8[1-36])[\\d]{5:7})|((2(2[4-68])|([4-68][\\d])|(7[15])|(9[1-5]))|(3(18)|(3[167])|(4[2357])|(51)|([67][\\d]))|(4(3[12])|(5[13])|(9[1-47]))|(5([1-4][\\d])|(5[02-4]))|(6(3[1-3])|(44)|(7[1-8]))|(7(4[0-4])|(5[\\d])|(6[1-578])|(75)|(8[0-8]))|(858)[\\d]{5:6}))",
"mobile": "((9(51)|(6[129])|([78][1-6])|(9[1-5])[\\d]{6}))",
},
"QA": {
"code": "974",
"pattern": "((4141[\\d]{4})|((23)|(4[04])[\\d]{6}))",
"mobile": "(((2[89])|([35-7][\\d])[\\d]{6}))",
},
"RE": {
"code": "262",
"pattern": "((26(2[\\d][\\d])|(30[0-5])[\\d]{4}))",
"mobile": "(((69(2[\\d][\\d])|(3(0[0-46])|(1[013])|(2[0-2])|(3[0-39])|(4[\\d])|(5[0-5])|(6[0-6])|(7[0-27])|(8[0-8])|(9[0-479])))|(9769[\\d])[\\d]{4}))",
},
"RO": {
"code": "40",
"pattern": "(([23][13-6][\\d]{7})|((2(19[\\d])|([3-6][\\d]9))|(31[\\d][\\d])[\\d][\\d]))",
"mobile": "((7020[\\d]{5})|(7(0[013-9])|(1[0-3])|([2-7][\\d])|(8[03-8])|(9[019])[\\d]{6}))",
},
"RS": {
"code": "381",
"pattern": "(((11[1-9][\\d])|((2[389])|(39)(0[2-9])|([2-9][\\d]))[\\d]{3:8})|((1[02-9])|(2[0-24-7])|(3[0-8])[2-9][\\d]{4:9}))",
"mobile": "((6([0-689])|(7[\\d])[\\d]{6:7}))",
},
"RU": {
"code": "7",
"pattern": "(((3(0[12])|(4[1-35-79])|(5[1-3])|(65)|(8[1-58])|(9[0145]))|(4(01)|(1[1356])|(2[13467])|(7[1-5])|(8[1-7])|(9[1-689]))|(8(1[1-8])|(2[01])|(3[13-6])|(4[0-8])|(5[15])|(6[1-35-79])|(7[1-37-9]))[\\d]{7}))",
"mobile": "((9[\\d]{9}))",
},
"RW": {
"code": "250",
"pattern": "(((06)|(2[23568][\\d])[\\d]{6}))",
"mobile": "((7[2389][\\d]{7}))",
},
"SA": {
"code": "966",
"pattern": "((1(1[\\d])|(2[24-8])|(3[35-8])|(4[3-68])|(6[2-5])|(7[235-7])[\\d]{6}))",
"mobile": "((579[01][\\d]{5})|(5([013-689][\\d])|(7[0-35-8])[\\d]{6}))",
},
"SB": {
"code": "677",
"pattern": "(((1[4-79])|([23][\\d])|(4[0-2])|(5[03])|(6[0-37])[\\d]{3}))",
"mobile": "((48[\\d]{3})|(((7[1-9])|(8[4-9])[\\d])|(9(1[2-9])|(2[013-9])|(3[0-2])|([46][\\d])|(5[0-46-9])|(7[0-689])|(8[0-79])|(9[0-8]))[\\d]{4}))",
},
"SC": {
"code": "248",
"pattern": "((4[2-46][\\d]{5}))",
"mobile": "((2[125-8][\\d]{5}))",
},
"SD": {
"code": "249",
"pattern": "((1(5[\\d])|(8[35-7])[\\d]{6}))",
"mobile": "(((1[0-2])|(9[0-3569])[\\d]{7}))",
},
"SE": {
"code": "46",
"pattern": "(((([12][136])|(3[356])|(4[0246])|(6[03])|(8[\\d])[\\d])|(90[1-9])[\\d]{4:6})|((1(2[0-35])|(4[0-4])|(5[0-25-9])|(7[13-6])|([89][\\d]))|(2(2[0-7])|(4[0136-8])|(5[0138])|(7[018])|(8[01])|(9[0-57]))|(3(0[0-4])|(1[\\d])|(2[0-25])|(4[056])|(7[0-2])|(8[0-3])|(9[023]))|(4(1[013-8])|(3[0135])|(5[14-79])|(7[0-246-9])|(8[0156])|(9[0-689]))|(5(0[0-6])|([15][0-5])|(2[0-68])|(3[0-4])|(4[\\d])|(6[03-5])|(7[013])|(8[0-79])|(9[01]))|(6(1[1-3])|(2[0-4])|(4[02-57])|(5[0-37])|(6[0-3])|(7[0-2])|(8[0247])|(9[0-356]))|(9(1[0-68])|(2[\\d])|(3[02-5])|(4[0-3])|(5[0-4])|([68][01])|(7[0135-8]))[\\d]{5:6}))",
"mobile": "((7[02369][\\d]{7}))",
},
"SG": {
"code": "65",
"pattern": "((662[0-24-9][\\d]{4})|(6([0-578][\\d])|(6[013-57-9])|(9[0-35-9])[\\d]{5}))",
"mobile": "((8(051)|(95[0-2])[\\d]{4})|((8(0[1-4])|([1-8][\\d])|(9[0-4]))|(9[0-8][\\d])[\\d]{5}))",
},
"SH": {
"code": "290",
"pattern": "((2([0-57-9][\\d])|(6[4-9])[\\d][\\d]))",
"mobile": "(([56][\\d]{4}))",
},
"SI": {
"code": "386",
"pattern": "((([1-357][2-8])|(4[24-8])[\\d]{6}))",
"mobile": "((65(1[\\d])|(55)|([67]0)[\\d]{4})|(([37][01])|(4[0139])|(51)|(6[489])[\\d]{6}))",
},
"SJ": {
"code": "47",
"pattern": "((79[\\d]{6}))",
"mobile": "(((4[015-8])|(59)|(9[\\d])[\\d]{6}))",
},
"SK": {
"code": "421",
"pattern": "(((2(16)|([2-9][\\d]{3}))|((([3-5][1-8][\\d])|(819)[\\d])|(601[1-5])[\\d])[\\d]{4})|((2)|([3-5][1-8])1[67][\\d]{3})|([3-5][1-8]16[\\d][\\d]))",
"mobile": "((909[1-9][\\d]{5})|(9(0[1-8])|(1[0-24-9])|(4[03-57-9])|(5[\\d])[\\d]{6}))",
},
"SL": {
"code": "232",
"pattern": "((22[2-4][2-9][\\d]{4}))",
"mobile": "(((25)|(3[0-5])|(66)|(7[2-9])|(8[08])|(9[09])[\\d]{6}))",
},
"SM": {
"code": "378",
"pattern": "((0549(8[0157-9])|(9[\\d])[\\d]{4}))",
"mobile": "((6[16][\\d]{6}))",
},
"SN": {
"code": "221",
"pattern": "((3(0(1[0-2])|(80))|(282)|(3(8[1-9])|(9[3-9]))|(611)[\\d]{5}))",
"mobile": "((75(01)|([38]3)[\\d]{5})|(7([06-8][\\d])|(21)|(5[4-7])|(90)[\\d]{6}))",
},
"SO": {
"code": "252",
"pattern": "(((1[\\d])|(2[0-79])|(3[0-46-8])|(4[0-7])|(5[57-9])[\\d]{5})|(([134][\\d])|(8[125])[\\d]{4}))",
"mobile": "((((15)|((3[59])|(4[89])|(79)|(8[08])[\\d])|(6(0[5-7])|([1-9][\\d]))|(9(0[\\d])|([2-9]))[\\d])|(2(4[\\d])|(8))[\\d]{5})|((6[\\d])|(7[1-9])[\\d]{6}))",
},
"SR": {
"code": "597",
"pattern": "(((2[1-3])|(3[0-7])|((4)|(68)[\\d])|(5[2-58])[\\d]{4}))",
"mobile": "(((7[124-7])|(8[124-9])[\\d]{5}))",
},
"SS": {
"code": "211",
"pattern": "((1[89][\\d]{7}))",
"mobile": "(((12)|(9[1257-9])[\\d]{7}))",
},
"ST": {
"code": "239",
"pattern": "((22[\\d]{5}))",
"mobile": "((900[5-9][\\d]{3})|(9(0[1-9])|([89][\\d])[\\d]{4}))",
},
"SV": {
"code": "503",
"pattern": "((2([1-6][\\d]{3})|([79]90[034])|(890[0245])[\\d]{3}))",
"mobile": "((66([02-9][\\d][\\d])|(1([02-9][\\d])|(16))[\\d]{3})|((6[0-57-9])|(7[\\d])[\\d]{6}))",
},
"SX": {
"code": "1",
"pattern": "((7215(4[2-8])|(8[239])|(9[056])[\\d]{4}))",
"mobile": "((7215(1[02])|(2[\\d])|(5[034679])|(8[014-8])[\\d]{4}))",
},
"SY": {
"code": "963",
"pattern": "((21[\\d]{6:7})|((1([14][\\d])|([2356]))|(2[235])|(3([13][\\d])|(4))|(4[134])|(5[1-3])[\\d]{6}))",
"mobile": "((9(22)|([3-689][\\d])[\\d]{6}))",
},
"SZ": {
"code": "268",
"pattern": "(([23][2-5][\\d]{6}))",
"mobile": "((7[6-9][\\d]{6}))",
},
"TA": {"code": "290", "pattern": "((8[\\d]{3}))"},
"TC": {
"code": "1",
"pattern": "((649(266)|(712)|(9(4[\\d])|(50))[\\d]{4}))",
"mobile": "((649(2(3[129])|(4[1-79]))|(3[\\d][\\d])|(4[34][1-3])[\\d]{4}))",
},
"TD": {
"code": "235",
"pattern": "((22([37-9]0)|(5[0-5])|(6[89])[\\d]{4}))",
"mobile": "(((6[023568])|(77)|(9[\\d])[\\d]{6}))",
},
"TG": {
"code": "228",
"pattern": "((2(2[2-7])|(3[23])|(4[45])|(55)|(6[67])|(77)[\\d]{5}))",
"mobile": "(((7[09])|(9[0-36-9])[\\d]{6}))",
},
"TH": {
"code": "66",
"pattern": "(((1[0689])|(2[\\d])|(3[2-9])|(4[2-5])|(5[2-6])|(7[3-7])[\\d]{6}))",
"mobile": "((671[0-8][\\d]{5})|((14)|(6[1-6])|([89][\\d])[\\d]{7}))",
},
"TJ": {
"code": "992",
"pattern": "(((3(1[3-5])|(2[245])|(3[12])|(4[24-7])|(5[25])|(72))|(4(46)|(74)|(87))[\\d]{6}))",
"mobile": "((41[18][\\d]{6})|(([034]0)|([17][017])|(2[02])|(5[05])|(8[08])|(9[\\d])[\\d]{7}))",
},
"TK": {
"code": "690",
"pattern": "(((2[2-4])|([34][\\d])[\\d]{2:5}))",
"mobile": "((7[2-4][\\d]{2:5}))",
},
"TL": {
"code": "670",
"pattern": "(((2[1-5])|(3[1-9])|(4[1-4])[\\d]{5}))",
"mobile": "((7[2-8][\\d]{6}))",
},
"TM": {
"code": "993",
"pattern": "(((1(2[\\d])|(3[1-9]))|(2(22)|(4[0-35-8]))|(3(22)|(4[03-9]))|(4(22)|(3[128])|(4[\\d])|(6[15]))|(5(22)|(5[7-9])|(6[014-689]))[\\d]{5}))",
"mobile": "((6[\\d]{7}))",
},
"TN": {
"code": "216",
"pattern": "((81200[\\d]{3})|((3[0-2])|(7[\\d])[\\d]{6}))",
"mobile": "((3(001)|([12]40)[\\d]{4})|((([259][\\d])|(4[0-7])[\\d])|(3(1[1-35])|(6[0-4])|(91))[\\d]{5}))",
},
"TO": {
"code": "676",
"pattern": "(((2[\\d])|(3[0-8])|(4[0-4])|(50)|(6[09])|(7[0-24-69])|(8[05])[\\d]{3}))",
"mobile": "(((55[4-6])|(6([09][\\d])|(3[02])|(8[15-9]))|((7[\\d])|(8[46-9])[\\d])|(999)[\\d]{4}))",
},
"TR": {
"code": "90",
"pattern": "(((2([13][26])|([28][2468])|([45][268])|([67][246]))|(3([13][28])|([24-6][2468])|([78][02468])|(92))|(4([16][246])|([23578][2468])|(4[26]))[\\d]{7}))",
"mobile": "((56161[\\d]{5})|(5(0[15-7])|(1[06])|(24)|([34][\\d])|(5[1-59])|(9[46])[\\d]{7}))",
},
"TT": {
"code": "1",
"pattern": "((868(2(0[13])|(1[89])|([23][\\d])|(4[0-2]))|(6(0[7-9])|(1[02-8])|(2[1-9])|([3-69][\\d])|(7[0-79]))|(82[124])[\\d]{4}))",
"mobile": "((868((2[5-9])|(3[\\d])[\\d])|(4(3[0-6])|([6-9][\\d]))|(6(20)|(78)|(8[\\d]))|(7(0[1-9])|(1[02-9])|([2-9][\\d]))[\\d]{4}))",
},
"TV": {
"code": "688",
"pattern": "((2[02-9][\\d]{3}))",
"mobile": "(((7[01][\\d])|(90)[\\d]{4}))",
},
"TW": {
"code": "886",
"pattern": "(((2[2-8][\\d])|(370)|(55[01])|(7[1-9])[\\d]{6})|(4((0(0[1-9])|([2-48][\\d]))|(1[023][\\d])[\\d]{4:5})|(([239][\\d][\\d])|(4(0[56])|(12)|(49))[\\d]{5}))|(6([01][\\d]{7})|(4(0[56])|(12)|(24)|(4[09])[\\d]{4:5}))|(8((2(3[\\d])|(4[0-269])|([578]0)|(66))|(36[24-9])|(90[\\d][\\d])[\\d]{4})|(4(0[56])|(12)|(24)|(4[09])[\\d]{4:5}))|((2(2(0[\\d][\\d])|(4(0[68])|([249]0)|(3[0-467])|(5[0-25-9])|(6[0235689])))|((3([09][\\d])|(1[0-4]))|((4[\\d])|(5[0-49])|(6[0-29])|(7[0-5])[\\d])[\\d]))|(((3[2-9])|(5[2-8])|(6[0-35-79])|(8[7-9])[\\d][\\d])|(4(2([089][\\d])|(7[1-9]))|((3[0-4])|([78][\\d])|(9[01])[\\d]))[\\d])[\\d]{3}))",
"mobile": "(((40001[0-2])|(9[0-8][\\d]{4})[\\d]{3}))",
},
"TZ": {
"code": "255",
"pattern": "((2[2-8][\\d]{7}))",
"mobile": "((77[2-9][\\d]{6})|((6[1-9])|(7[1-689])[\\d]{7}))",
},
"UA": {
"code": "380",
"pattern": "(((3[1-8])|(4[13-8])|(5[1-7])|(6[12459])[\\d]{7}))",
"mobile": "(((50)|(6[36-8])|(7[1-3])|(9[1-9])[\\d]{7}))",
},
"UG": {
"code": "256",
"pattern": "((20(((24)|(81)0)|(30[67])[\\d])|(6(00[0-2])|(30[0-4]))[\\d]{3})|((20([0147][\\d])|(2[5-9])|(32)|(5[0-4])|(6[15-9]))|([34][\\d]{3})[\\d]{5}))",
"mobile": "((726[01][\\d]{5})|(7([0157-9][\\d])|(20)|(36)|([46][0-4])[\\d]{6}))",
},
"US": {
"code": "1",
"pattern": "((5(05([2-57-9][\\d][\\d])|(6([0-35-9][\\d])|(44)))|(82(2(0[0-3])|([268]2))|(3(0[02])|(22)|(33))|(4(00)|(4[24])|(65)|(82))|(5(00)|(29)|(58)|(83))|(6(00)|(66)|(82))|(7(58)|(77))|(8(00)|(42)|(88))|(9(00)|(9[89])))[\\d]{4})|((2(0[1-35-9])|(1[02-9])|(2[03-589])|(3[149])|(4[08])|(5[1-46])|(6[0279])|(7[0269])|(8[13]))|(3(0[1-57-9])|(1[02-9])|(2[01356])|(3[0-24679])|(4[167])|(5[12])|(6[014])|(8[056]))|(4(0[124-9])|(1[02-579])|(2[3-5])|(3[0245])|(4[023578])|(58)|(6[349])|(7[0589])|(8[04]))|(5(0[1-47-9])|(1[0235-8])|(20)|(3[0149])|(4[01])|(5[19])|(6[1-47])|(7[0-5])|(8[056]))|(6(0[1-35-9])|(1[024-9])|(2[03689])|([34][016])|(5[0179])|(6[0-279])|(78)|(8[0-29]))|(7(0[1-46-8])|(1[2-9])|(2[04-7])|(3[1247])|(4[037])|(5[47])|(6[02359])|(7[0-59])|(8[156]))|(8(0[1-68])|(1[02-8])|(2[08])|(3[0-289])|(4[03578])|(5[046-9])|(6[02-5])|(7[028]))|(9(0[1346-9])|(1[02-9])|(2[0589])|(3[0146-8])|(4[01579])|(5[12469])|(7[0-389])|(8[04-69]))[2-9][\\d]{6}))",
"mobile": "((5(05([2-57-9][\\d][\\d])|(6([0-35-9][\\d])|(44)))|(82(2(0[0-3])|([268]2))|(3(0[02])|(22)|(33))|(4(00)|(4[24])|(65)|(82))|(5(00)|(29)|(58)|(83))|(6(00)|(66)|(82))|(7(58)|(77))|(8(00)|(42)|(88))|(9(00)|(9[89])))[\\d]{4})|((2(0[1-35-9])|(1[02-9])|(2[03-589])|(3[149])|(4[08])|(5[1-46])|(6[0279])|(7[0269])|(8[13]))|(3(0[1-57-9])|(1[02-9])|(2[01356])|(3[0-24679])|(4[167])|(5[12])|(6[014])|(8[056]))|(4(0[124-9])|(1[02-579])|(2[3-5])|(3[0245])|(4[023578])|(58)|(6[349])|(7[0589])|(8[04]))|(5(0[1-47-9])|(1[0235-8])|(20)|(3[0149])|(4[01])|(5[19])|(6[1-47])|(7[0-5])|(8[056]))|(6(0[1-35-9])|(1[024-9])|(2[03689])|([34][016])|(5[0179])|(6[0-279])|(78)|(8[0-29]))|(7(0[1-46-8])|(1[2-9])|(2[04-7])|(3[1247])|(4[037])|(5[47])|(6[02359])|(7[0-59])|(8[156]))|(8(0[1-68])|(1[02-8])|(2[08])|(3[0-289])|(4[03578])|(5[046-9])|(6[02-5])|(7[028]))|(9(0[1346-9])|(1[02-9])|(2[0589])|(3[0146-8])|(4[01579])|(5[12469])|(7[0-389])|(8[04-69]))[2-9][\\d]{6}))",
},
"UY": {
"code": "598",
"pattern": "(((1(770)|(987))|((2[\\d])|(4[2-7])[\\d][\\d])[\\d]{4}))",
"mobile": "((9[1-9][\\d]{6}))",
},
"UZ": {
"code": "998",
"pattern": "(((6(1(22)|(3[124])|(4[1-4])|(5[1-3578])|(64))|(2(22)|(3[0-57-9])|(41))|(5(22)|(3[3-7])|(5[024-8]))|(6[\\d][\\d])|(7([23][\\d])|(7[69]))|(9(22)|(4[1-8])|(6[135])))|(7(0(5[4-9])|(6[0146])|(7[124-6])|(9[135-8]))|((1[12])|(8[\\d])[\\d])|(2(22)|(3[13-57-9])|(4[1-3579])|(5[14]))|(3(2[\\d])|(3[1578])|(4[1-35-7])|(5[1-57])|(61))|(4(2[\\d])|(3[1-579])|(7[1-79]))|(5(22)|(5[1-9])|(6[1457]))|(6(22)|(3[12457])|(4[13-8]))|(9(22)|(5[1-9])))[\\d]{5}))",
"mobile": "((((33)|(88)|(9[0-57-9])[\\d]{3})|(55(50[013])|(90[\\d]))|(6(1(2(2[01])|(98))|(35[0-4])|(50[\\d])|(61[23])|(7([01][017])|(4[\\d])|(55)|(9[5-9])))|(2((11)|(7[\\d])[\\d])|(2([12]1)|(9[01379]))|(5([126][\\d])|(3[0-4])))|(5(19[01])|(2(27)|(9[26]))|((30)|(59)|(7[\\d])[\\d]))|(6(2(1[5-9])|(2[0367])|(38)|(41)|(52)|(60))|((3[79])|(9[0-3])[\\d])|(4(56)|(83))|(7([07][\\d])|(1[017])|(3[07])|(4[047])|(5[057])|(67)|(8[0178])|(9[79])))|(7(2(24)|(3[237])|(4[5-9])|(7[15-8]))|(5(7[12])|(8[0589]))|(7(0[\\d])|([39][07]))|(9(0[\\d])|(7[079])))|(9(2(1[1267])|(3[01])|(5[\\d])|(7[0-4]))|((5[67])|(7[\\d])[\\d])|(6(2[0-26])|(8[\\d]))))|(7([07][\\d]{3})|(1(13[01])|(6(0[47])|(1[67])|(66))|(71[3-69])|(98[\\d]))|(2(2(2[79])|(95))|(3(2[5-9])|(6[0-6]))|(57[\\d])|(7(0[\\d])|(1[17])|(2[27])|(3[37])|(44)|(5[057])|(66)|(88)))|(3(2(1[0-6])|(21)|(3[469])|(7[159]))|((33)|(9[4-6])[\\d])|(5(0[0-4])|(5[579])|(9[\\d]))|(7([0-3579][\\d])|(4[0467])|(6[67])|(8[078])))|(4(2(29)|(5[0257])|(6[0-7])|(7[1-57]))|(5(1[0-4])|(8[\\d])|(9[5-9]))|(7(0[\\d])|(1[024589])|(2[0-27])|(3[0137])|([46][07])|(5[01])|(7[5-9])|(9[079]))|(9(7[015-9])|([89][\\d])))|(5(112)|(2(0[\\d])|(2[29])|([49]4))|(3[1568][\\d])|(52[6-9])|(7(0[01578])|(1[017])|([23]7)|(4[047])|([5-7][\\d])|(8[78])|(9[079])))|(6(2(2[1245])|(4[2-4]))|(39[\\d])|(41[179])|(5([349][\\d])|(5[0-2]))|(7(0[017])|([13][\\d])|(22)|(44)|(55)|(67)|(88)))|(9(22[128])|(3(2[0-4])|(7[\\d]))|(57[02569])|(7(2[05-9])|(3[37])|(4[\\d])|(60)|(7[2579])|(87)|(9[07]))))[\\d]{4}))",
},
"VA": {
"code": "39",
"pattern": "((06698[\\d]{1:6}))",
"mobile": "((3[1-9][\\d]{8})|(3[2-9][\\d]{7}))",
},
"VC": {
"code": "1",
"pattern": "((784(266)|(3(6[6-9])|(7[\\d])|(8[0-6]))|(4(38)|(5[0-36-8])|(8[0-8]))|(5(55)|(7[0-2])|(93))|(638)|(784)[\\d]{4}))",
"mobile": "((784(4(3[0-5])|(5[45])|(89)|(9[0-8]))|(5(2[6-9])|(3[0-4]))|(720)[\\d]{4}))",
},
"VE": {
"code": "58",
"pattern": "(((2(12)|(3[457-9])|([467][\\d])|([58][1-9])|(9[1-6]))|([4-6]00)[\\d]{7}))",
"mobile": "((4(1[24-8])|(2[46])[\\d]{7}))",
},
"VG": {
"code": "1",
"pattern": "((284496[0-5][\\d]{3})|(284(229)|(4(22)|(9[45]))|(774)|(8(52)|(6[459]))[\\d]{4}))",
"mobile": "((284496[6-9][\\d]{3})|(284(245)|(3(0[0-3])|(4[0-7])|(68)|(9[34]))|(4(4[0-6])|(68)|(99))|(5(4[0-7])|(68)|(9[69]))[\\d]{4}))",
},
"VI": {
"code": "1",
"pattern": "((340(2(0[0-38])|(2[06-8])|(4[49])|(77))|(3(32)|(44))|(4(2[23])|(44)|(7[34])|(89))|(5(1[34])|(55))|(6(2[56])|(4[23])|(77)|(9[023]))|(7(1[2-57-9])|(2[57])|(7[\\d]))|(884)|(998)[\\d]{4}))",
"mobile": "((340(2(0[0-38])|(2[06-8])|(4[49])|(77))|(3(32)|(44))|(4(2[23])|(44)|(7[34])|(89))|(5(1[34])|(55))|(6(2[56])|(4[23])|(77)|(9[023]))|(7(1[2-57-9])|(2[57])|(7[\\d]))|(884)|(998)[\\d]{4}))",
},
"VN": {
"code": "84",
"pattern": "((2(0[3-9])|(1[0-689])|(2[0-25-9])|(3[2-9])|(4[2-8])|(5[124-9])|(6[0-39])|(7[0-7])|(8[2-79])|(9[0-4679])[\\d]{7}))",
"mobile": "(((5(2[238])|(59))|(89[689])|(99[013-9])[\\d]{6})|((3[\\d])|(5[689])|(7[06-9])|(8[1-8])|(9[0-8])[\\d]{7}))",
},
"VU": {
"code": "678",
"pattern": "(((38[0-8])|(48[4-9])[\\d][\\d])|((2[02-9])|(3[4-7])|(88)[\\d]{3}))",
"mobile": "((([58][\\d])|(7[013-7])[\\d]{5}))",
},
"WF": {
"code": "681",
"pattern": "((72[\\d]{4}))",
"mobile": "(((72)|(8[23])[\\d]{4}))",
},
"WS": {
"code": "685",
"pattern": "((6[1-9][\\d]{3})|(([2-5])|(60)[\\d]{4}))",
"mobile": "(((7[1-35-7])|(8([3-7])|(9[\\d]{3}))[\\d]{5}))",
},
"XK": {
"code": "383",
"pattern": "(((2[89])|(39)0[\\d]{6})|([23][89][\\d]{6}))",
"mobile": "((4[3-9][\\d]{6}))",
},
"YE": {
"code": "967",
"pattern": "((78[0-7][\\d]{4})|(17[\\d]{6})|(([12][2-68])|(3[2358])|(4[2-58])|(5[2-6])|(6[3-58])|(7[24-6])[\\d]{5}))",
"mobile": "((7[0137][\\d]{7}))",
},
"YT": {
"code": "262",
"pattern": "((269(0[67])|(5[0-3])|(6[\\d])|([78]0)[\\d]{4}))",
"mobile": "((639(0[0-79])|(1[019])|([267][\\d])|(3[09])|(40)|(5[05-9])|(9[04-79])[\\d]{4}))",
},
"ZA": {
"code": "27",
"pattern": "(((2(0330)|(4302))|(52087)0[\\d]{3})|((1[0-8])|(2[1-378])|(3[1-69])|(4[\\d])|(5[1346-8])[\\d]{7}))",
"mobile": "(((1(3492[0-25])|(4495[0235])|(549(20)|(5[01])))|(4[34]492[01])[\\d]{3})|(8[1-4][\\d]{3:7})|((2[27])|(47)|(54)4950[\\d]{3})|((1(049[2-4])|(9[12][\\d][\\d]))|((6[\\d])|(7[0-46-9])[\\d]{3})|(8(5[\\d]{3})|(7(08[67])|(158)|(28[5-9])|(310)))[\\d]{4})|((1[6-8])|(28)|(3[2-69])|(4[025689])|(5[36-8])4920[\\d]{3})|((12)|([2-5]1)492[\\d]{4}))",
},
"ZM": {
"code": "260",
"pattern": "((21[1-8][\\d]{6}))",
"mobile": "(((7[679])|(9[5-8])[\\d]{7}))",
},
"ZW": {
"code": "263",
"pattern": "(((1((3[\\d])|(9)[\\d])|([4-8]))|(2(((0(2[014])|(5))|((2[0157])|(31)|(84)|(9)[\\d][\\d])|([56]([14][\\d][\\d])|(20))|(7([089])|(2[03])|([35][\\d][\\d]))[\\d])|(4(2[\\d][\\d])|(8))[\\d])|(1(2)|([39][\\d]{4})))|(3((123)|((29[\\d])|(92)[\\d])[\\d][\\d])|(7([19])|([56][\\d])))|(5(0)|(1[2-478])|(26)|([37]2)|(4(2[\\d]{3})|(83))|(5(25[\\d][\\d])|([78]))|([689][\\d]))|(6(([16-8]21)|(28)|(52[013])[\\d][\\d])|([39]))|(8([1349]28)|(523)[\\d][\\d])[\\d]{3})|((4[\\d][\\d])|(9[2-9])[\\d]{4:5})|(((2(((0)|(8[146])[\\d])|(7[1-7])[\\d])|(2([278][\\d])|(92))|(58(2[\\d])|(3)))|(3([26])|(9[\\d]{3}))|(5(4[\\d])|(5)[\\d][\\d])[\\d])|(6((([0-246])|([78][\\d])[\\d])|(37)[\\d])|(5[2-8]))[\\d][\\d])|((2([569][\\d])|(8[2-57-9]))|(3([013-59][\\d])|(8[37]))|(6[89]8)[\\d]{3}))",
"mobile": "((7([178][\\d])|(3[1-9])[\\d]{6}))",
},
},
}
| 71.280687
| 1,767
| 0.316342
| 16,414
| 87,105
| 1.67875
| 0.06464
| 0.020033
| 0.021194
| 0.002323
| 0.221121
| 0.165124
| 0.152241
| 0.148394
| 0.148394
| 0.147233
| 0
| 0.291839
| 0.169887
| 87,105
| 1,221
| 1,768
| 71.339066
| 0.089244
| 0.001447
| 0
| 0.058484
| 1
| 0.268534
| 0.762254
| 0.687114
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
578e59330093b826b7d82330f59b1553ce7bc3f3
| 4,065
|
py
|
Python
|
tests/app/migrations/0001_initial.py
|
marteinn/wagtail-meta-preview
|
c07a1bace9b9f18dc4c4ef5429eb9b94fdc4eb0b
|
[
"MIT"
] | 22
|
2020-08-02T07:37:48.000Z
|
2022-03-23T08:10:42.000Z
|
tests/app/migrations/0001_initial.py
|
marteinn/wagtail-meta-preview
|
c07a1bace9b9f18dc4c4ef5429eb9b94fdc4eb0b
|
[
"MIT"
] | 2
|
2020-08-02T19:27:12.000Z
|
2020-08-16T10:02:59.000Z
|
tests/app/migrations/0001_initial.py
|
marteinn/wagtail-meta-preview
|
c07a1bace9b9f18dc4c4ef5429eb9b94fdc4eb0b
|
[
"MIT"
] | 2
|
2020-08-02T18:55:44.000Z
|
2020-08-03T12:01:54.000Z
|
# Generated by Django 3.0.8 on 2020-08-02 16:32
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial migration (Django 3.0.8) for the wagtail-meta-preview
# test app: creates three Wagtail Page subclasses used as fixtures.
class Migration(migrations.Migration):
    # First migration of this app, so it carries the full initial schema.
    initial = True

    # Wagtail apps providing the Page and Image models referenced below.
    dependencies = [
        ('wagtailimages', '0022_uploadedimage'),
        ('wagtailcore', '0045_assign_unlock_grouppagepermission'),
    ]

    operations = [
        # Page carrying Twitter-card fields plus untranslated og_*/another_* extras.
        migrations.CreateModel(
            name='TwitterPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('twitter_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Twitter title')),
                ('twitter_description', models.CharField(blank=True, max_length=200, null=True, verbose_name='Twitter description')),
                ('og_title', models.CharField(blank=True, max_length=100)),
                ('og_description', models.CharField(blank=True, max_length=100)),
                ('another_title', models.CharField(blank=True, max_length=100)),
                ('another_description', models.CharField(blank=True, max_length=100)),
                ('og_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Og image')),
                ('twitter_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Twitter image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Page combining both Twitter and Facebook (Open Graph) meta fields.
        migrations.CreateModel(
            name='MetaPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('twitter_title', models.CharField(blank=True, max_length=70, null=True, verbose_name='Twitter title')),
                ('twitter_description', models.CharField(blank=True, max_length=200, null=True, verbose_name='Twitter description')),
                ('og_title', models.CharField(blank=True, max_length=95, null=True, verbose_name='Facebook title')),
                ('og_description', models.CharField(blank=True, max_length=250, null=True, verbose_name='Facebook description')),
                ('og_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Facebook image')),
                ('twitter_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Twitter image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Page carrying only Facebook (Open Graph) meta fields plus extras.
        migrations.CreateModel(
            name='FacebookPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('og_title', models.CharField(blank=True, max_length=95, null=True, verbose_name='Facebook title')),
                ('og_description', models.CharField(blank=True, max_length=250, null=True, verbose_name='Facebook description')),
                ('another_title', models.CharField(blank=True, max_length=100)),
                ('another_description', models.CharField(blank=True, max_length=100)),
                ('og_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='Facebook image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| 60.671642
| 196
| 0.637392
| 446
| 4,065
| 5.630045
| 0.170404
| 0.0681
| 0.111509
| 0.133811
| 0.882119
| 0.882119
| 0.882119
| 0.866189
| 0.84548
| 0.84548
| 0
| 0.019243
| 0.220172
| 4,065
| 66
| 197
| 61.590909
| 0.772871
| 0.01107
| 0
| 0.677966
| 1
| 0
| 0.197362
| 0.009457
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033898
| 0
| 0.101695
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17a7f53b70c142f50e5c21f873cd59472f6b8e56
| 126
|
py
|
Python
|
__init__.py
|
ValdrST/vrtg
|
1315cde2d8978da0f86e81bf1f25f06a710235a7
|
[
"MIT"
] | null | null | null |
__init__.py
|
ValdrST/vrtg
|
1315cde2d8978da0f86e81bf1f25f06a710235a7
|
[
"MIT"
] | null | null | null |
__init__.py
|
ValdrST/vrtg
|
1315cde2d8978da0f86e81bf1f25f06a710235a7
|
[
"MIT"
] | null | null | null |
from . import tools
from .tools import Graficador
from .tools import Server
from .tools import Console
from .main import main
| 21
| 29
| 0.801587
| 19
| 126
| 5.315789
| 0.368421
| 0.267327
| 0.445545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15873
| 126
| 5
| 30
| 25.2
| 0.95283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
17cae3ad1969e8b424f6f6998a9e7af680967188
| 45
|
py
|
Python
|
copyrightextractor/__init__.py
|
samaybhavsar/copyrightextractor
|
9dec2d475575739689e558a6f5beeaa52ff7c925
|
[
"MIT"
] | 1
|
2020-04-28T01:28:54.000Z
|
2020-04-28T01:28:54.000Z
|
copyrightextractor/__init__.py
|
samaybhavsar/copyrightextractor
|
9dec2d475575739689e558a6f5beeaa52ff7c925
|
[
"MIT"
] | 1
|
2018-03-11T03:42:17.000Z
|
2018-04-16T07:00:25.000Z
|
copyrightextractor/__init__.py
|
samaybhavsar/copyrightextractor
|
9dec2d475575739689e558a6f5beeaa52ff7c925
|
[
"MIT"
] | 1
|
2018-03-11T03:42:22.000Z
|
2018-03-11T03:42:22.000Z
|
from copyrightextractor import htmlextractor
| 22.5
| 44
| 0.911111
| 4
| 45
| 10.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
17ddeb24f47466aacb81a1aeba3a815c3adb4246
| 3,268
|
py
|
Python
|
dev/services/wms/ows_refactored/inland_water/wofs/ows_wofs_cfg.py
|
nf-s/dea-config
|
e0fe5fde8a99ad29472c5d14531ecc9208578040
|
[
"Apache-2.0"
] | 14
|
2018-08-20T00:31:50.000Z
|
2022-02-04T08:13:06.000Z
|
dev/services/wms/ows_refactored/inland_water/wofs/ows_wofs_cfg.py
|
nf-s/dea-config
|
e0fe5fde8a99ad29472c5d14531ecc9208578040
|
[
"Apache-2.0"
] | 145
|
2018-06-04T05:06:14.000Z
|
2022-02-22T23:02:00.000Z
|
dev/services/wms/ows_refactored/inland_water/wofs/ows_wofs_cfg.py
|
nf-s/dea-config
|
e0fe5fde8a99ad29472c5d14531ecc9208578040
|
[
"Apache-2.0"
] | 35
|
2018-06-04T05:04:15.000Z
|
2022-01-25T07:48:07.000Z
|
# Folder layer grouping every DEA Water Observations (WOfS) product layer for
# datacube-ows. Every child entry is included by dotted module path and is
# always of type "python", so the child list is generated from the unique path
# suffixes instead of repeating the two-key boilerplate dict 21 times.
layers = {
    "title": "DEA Water Observations",
    "abstract": "Digital Earth Australia (DEA) Water Observations from Space (WOfS)",
    "layers": [
        {
            "include": "ows_refactored.inland_water.wofs." + suffix,
            "type": "python",
        }
        for suffix in (
            # individual observations
            "individual.ows_c3_wo_cfg.layer",
            "individual.ows_s2_wo_cfg.layer",
            # collection-3 summaries (multiyear / annual / seasonal)
            "multiyear.ows_wofs_summary_cfg.c3_wofs_layer",
            "annual.ows_wofs_annual_cfg.c3_statistics_layer",
            "seasonal.ows_wofs_apr_oct_cfg.c3_statistics_layer",
            "seasonal.ows_wofs_nov_mar_cfg.c3_statistics_layer",
            # collection-2 layers
            "c2.ows_wofs_alber_cfg.layer",
            "c2.ows_wofs_annual_cfg.statistics_layer",
            "c2.ows_wofs_annual_cfg.clear_layer",
            "c2.ows_wofs_annual_cfg.wet_layer",
            "c2.ows_wofs_summary_cfg.statistics_layer",
            "c2.ows_wofs_summary_cfg.clear_layer",
            "c2.ows_wofs_summary_cfg.wet_layer",
            "c2.ows_wofs_summary_cfg.freq_layer",
            "c2.ows_wofs_summary_cfg.filtered_layer",
            "c2.ows_wofs_apr_oct_cfg.statistics_layer",
            "c2.ows_wofs_apr_oct_cfg.clear_layer",
            "c2.ows_wofs_apr_oct_cfg.wet_layer",
            "c2.ows_wofs_nov_mar_cfg.statistics_layer",
            "c2.ows_wofs_nov_mar_cfg.clear_layer",
            "c2.ows_wofs_nov_mar_cfg.wet_layer",
        )
    ],
}
| 35.521739
| 108
| 0.556916
| 340
| 3,268
| 4.955882
| 0.114706
| 0.124629
| 0.249258
| 0.324036
| 0.898516
| 0.880712
| 0.880712
| 0.880712
| 0.84451
| 0.814837
| 0
| 0.009276
| 0.307222
| 3,268
| 91
| 109
| 35.912088
| 0.734982
| 0
| 0
| 0.233333
| 0
| 0
| 0.591674
| 0.449648
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
aa0aaf88ff6c65d73442698ef071ddd47fb01dc6
| 3,243
|
py
|
Python
|
aclark/db/migrations/0023_auto_20190705_2318.py
|
aclark4life/aclark-net-1
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
[
"MIT"
] | null | null | null |
aclark/db/migrations/0023_auto_20190705_2318.py
|
aclark4life/aclark-net-1
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
[
"MIT"
] | null | null | null |
aclark/db/migrations/0023_auto_20190705_2318.py
|
aclark4life/aclark-net-1
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.10 on 2019-07-05 23:18
from django.db import migrations
class Migration(migrations.Migration):
    """Drop presentation/customisation columns (icon_* and assorted profile
    flags) from every model; the schema keeps only the data fields."""

    dependencies = [("db", "0022_remove_project_user_hours")]

    # One RemoveField per (model, field) pair, in the original operation order.
    operations = [
        migrations.RemoveField(model_name=model_name, name=field_name)
        for model_name, field_name in (
            ("client", "icon_color"),
            ("client", "icon_name"),
            ("client", "icon_size"),
            ("contact", "icon_color"),
            ("contact", "icon_name"),
            ("contact", "icon_size"),
            ("estimate", "estimate_type"),
            ("estimate", "icon_color"),
            ("estimate", "icon_name"),
            ("estimate", "icon_size"),
            ("invoice", "icon_color"),
            ("invoice", "icon_name"),
            ("invoice", "icon_size"),
            ("note", "icon_color"),
            ("note", "icon_name"),
            ("note", "icon_size"),
            ("profile", "app_admin"),
            ("profile", "dashboard_items"),
            ("profile", "editor"),
            ("profile", "icon_color"),
            ("profile", "icon_name"),
            ("profile", "icon_size"),
            ("profile", "is_contact"),
            ("profile", "notify"),
            ("profile", "payment_method"),
            ("profile", "preferred_username"),
            ("project", "icon_color"),
            ("project", "icon_name"),
            ("project", "icon_size"),
            ("report", "icon_color"),
            ("report", "icon_name"),
            ("report", "icon_size"),
            ("task", "color"),
            ("task", "icon_color"),
            ("task", "icon_name"),
            ("task", "icon_size"),
            ("testimonial", "icon_color"),
            ("testimonial", "icon_name"),
            ("testimonial", "icon_size"),
            ("time", "icon_color"),
            ("time", "icon_name"),
            ("time", "icon_size"),
        )
    ]
| 60.055556
| 80
| 0.708603
| 362
| 3,243
| 6.11326
| 0.146409
| 0.398554
| 0.493448
| 0.569363
| 0.893809
| 0.893809
| 0.792137
| 0.090375
| 0.047899
| 0
| 0
| 0.007184
| 0.141536
| 3,243
| 53
| 81
| 61.188679
| 0.787716
| 0.014184
| 0
| 0
| 1
| 0
| 0.222222
| 0.00939
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a4cffcfbc4e685cad628dea6f7715d011f1934a9
| 19,492
|
py
|
Python
|
packagepy/htmlpy.py
|
salimsuprayogi/test_automation_whatsapp
|
8b980fdb0e8e30a9b253742a704f1ecbc3cc4a34
|
[
"MIT"
] | 2
|
2020-10-28T13:33:38.000Z
|
2022-03-27T17:53:01.000Z
|
packagepy/htmlpy.py
|
salimsuprayogi/test_automation_whatsapp
|
8b980fdb0e8e30a9b253742a704f1ecbc3cc4a34
|
[
"MIT"
] | null | null | null |
packagepy/htmlpy.py
|
salimsuprayogi/test_automation_whatsapp
|
8b980fdb0e8e30a9b253742a704f1ecbc3cc4a34
|
[
"MIT"
] | null | null | null |
#!python3
# -*- coding: utf-8 -*-
# automationpy/packagepy/htmlpy
# salim suprayogi
# created 21 Oktober 2020
# create html with dominate
import dominate
from dominate.tags import *
import os
import io
import whatsapp
def nama_tester(tester="Salim Suprayogi"):
# Function name tester
nama_tester = tester
return nama_tester
def htmlpy_one(data_all, line, title_page, browser_name, tester):
    # Render an HTML test report for ONE CSV line (one "label<line>" entry of
    # data_all) and write it to ../data_report/result_data_html<id>.html.
    #
    # data_all     : dict keyed "label<line>"; each value holds "id", "topik",
    #                "kategori", "nama_label" and a "hasil" mapping of per-row
    #                results ("text", "reply_chat", "answer", "status",
    #                "percentage").
    # line         : CSV line number used to build the "label<line>" key.
    # title_page   : page title shown in the report header and <title>.
    # browser_name : browser label shown in the report header.
    # tester       : tester name shown in the header (see nama_tester).
    #
    # NOTE(review): the loop below re-binds the enumerate index as the string
    # key ("1", "2", ...) used to index "hasil", while the real iteration key
    # (rows) is only displayed as the row number - confirm "hasil" really is
    # keyed by stringified 1-based position.
    # function create file html, each line/row of the csv file
    # if data_all["label{}".format(line)]:
    print(data_all["label{}".format(line)]["id"])
    # html title page
    deta = dominate.document(
        title="Test Automation {}".format(title_page))
    # html head
    with deta.head:
        meta(charset="UTF-8")
        # responsive viewport meta tag assembled from parts
        meta_ctn = "width=device-width"
        meta_is = "initial-scale="
        meta_num = "1.0"
        meta_ctn_all = '{} {}{}'.format(
            meta_ctn, meta_is, meta_num)
        meta(content=meta_ctn_all, name="viewport")
        # css file
        # link(rel="stylesheet", href="./style.css")
        # Inline stylesheet for the report (duplicated in htmlpy()).
        style(
            """\
html {
width: 100%;
}
body {
font-family: "Trebuchet MS", Arial, Helvetica, sans-serif;
font-size: small;
box-sizing: border-box;
}
h1 {
text-align: center;
border: 2px solid whitesmoke;
background-color: #4caf50;
color: white;
padding: 8px;
box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px;
margin-bottom: 15px;
}
.header {
box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px;
border: 1px solid whitesmoke;
/* box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px; */
padding: 8px;
/* padding-bottom: 5px; */
margin-bottom: 15px;
text-align: justify;
}
.profile {
/* box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px;
text-align: left;
border: 1px solid whitesmoke;
background-color: #4caf50;
color: white; */
font-weight: normal;
/* margin: 1px;
padding: 8px; */
}
.header3,
.paragraf {
text-align: justify;
/* border-bottom-style: solid; */
/* border-bottom-color: #83c985; */
/* margin: 2px; */
/* padding: auto; */
margin-top: 15px;
}
h2 {
background-color: #4caf50;
color: white;
padding-top: 10px;
padding-bottom: 10px;
text-align: center;
}
h3,
p {
font-weight: normal;
}
#main {
border-collapse: collapse;
width: 100%;
font-weight: normal;
text-align: justify;
}
.container {
box-shadow: 0px 0px 1px 0px green;
border-radius: 1px;
border: 1px solid whitesmoke;
padding: 5px;
padding-bottom: 20px;
margin-bottom: 15px;
}
#main th {
position: sticky;
top: 0;
}
#main td,
#main th {
border: 1px solid #ddd;
padding: 5px;
}
#main tr:hover {
/*padding-top: 5px;
padding-bottom: 5px;
text-align: left;*/
background-color: #4caf50;
}
#main th {
padding-top: 5px;
padding-bottom: 5px;
text-align: left;
background-color: #4caf50;
color: white;
}
h4 {
text-align: center;
border: 2px solid whitesmoke;
background-color: #ddd;
color: darkslateblue;
padding: 8px;
margin-bottom: 15px;
}
.success {
background-color: #4caf50;
color: white;
text-align: justify;
}
.failed {
background-color: deeppink;
color: white;
text-align: justify;
}
"""
        )
    with deta:
        # html report title
        h1("Test Report {}".format(title_page))
        with div():
            attr(cls="header")
            with p():
                p(
                    "Name : {}".format(nama_tester(tester)), cls="profile"
                )
                p(
                    "Date : {}".format(today_date), cls="profile"
                )
                p(
                    "Title Page : {}".format(title_page), cls="profile"
                )
                p(
                    "Browser Name : {}".format(browser_name), cls="profile"
                )
        # read result data test from dictionary
        with div(cls="container"):
            # data header csv
            # table html
            with table(id="main", cls="table-striped"):
                h2(
                    "Test Results Message {}".format(title_page), cls="header2"
                )
                # get no id csv
                h3(
                    "Id : {}".format(data_all["label{}".format(line)]["id"]), cls="header3"
                )
                # get topik csv
                p(
                    "Topic : {}".format(data_all["label{}".format(line)]["topik"]), cls="paragraf"
                )
                # get Category csv
                p(
                    "Category : {}".format(data_all["label{}".format(line)]["kategori"]), cls="paragraf"
                )
                # get name label csv
                p(
                    "Label : {}".format(data_all["label{}".format(line)]["nama_label"]), cls="paragraf"
                )
                br()
                # header table
                with thead():
                    # row header
                    with tr():
                        th("No")
                        th("Question")
                        th("Reply Chat")
                        th("Answer")
                        th("Status Pass")
                        th("Status Failed")
                        th("Probability")
                # One <tbody>/<tr> per result row.
                for no_row, rows in enumerate(data_all["label{}".format(line)]["hasil"]):
                    # re-bind index as 1-based string key into "hasil"
                    no_row = str(no_row+1)
                    # NOTE(review): "id" shadows the builtin id()
                    id = rows
                    # body table
                    with tbody():
                        # row table
                        with tr():
                            # no 1
                            with td():
                                p(id)
                            # question text
                            with td():
                                p(
                                    data_all["label{}".format(
                                        line)]["hasil"][no_row]["text"],
                                    cls="question-text"
                                )
                            # result bot
                            with td():
                                p(
                                    data_all["label{}".format(
                                        line)]["hasil"][no_row]["reply_chat"],
                                    cls="msg-body"
                                )
                            # answer
                            with td():
                                p(
                                    data_all["label{}".format(
                                        line)]["hasil"][no_row]["answer"]
                                )
                            # status: any "pass" substring marks the row green
                            if "pass" in data_all["label{}".format(line)]["hasil"][no_row]["status"].lower().strip():
                                td(
                                    "PASS",
                                    cls="success"
                                )
                                td("-")
                            else:
                                # "failed" in if "pass" in data_all[no_label]["hasil"][no_row]["status"].lower().strip():
                                td("-")
                                td(
                                    "FAILED",
                                    cls="failed"
                                )
                            with td():
                                p(
                                    data_all["label{}".format(
                                        line)]["hasil"][no_row]["percentage"]
                                )
        # footer html
        with div():
            attr(cls='footer')
            h4(
                "@botika.online by salim suprayogi"
            )
    # Write the rendered document under <repo-root>/data_report/.
    path_change = "data_report"
    extensi = "result_data_html{}.html".format(
        data_all["label{}".format(line)]["id"])
    print("########extensi##############################", extensi)
    # ..\automationpy\packagepy\htmlpy.py
    file_path = os.path.abspath(__file__)
    # ..\automationpy\packagepy
    file_dir = os.path.dirname(file_path)
    # ..\automationpy
    parent_dir = os.path.dirname(file_dir)
    # ..\automationpy\data_report\html.html
    path_location = os.path.join(parent_dir, path_change, extensi)
    with io.open(path_location, mode="w", encoding="utf-8") as filehtml:
        filehtml.write(deta.render())
def htmlpy(data_all, data_webchat, title_page, browser_name, file_html, tester):
    # Render ONE combined HTML report covering every label in data_all and
    # write it to ../data_report/<file_html>.html. Same layout/CSS as
    # htmlpy_one(), but with one "container" section per label.
    #
    # NOTE(review): data_webchat is accepted but never used in this body -
    # confirm whether callers still need the parameter.
    # function create html file from all data test
    # data_webchat = dictionary hasil test ,pass,failed,question,reply chat,answer
    # data_all[str(label)==label1] = {
    #     "id": getcsv_id,
    #     "topik": getcsv_topik,
    #     "kategori": getcsv_category,
    #     "nama_label": getcsv_label,
    #     "hasil": data_status[str(row)==row1{}]
    # }
    # data_all[label1]["id"]
    # data_all[label1]["topik"]
    # data_all[label1]["kategori"]
    # data_all[label1]["nama_label"]
    # data_all[label1]["hasil"]
    # html title page
    deta_all = dominate.document(title="Test Automation {}".format(title_page))
    # html head
    with deta_all.head:
        meta(charset="UTF-8")
        # responsive viewport meta tag assembled from parts
        meta_ctn = "width=device-width"
        meta_is = "initial-scale="
        meta_num = "1.0"
        meta_ctn_all = '{} {}{}'.format(meta_ctn, meta_is, meta_num)
        meta(content=meta_ctn_all, name="viewport")
        # css file
        # link(rel="stylesheet", href="./style.css")
        # Inline stylesheet for the report (duplicated in htmlpy_one()).
        style(
            """\
html {
width: 100%;
}
body {
font-family: "Trebuchet MS", Arial, Helvetica, sans-serif;
font-size: small;
box-sizing: border-box;
}
h1 {
text-align: center;
border: 2px solid whitesmoke;
background-color: #4caf50;
color: white;
padding: 8px;
box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px;
margin-bottom: 15px;
}
.header {
box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px;
border: 1px solid whitesmoke;
/* box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px; */
padding: 8px;
/* padding-bottom: 5px; */
margin-bottom: 15px;
text-align: justify;
}
.profile {
/* box-shadow: 0px 0px 1px 0px grey;
border-radius: 1px;
text-align: left;
border: 1px solid whitesmoke;
background-color: #4caf50;
color: white; */
font-weight: normal;
/* margin: 1px;
padding: 8px; */
}
.header3,
.paragraf {
text-align: justify;
/* border-bottom-style: solid; */
/* border-bottom-color: #83c985; */
/* margin: 2px; */
/* padding: auto; */
margin-top: 15px;
}
h2 {
background-color: #4caf50;
color: white;
padding-top: 10px;
padding-bottom: 10px;
text-align: center;
}
h3,
p {
font-weight: normal;
}
#main {
border-collapse: collapse;
width: 100%;
font-weight: normal;
text-align: justify;
}
.container {
box-shadow: 0px 0px 1px 0px green;
border-radius: 1px;
border: 1px solid whitesmoke;
padding: 5px;
padding-bottom: 20px;
margin-bottom: 15px;
}
#main th {
position: sticky;
top: 0;
}
#main td,
#main th {
border: 1px solid #ddd;
padding: 5px;
}
#main tr:hover {
/*padding-top: 5px;
padding-bottom: 5px;
text-align: left;*/
background-color: #4caf50;
}
#main th {
padding-top: 5px;
padding-bottom: 5px;
text-align: left;
background-color: #4caf50;
color: white;
}
h4 {
text-align: center;
border: 2px solid whitesmoke;
background-color: #ddd;
color: darkslateblue;
padding: 8px;
margin-bottom: 15px;
}
.success {
background-color: #4caf50;
color: white;
text-align: justify;
}
.failed {
background-color: deeppink;
color: white;
text-align: justify;
}
"""
        )
    with deta_all:
        # html report title
        h1("Test Report {}".format(title_page))
        with div():
            attr(cls="header")
            with p():
                p(
                    "Name : {}".format(nama_tester(tester)), cls="profile"
                )
                p(
                    "Date : {}".format(today_date), cls="profile"
                )
                p(
                    "Title Page : {}".format(title_page), cls="profile"
                )
                p(
                    "Browser Name : {}".format(browser_name), cls="profile"
                )
        # read result data test from dictionary
        # One container section per label key in data_all.
        for no_label in data_all:
            with div(cls="container"):
                # data header csv
                # table html
                with table(id="main", cls="table-striped"):
                    h2(
                        "Test Results Message {}".format(title_page), cls="header2"
                    )
                    # get no id csv
                    # data_all[label1==no_label]["id"]
                    h3(
                        "Id : {}".format(data_all[no_label]["id"]), cls="header3"
                    )
                    # get topik csv
                    p(
                        "Topic : {}".format(data_all[no_label]["topik"]), cls="paragraf"
                    )
                    # get category csv
                    p(
                        "Category : {}".format(data_all[no_label]["kategori"]), cls="paragraf"
                    )
                    # get name label csv
                    p(
                        "Label : {}".format(data_all[no_label]["nama_label"]), cls="paragraf"
                    )
                    br()
                    # header table
                    with thead():
                        # row header
                        with tr():
                            th("No")
                            th("Question")
                            th("Reply Chat")
                            th("Answer")
                            th("Status Pass")
                            th("Status Failed")
                            th("Probability")
                    # One <tbody>/<tr> per result row of this label.
                    # NOTE(review): like htmlpy_one(), the enumerate index is
                    # re-bound as the string key used to index "hasil" -
                    # confirm "hasil" is keyed by stringified 1-based position.
                    for no_row, rows in enumerate(data_all[no_label]["hasil"]):
                        no_row = str(no_row+1)
                        # NOTE(review): "id" shadows the builtin id()
                        id = rows
                        # body table
                        with tbody():
                            # row table
                            with tr():
                                # no 1
                                with td():
                                    p(id)
                                # question text
                                with td():
                                    p(
                                        data_all[no_label]["hasil"][no_row]["text"],
                                        cls="question-text"
                                    )
                                # reply_chat
                                with td():
                                    p(
                                        data_all[no_label]["hasil"][no_row]["reply_chat"],
                                        cls="msg-body"
                                    )
                                # answer
                                with td():
                                    p(
                                        data_all[no_label]["hasil"][no_row]["answer"]
                                    )
                                # status
                                # data_all[label1]["hasil"]
                                if "pass" in data_all[no_label]["hasil"][no_row]["status"].lower().strip():
                                    td(
                                        "PASS",
                                        cls="success"
                                    )
                                    td("-")
                                else:
                                    # "failed" in if "pass" in data_all[no_label]["hasil"][no_row]["status"].lower().strip():
                                    td("-")
                                    td(
                                        "FAILED",
                                        cls="failed"
                                    )
                                with td():
                                    p(
                                        data_all[no_label]["hasil"][no_row]["percentage"]
                                    )
        # footer html
        with div():
            attr(cls='footer')
            h4(
                "@botika.online by salim suprayogi"
            )
    # Write the rendered document under <repo-root>/data_report/.
    path_change = "data_report"
    extensi = "{}.html".format(file_html)
    print("########extensi##############################", extensi)
    # ..\automationpy\packagepy\htmlpy.py
    file_path = os.path.abspath(__file__)
    # ..\automationpy\packagepy
    file_dir = os.path.dirname(file_path)
    # ..\automationpy
    parent_dir = os.path.dirname(file_dir)
    # ..\automationpy\data_report\html.html
    path_location = os.path.join(parent_dir, path_change, extensi)
    with io.open(path_location, mode="w", encoding="utf-8") as filehtml:
        filehtml.write(deta_all.render())
# Report date stamp, resolved once at import time by the project's whatsapp
# helper; read by the header section of both report builders above.
today_date = whatsapp.to_days()


def main():
    # Placeholder entry point; this module is used as a library.
    pass


if __name__ == "__main__":
    pass
| 32.814815
| 125
| 0.388467
| 1,613
| 19,492
| 4.586485
| 0.130192
| 0.034063
| 0.018924
| 0.03163
| 0.873209
| 0.859286
| 0.849959
| 0.836037
| 0.831035
| 0.831035
| 0
| 0.023743
| 0.500872
| 19,492
| 593
| 126
| 32.870152
| 0.736664
| 0.092756
| 0
| 0.641026
| 0
| 0
| 0.123608
| 0.0104
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017094
| false
| 0.034188
| 0.021368
| 0
| 0.042735
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a4d65874a265bbe3a1d0d75260624d6098eb5e16
| 91,012
|
py
|
Python
|
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/swr_async_client.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/swr_async_client.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/swr_async_client.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class SwrAsyncClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
    """Initialize the async SWR client on top of the SDK core Client."""
    super(SwrAsyncClient, self).__init__()
    # Model classes for request/response (de)serialization live in the
    # versioned model package; resolved lazily by name at call time.
    self.model_package = importlib.import_module("huaweicloudsdkswr.v2.model")
    # Headers attached to every outgoing request.
    self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
    """Return a ClientBuilder for constructing this client.

    :param clazz: optional client class to build; defaults to this class.
    :return: ClientBuilder wrapping the chosen client class.
    :raises TypeError: if ``clazz`` is not an SWR client class.
    """
    if clazz is None:
        return ClientBuilder(cls)
    # Bug fix: the original compared only against "SwrClient", so passing
    # SwrAsyncClient (this very class) to its own builder raised TypeError.
    # Accept both the sync and async client names; rejecting anything else
    # is preserved for backward compatibility.
    if clazz.__name__ not in ("SwrClient", "SwrAsyncClient"):
        raise TypeError("client type error, support client type is SwrClient")
    return ClientBuilder(clazz)
def create_image_sync_repo_async(self, request):
    """Create an automatic image synchronization task.

    Delegates to :meth:`create_image_sync_repo_with_http_info`.

    :param CreateImageSyncRepoRequest request
    :return: CreateImageSyncRepoResponse
    """
    return self.create_image_sync_repo_with_http_info(request)
def create_image_sync_repo_with_http_info(self, request):
    """Create an automatic image synchronization task.

    :param CreateImageSyncRepoRequest request
    :return: CreateImageSyncRepoResponse
    """
    all_params = ['namespace', 'repository', 'body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/sync_repo',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateImageSyncRepoResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_manual_image_sync_repo_async(self, request):
    """Manually synchronize images.

    Delegates to :meth:`create_manual_image_sync_repo_with_http_info`.

    :param CreateManualImageSyncRepoRequest request
    :return: CreateManualImageSyncRepoResponse
    """
    return self.create_manual_image_sync_repo_with_http_info(request)
def create_manual_image_sync_repo_with_http_info(self, request):
    """Manually synchronize images.

    :param CreateManualImageSyncRepoRequest request
    :return: CreateManualImageSyncRepoResponse
    """
    all_params = ['namespace', 'repository', 'body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/sync_images',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateManualImageSyncRepoResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_namespace_async(self, request):
    """Create an organization (namespace).

    Delegates to :meth:`create_namespace_with_http_info`.

    :param CreateNamespaceRequest request
    :return: CreateNamespaceResponse
    """
    return self.create_namespace_with_http_info(request)
def create_namespace_with_http_info(self, request):
    """Create an organization (namespace).

    :param CreateNamespaceRequest request
    :return: CreateNamespaceResponse
    """
    all_params = ['create_namespace_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateNamespaceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_namespace_auth_async(self, request):
    """Create organization permissions.

    Delegates to :meth:`create_namespace_auth_with_http_info`.

    :param CreateNamespaceAuthRequest request
    :return: CreateNamespaceAuthResponse
    """
    return self.create_namespace_auth_with_http_info(request)
def create_namespace_auth_with_http_info(self, request):
    """Create organization permissions.

    :param CreateNamespaceAuthRequest request
    :return: CreateNamespaceAuthResponse
    """
    all_params = ['namespace', 'create_namespace_auth_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace',)
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/access',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateNamespaceAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_repo_async(self, request):
    """Create an image repository under an organization.

    Delegates to :meth:`create_repo_with_http_info`.

    :param CreateRepoRequest request
    :return: CreateRepoResponse
    """
    return self.create_repo_with_http_info(request)
def create_repo_with_http_info(self, request):
    """Create an image repository under an organization.

    :param CreateRepoRequest request
    :return: CreateRepoResponse
    """
    all_params = ['namespace', 'create_repo_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace',)
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateRepoResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_repo_domains_async(self, request):
    """Create a shared account.

    After an image is uploaded, a private image can be shared with another
    account, granting it permission to download the image.
    Delegates to :meth:`create_repo_domains_with_http_info`.

    :param CreateRepoDomainsRequest request
    :return: CreateRepoDomainsResponse
    """
    return self.create_repo_domains_with_http_info(request)
def create_repo_domains_with_http_info(self, request):
    """Create a shared account (share a private image with another account).

    :param CreateRepoDomainsRequest request
    :return: CreateRepoDomainsResponse
    """
    all_params = ['namespace', 'repository', 'create_repo_domains_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repositories/{repository}/access-domains',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateRepoDomainsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_retention_async(self, request):
    """Create an image retention (aging) rule.

    Delegates to :meth:`create_retention_with_http_info`.

    :param CreateRetentionRequest request
    :return: CreateRetentionResponse
    """
    return self.create_retention_with_http_info(request)
def create_retention_with_http_info(self, request):
    """Create an image retention (aging) rule.

    :param CreateRetentionRequest request
    :return: CreateRetentionResponse
    """
    all_params = ['namespace', 'repository', 'create_retention_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/retentions',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateRetentionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_secret_async(self, request):
    """Generate a temporary login command.

    The temporary login command can be built from the X-Swr-Dockerlogin
    response header and the host value in the response body.
    Delegates to :meth:`create_secret_with_http_info`.

    :param CreateSecretRequest request
    :return: CreateSecretResponse
    """
    return self.create_secret_with_http_info(request)
def create_secret_with_http_info(self, request):
    """Generate a temporary login command.

    :param CreateSecretRequest request
    :return: CreateSecretResponse
    """
    all_params = ['projectname']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}
    path_params = {}

    # Only query parameter: the project name, when present.
    query_params = [
        ('projectname', local_var_params['projectname']),
    ] if 'projectname' in local_var_params else []

    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/utils/secret',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateSecretResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_trigger_async(self, request):
    """Create a trigger.

    Delegates to :meth:`create_trigger_with_http_info`.

    :param CreateTriggerRequest request
    :return: CreateTriggerResponse
    """
    return self.create_trigger_with_http_info(request)
def create_trigger_with_http_info(self, request):
    """Create a trigger.

    :param CreateTriggerRequest request
    :return: CreateTriggerResponse
    """
    all_params = ['namespace', 'repository', 'create_trigger_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/triggers',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateTriggerResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_user_repository_auth_async(self, request):
    """Create image permissions.

    Delegates to :meth:`create_user_repository_auth_with_http_info`.

    :param CreateUserRepositoryAuthRequest request
    :return: CreateUserRepositoryAuthResponse
    """
    return self.create_user_repository_auth_with_http_info(request)
def create_user_repository_auth_with_http_info(self, request):
    """Create image permissions.

    :param CreateUserRepositoryAuthRequest request
    :return: CreateUserRepositoryAuthResponse
    """
    all_params = ['namespace', 'repository', 'create_user_repository_auth_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/access',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateUserRepositoryAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_image_sync_repo_async(self, request):
    """Delete an automatic image synchronization task.

    Delegates to :meth:`delete_image_sync_repo_with_http_info`.

    :param DeleteImageSyncRepoRequest request
    :return: DeleteImageSyncRepoResponse
    """
    return self.delete_image_sync_repo_with_http_info(request)
def delete_image_sync_repo_with_http_info(self, request):
    """Delete an automatic image synchronization task.

    :param DeleteImageSyncRepoRequest request
    :return: DeleteImageSyncRepoResponse
    """
    all_params = ['namespace', 'repository', 'body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/sync_repo',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteImageSyncRepoResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_namespace_auth_async(self, request):
    """Delete organization permissions.

    Delegates to :meth:`delete_namespace_auth_with_http_info`.

    :param DeleteNamespaceAuthRequest request
    :return: DeleteNamespaceAuthResponse
    """
    return self.delete_namespace_auth_with_http_info(request)
def delete_namespace_auth_with_http_info(self, request):
    """Delete organization permissions.

    :param DeleteNamespaceAuthRequest request
    :return: DeleteNamespaceAuthResponse
    """
    all_params = ['namespace', 'delete_namespace_auth_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace',)
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/access',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteNamespaceAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_namespaces_async(self, request):
    """Delete an organization.

    Delegates to :meth:`delete_namespaces_with_http_info`.

    :param DeleteNamespacesRequest request
    :return: DeleteNamespacesResponse
    """
    return self.delete_namespaces_with_http_info(request)
def delete_namespaces_with_http_info(self, request):
    """Delete an organization.

    :param DeleteNamespacesRequest request
    :return: DeleteNamespacesResponse
    """
    all_params = ['namespace']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace',)
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteNamespacesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_repo_async(self, request):
    """Delete an image repository under an organization.

    Delegates to :meth:`delete_repo_with_http_info`.

    :param DeleteRepoRequest request
    :return: DeleteRepoResponse
    """
    return self.delete_repo_with_http_info(request)
def delete_repo_with_http_info(self, request):
    """Delete an image repository under an organization.

    :param DeleteRepoRequest request
    :return: DeleteRepoResponse
    """
    all_params = ['namespace', 'repository']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteRepoResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_repo_domains_async(self, request):
    """Delete a shared account.

    Delegates to :meth:`delete_repo_domains_with_http_info`.

    :param DeleteRepoDomainsRequest request
    :return: DeleteRepoDomainsResponse
    """
    return self.delete_repo_domains_with_http_info(request)
def delete_repo_domains_with_http_info(self, request):
    """Delete a shared account.

    :param DeleteRepoDomainsRequest request
    :return: DeleteRepoDomainsResponse
    """
    all_params = ['namespace', 'repository', 'access_domain']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository', 'access_domain')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repositories/{repository}/access-domains/{access_domain}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteRepoDomainsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_repo_tag_async(self, request):
    """Delete the image with the specified tag from a repository.

    Delegates to :meth:`delete_repo_tag_with_http_info`.

    :param DeleteRepoTagRequest request
    :return: DeleteRepoTagResponse
    """
    return self.delete_repo_tag_with_http_info(request)
def delete_repo_tag_with_http_info(self, request):
    """Delete the image with the specified tag from a repository.

    :param DeleteRepoTagRequest request
    :return: DeleteRepoTagResponse
    """
    all_params = ['namespace', 'repository', 'tag']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository', 'tag')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/tags/{tag}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteRepoTagResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_retention_async(self, request):
    """Delete an image retention (aging) rule.

    Delegates to :meth:`delete_retention_with_http_info`.

    :param DeleteRetentionRequest request
    :return: DeleteRetentionResponse
    """
    return self.delete_retention_with_http_info(request)
def delete_retention_with_http_info(self, request):
    """Delete an image retention (aging) rule.

    :param DeleteRetentionRequest request
    :return: DeleteRetentionResponse
    """
    all_params = ['namespace', 'repository', 'retention_id']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository', 'retention_id')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/retentions/{retention_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteRetentionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_trigger_async(self, request):
    """Delete a trigger.

    Delegates to :meth:`delete_trigger_with_http_info`.

    :param DeleteTriggerRequest request
    :return: DeleteTriggerResponse
    """
    return self.delete_trigger_with_http_info(request)
def delete_trigger_with_http_info(self, request):
    """Delete a trigger.

    :param DeleteTriggerRequest request
    :return: DeleteTriggerResponse
    """
    all_params = ['namespace', 'repository', 'trigger']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository', 'trigger')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/triggers/{trigger}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteTriggerResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_user_repository_auth_async(self, request):
    """Delete image permissions.

    Delegates to :meth:`delete_user_repository_auth_with_http_info`.

    :param DeleteUserRepositoryAuthRequest request
    :return: DeleteUserRepositoryAuthResponse
    """
    return self.delete_user_repository_auth_with_http_info(request)
def delete_user_repository_auth_with_http_info(self, request):
    """Delete image permissions.

    :param DeleteUserRepositoryAuthRequest request
    :return: DeleteUserRepositoryAuthResponse
    """
    all_params = ['namespace', 'repository', 'delete_user_repository_auth_request_body']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # A streaming request overrides any explicit JSON body.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/access',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteUserRepositoryAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_image_auto_sync_repos_details_async(self, request):
    """List automatic image synchronization tasks.

    Delegates to :meth:`list_image_auto_sync_repos_details_with_http_info`.

    :param ListImageAutoSyncReposDetailsRequest request
    :return: ListImageAutoSyncReposDetailsResponse
    """
    return self.list_image_auto_sync_repos_details_with_http_info(request)
def list_image_auto_sync_repos_details_with_http_info(self, request):
    """List automatic image synchronization tasks.

    :param ListImageAutoSyncReposDetailsRequest request
    :return: ListImageAutoSyncReposDetailsResponse
    """
    all_params = ['namespace', 'repository']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}

    # Fill the URL path placeholders.
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }

    query_params = []
    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/sync_repo',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListImageAutoSyncReposDetailsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_namespaces_async(self, request):
    """List organizations.

    Delegates to :meth:`list_namespaces_with_http_info`.

    :param ListNamespacesRequest request
    :return: ListNamespacesResponse
    """
    return self.list_namespaces_with_http_info(request)
def list_namespaces_with_http_info(self, request):
    """List organizations.

    :param ListNamespacesRequest request
    :return: ListNamespacesResponse
    """
    all_params = ['namespace']

    # Copy every attribute actually set on the request object.
    local_var_params = {
        name: getattr(request, name)
        for name in request.attribute_map
        if hasattr(request, name)
    }

    collection_formats = {}
    path_params = {}

    # Only query parameter: the namespace filter, when present.
    query_params = [
        ('namespace', local_var_params['namespace']),
    ] if 'namespace' in local_var_params else []

    form_params = {}

    # Only a streaming request carries a body here.
    body_params = (request.get_file_stream()
                   if isinstance(request, SdkStreamRequest) else None)

    response_headers = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(
            ['application/json'])
    }
    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListNamespacesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_repo_domains_async(self, request):
    """List the accounts an image repository is shared with.

    :param ListRepoDomainsRequest request
    :return: ListRepoDomainsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_repo_domains_with_http_info(request)
def list_repo_domains_with_http_info(self, request):
    """List the accounts an image repository is shared with.

    :param ListRepoDomainsRequest request
    :return: ListRepoDomainsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repositories/{repository}/access-domains',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRepoDomainsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_repos_details_async(self, request):
    """List image repositories.

    :param ListReposDetailsRequest request
    :return: ListReposDetailsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_repos_details_with_http_info(request)
def list_repos_details_with_http_info(self, request):
    """List image repositories.

    :param ListReposDetailsRequest request
    :return: ListReposDetailsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'name', 'category', 'filter']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'namespace' in local_var_params:
        query_params.append(('namespace', local_var_params['namespace']))
    if 'name' in local_var_params:
        query_params.append(('name', local_var_params['name']))
    if 'category' in local_var_params:
        query_params.append(('category', local_var_params['category']))
    if 'filter' in local_var_params:
        query_params.append(('filter', local_var_params['filter']))

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    # Content-Range is exposed to the caller (presumably pagination info --
    # confirm against the API reference).
    response_headers = ["Content-Range"]

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/repos',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListReposDetailsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_repository_tags_async(self, request):
    """List image tags.

    :param ListRepositoryTagsRequest request
    :return: ListRepositoryTagsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_repository_tags_with_http_info(request)
def list_repository_tags_with_http_info(self, request):
    """List image tags.

    :param ListRepositoryTagsRequest request
    :return: ListRepositoryTagsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'offset', 'limit', 'order_column', 'order_type', 'tag']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []
    if 'offset' in local_var_params:
        query_params.append(('offset', local_var_params['offset']))
    if 'limit' in local_var_params:
        query_params.append(('limit', local_var_params['limit']))
    if 'order_column' in local_var_params:
        query_params.append(('order_column', local_var_params['order_column']))
    if 'order_type' in local_var_params:
        query_params.append(('order_type', local_var_params['order_type']))
    if 'tag' in local_var_params:
        query_params.append(('tag', local_var_params['tag']))

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/tags',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRepositoryTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_retention_histories_async(self, request):
    """List image retention (aging) history records.

    :param ListRetentionHistoriesRequest request
    :return: ListRetentionHistoriesResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_retention_histories_with_http_info(request)
def list_retention_histories_with_http_info(self, request):
    """List image retention (aging) history records.

    :param ListRetentionHistoriesRequest request
    :return: ListRetentionHistoriesResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'offset', 'limit']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []
    if 'offset' in local_var_params:
        query_params.append(('offset', local_var_params['offset']))
    if 'limit' in local_var_params:
        query_params.append(('limit', local_var_params['limit']))

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    # Content-Range is exposed to the caller (presumably pagination info --
    # confirm against the API reference).
    response_headers = ["Content-Range"]

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/retentions/histories',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRetentionHistoriesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_retentions_async(self, request):
    """List image retention (aging) rules.

    :param ListRetentionsRequest request
    :return: ListRetentionsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_retentions_with_http_info(request)
def list_retentions_with_http_info(self, request):
    """List image retention (aging) rules.

    :param ListRetentionsRequest request
    :return: ListRetentionsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/retentions',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRetentionsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_shared_repos_details_async(self, request):
    """List shared image repositories.

    :param ListSharedReposDetailsRequest request
    :return: ListSharedReposDetailsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_shared_repos_details_with_http_info(request)
def list_shared_repos_details_with_http_info(self, request):
    """List shared image repositories.

    :param ListSharedReposDetailsRequest request
    :return: ListSharedReposDetailsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['filter']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'filter' in local_var_params:
        query_params.append(('filter', local_var_params['filter']))

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    # Content-Range is exposed to the caller (presumably pagination info --
    # confirm against the API reference).
    response_headers = ["Content-Range"]

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/shared-repositories',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListSharedReposDetailsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_triggers_details_async(self, request):
    """List the triggers of an image repository.

    :param ListTriggersDetailsRequest request
    :return: ListTriggersDetailsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.list_triggers_details_with_http_info(request)
def list_triggers_details_with_http_info(self, request):
    """List the triggers of an image repository.

    :param ListTriggersDetailsRequest request
    :return: ListTriggersDetailsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/triggers',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListTriggersDetailsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_access_domain_async(self, request):
    """Check whether a shared account (access domain) exists.

    :param ShowAccessDomainRequest request
    :return: ShowAccessDomainResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_access_domain_with_http_info(request)
def show_access_domain_with_http_info(self, request):
    """Check whether a shared account (access domain) exists.

    :param ShowAccessDomainRequest request
    :return: ShowAccessDomainResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'access_domain']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']
    if 'access_domain' in local_var_params:
        path_params['access_domain'] = local_var_params['access_domain']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repositories/{repository}/access-domains/{access_domain}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowAccessDomainResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_namespace_async(self, request):
    """Get the details of an organization (namespace).

    :param ShowNamespaceRequest request
    :return: ShowNamespaceResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_namespace_with_http_info(request)
def show_namespace_with_http_info(self, request):
    """Get the details of an organization (namespace).

    :param ShowNamespaceRequest request
    :return: ShowNamespaceResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowNamespaceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_namespace_auth_async(self, request):
    """Query the permissions of an organization (namespace).

    :param ShowNamespaceAuthRequest request
    :return: ShowNamespaceAuthResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_namespace_auth_with_http_info(request)
def show_namespace_auth_with_http_info(self, request):
    """Query the permissions of an organization (namespace).

    :param ShowNamespaceAuthRequest request
    :return: ShowNamespaceAuthResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/access',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowNamespaceAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_repository_async(self, request):
    """Get the overview information of an image repository.

    :param ShowRepositoryRequest request
    :return: ShowRepositoryResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_repository_with_http_info(request)
def show_repository_with_http_info(self, request):
    """Get the overview information of an image repository.

    :param ShowRepositoryRequest request
    :return: ShowRepositoryResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowRepositoryResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_retention_async(self, request):
    """Get an image retention (aging) rule record.

    :param ShowRetentionRequest request
    :return: ShowRetentionResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_retention_with_http_info(request)
def show_retention_with_http_info(self, request):
    """Get an image retention (aging) rule record.

    :param ShowRetentionRequest request
    :return: ShowRetentionResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'retention_id']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']
    if 'retention_id' in local_var_params:
        path_params['retention_id'] = local_var_params['retention_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/retentions/{retention_id}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowRetentionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_sync_job_async(self, request):
    """Get image synchronization job information.

    :param ShowSyncJobRequest request
    :return: ShowSyncJobResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_sync_job_with_http_info(request)
def show_sync_job_with_http_info(self, request):
    """Get image synchronization job information.

    :param ShowSyncJobRequest request
    :return: ShowSyncJobResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'filter']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []
    if 'filter' in local_var_params:
        query_params.append(('filter', local_var_params['filter']))

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/sync_job',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowSyncJobResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_trigger_async(self, request):
    """Get the details of a trigger.

    :param ShowTriggerRequest request
    :return: ShowTriggerResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_trigger_with_http_info(request)
def show_trigger_with_http_info(self, request):
    """Get the details of a trigger.

    :param ShowTriggerRequest request
    :return: ShowTriggerResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'trigger']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']
    if 'trigger' in local_var_params:
        path_params['trigger'] = local_var_params['trigger']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/triggers/{trigger}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowTriggerResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_user_repository_auth_async(self, request):
    """Query the permissions of an image repository.

    :param ShowUserRepositoryAuthRequest request
    :return: ShowUserRepositoryAuthResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.show_user_repository_auth_with_http_info(request)
def show_user_repository_auth_with_http_info(self, request):
    """Query the permissions of an image repository.

    :param ShowUserRepositoryAuthRequest request
    :return: ShowUserRepositoryAuthResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # A stream request sends its file stream as the raw request body.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/access',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowUserRepositoryAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_namespace_auth_async(self, request):
    """Update the permissions of an organization (namespace).

    :param UpdateNamespaceAuthRequest request
    :return: UpdateNamespaceAuthResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.update_namespace_auth_with_http_info(request)
def update_namespace_auth_with_http_info(self, request):
    """Update the permissions of an organization (namespace).

    :param UpdateNamespaceAuthRequest request
    :return: UpdateNamespaceAuthResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'update_namespace_auth_req']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # The JSON request body comes from the request's 'body' attribute.
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides it with the raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/access',
        method='PATCH',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateNamespaceAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_repo_async(self, request):
    """Update the overview information of an image repository.

    Updates the repository overview within the tenant's namespace, including
    image category, public/private flag and description.

    :param UpdateRepoRequest request
    :return: UpdateRepoResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.update_repo_with_http_info(request)
def update_repo_with_http_info(self, request):
    """Update the overview information of an image repository.

    Updates the repository overview within the tenant's namespace, including
    image category, public/private flag and description.

    :param UpdateRepoRequest request
    :return: UpdateRepoResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'update_repo_request_body']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # The JSON request body comes from the request's 'body' attribute.
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides it with the raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}',
        method='PATCH',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateRepoResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_repo_domains_async(self, request):
    """Update a shared account (access domain).

    :param UpdateRepoDomainsRequest request
    :return: UpdateRepoDomainsResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.update_repo_domains_with_http_info(request)
def update_repo_domains_with_http_info(self, request):
    """Update a shared account (access domain).

    :param UpdateRepoDomainsRequest request
    :return: UpdateRepoDomainsResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'access_domain', 'update_repo_domains_request_body']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']
    if 'access_domain' in local_var_params:
        path_params['access_domain'] = local_var_params['access_domain']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # The JSON request body comes from the request's 'body' attribute.
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides it with the raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repositories/{repository}/access-domains/{access_domain}',
        method='PATCH',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateRepoDomainsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_retention_async(self, request):
    """Modify an image retention (aging) rule.

    :param UpdateRetentionRequest request
    :return: UpdateRetentionResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.update_retention_with_http_info(request)
def update_retention_with_http_info(self, request):
    """Modify an image retention (aging) rule.

    :param UpdateRetentionRequest request
    :return: UpdateRetentionResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'retention_id', 'update_retention_request_body']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']
    if 'retention_id' in local_var_params:
        path_params['retention_id'] = local_var_params['retention_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # The JSON request body comes from the request's 'body' attribute.
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides it with the raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/retentions/{retention_id}',
        method='PATCH',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateRetentionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_trigger_async(self, request):
    """Update the configuration of a trigger.

    :param UpdateTriggerRequest request
    :return: UpdateTriggerResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.update_trigger_with_http_info(request)
def update_trigger_with_http_info(self, request):
    """Update the configuration of a trigger.

    :param UpdateTriggerRequest request
    :return: UpdateTriggerResponse
    """
    # Generated code: all_params documents the accepted parameters but is
    # not referenced below.
    all_params = ['namespace', 'repository', 'trigger', 'body']
    local_var_params = {}
    # Collect every attribute actually set on the request object.
    for attr in request.attribute_map:
        if hasattr(request, attr):
            local_var_params[attr] = getattr(request, attr)

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']
    if 'repository' in local_var_params:
        path_params['repository'] = local_var_params['repository']
    if 'trigger' in local_var_params:
        path_params['trigger'] = local_var_params['trigger']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    # The JSON request body comes from the request's 'body' attribute.
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # A stream request overrides it with the raw file stream.
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/triggers/{trigger}',
        method='PATCH',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateTriggerResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_user_repository_auth_async(self, request):
    """Update the permissions of an image repository.

    :param UpdateUserRepositoryAuthRequest request
    :return: UpdateUserRepositoryAuthResponse
    """
    # NOTE(review): synchronous delegation despite the "_async" name -- confirm.
    return self.update_user_repository_auth_with_http_info(request)
def update_user_repository_auth_with_http_info(self, request):
    """Update image repository permissions, building the raw HTTP call.

    :param UpdateUserRepositoryAuthRequest request
    :return: UpdateUserRepositoryAuthResponse
    """
    # NOTE(review): all_params names 'update_repository_auth_request_body'
    # while the body is read from the 'body' key below — generated-SDK
    # convention; kept as-is.
    all_params = ['namespace', 'repository', 'update_repository_auth_request_body']
    # Collect every attribute actually present on the request object.
    local_var_params = {
        attr_name: getattr(request, attr_name)
        for attr_name in request.attribute_map
        if hasattr(request, attr_name)
    }
    collection_formats = {}
    # Placeholders in .../namespaces/{namespace}/repos/{repository}/access
    path_params = {
        key: local_var_params[key]
        for key in ('namespace', 'repository')
        if key in local_var_params
    }
    query_params = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(['application/json'])
    }
    form_params = {}
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()
    response_headers = []
    auth_settings = []
    return self.call_api(
        resource_path='/v2/manage/namespaces/{namespace}/repos/{repository}/access',
        method='PATCH',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateUserRepositoryAuthResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_api_versions_async(self, request):
    """List all API version information (async wrapper).

    Thin wrapper that delegates to list_api_versions_with_http_info; the
    transport layer dispatches the request asynchronously.

    :param ListApiVersionsRequest request
    :return: ListApiVersionsResponse
    """
    return self.list_api_versions_with_http_info(request)
def list_api_versions_with_http_info(self, request):
    """List all API version information, building the raw HTTP call.

    :param ListApiVersionsRequest request
    :return: ListApiVersionsResponse
    """
    all_params = []
    # Collect every attribute actually present on the request object.
    local_var_params = {
        attr_name: getattr(request, attr_name)
        for attr_name in request.attribute_map
        if hasattr(request, attr_name)
    }
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(['application/json'])
    }
    form_params = {}
    # Only streaming requests carry a body for this endpoint.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None
    response_headers = []
    auth_settings = []
    return self.call_api(
        resource_path='/',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApiVersionsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_api_version_async(self, request):
    """Show information for one API version (async wrapper).

    Thin wrapper that delegates to show_api_version_with_http_info; the
    transport layer dispatches the request asynchronously.

    :param ShowApiVersionRequest request
    :return: ShowApiVersionResponse
    """
    return self.show_api_version_with_http_info(request)
def show_api_version_with_http_info(self, request):
    """Show information for one API version, building the raw HTTP call.

    :param ShowApiVersionRequest request
    :return: ShowApiVersionResponse
    """
    all_params = ['api_version']
    # Collect every attribute actually present on the request object.
    local_var_params = {
        attr_name: getattr(request, attr_name)
        for attr_name in request.attribute_map
        if hasattr(request, attr_name)
    }
    collection_formats = {}
    # Placeholder in /{api_version}
    path_params = {
        key: local_var_params[key]
        for key in ('api_version',)
        if key in local_var_params
    }
    query_params = []
    header_params = {
        'Content-Type': http_utils.select_header_content_type(['application/json'])
    }
    form_params = {}
    # Only streaming requests carry a body for this endpoint.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None
    response_headers = []
    auth_settings = []
    return self.call_api(
        resource_path='/{api_version}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowApiVersionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
             post_params=None, response_type=None, response_headers=None, auth_settings=None,
             collection_formats=None, request_type=None):
    """Makes the HTTP request and returns deserialized data.

    :param resource_path: Path to method endpoint.
    :param method: Method to call.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters to be
        placed in the request header.
    :param body: Request body.
    :param post_params dict: Request post form parameters,
        for `application/x-www-form-urlencoded`, `multipart/form-data`.
    :param auth_settings list: Auth Settings names for the request.
    :param response_type: Response data type.
    :param response_headers: Header should be added to response data.
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param request_type: Request data type.
    :return:
        Return the response directly.
    """
    # Delegates to the shared transport layer with async_request=True, so
    # "the response" here is whatever do_http_request yields in async mode
    # (presumably a future-like object — confirm against do_http_request).
    return self.do_http_request(
        method=method,
        resource_path=resource_path,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body,
        post_params=post_params,
        response_type=response_type,
        response_headers=response_headers,
        collection_formats=collection_formats,
        request_type=request_type,
        async_request=True)
| 30.408286
| 119
| 0.62483
| 8,768
| 91,012
| 6.093864
| 0.037865
| 0.046415
| 0.081226
| 0.033239
| 0.918549
| 0.906028
| 0.875147
| 0.854934
| 0.844266
| 0.698097
| 0
| 0.000696
| 0.289489
| 91,012
| 2,992
| 120
| 30.418449
| 0.825578
| 0.10436
| 0
| 0.844367
| 0
| 0
| 0.116378
| 0.048964
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052846
| false
| 0
| 0.005807
| 0
| 0.11324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3502228b9652ea9c62b254b734603d057b12470e
| 641
|
py
|
Python
|
src/recording_script_generator_tests/test_utils.py
|
stefantaubert/recording-script-generator
|
01cdcd4b85ed7f245f4bb8535d870c04472746c9
|
[
"MIT"
] | null | null | null |
src/recording_script_generator_tests/test_utils.py
|
stefantaubert/recording-script-generator
|
01cdcd4b85ed7f245f4bb8535d870c04472746c9
|
[
"MIT"
] | null | null | null |
src/recording_script_generator_tests/test_utils.py
|
stefantaubert/recording-script-generator
|
01cdcd4b85ed7f245f4bb8535d870c04472746c9
|
[
"MIT"
] | null | null | null |
from ordered_set import OrderedSet
# def test_detect_ids_from_tex__one_id():
# input_tex = "\\item Nor do all men find the same things the objects of their fear, anger, repulsion, and the rest. % 1\n"
# res = detect_ids_from_tex(input_tex)
# assert res == OrderedSet([1])
# def test_detect_ids_from_tex__two_ids():
# input_tex = "\\item Nor do all men find the same things the objects of their fear, anger, repulsion, and the rest. % 1\n\\item Nor do all men find the same things the objects of their fear, anger, repulsion, and the rest. % 22\n"
# res = detect_ids_from_tex(input_tex)
# assert res == OrderedSet([1, 22])
| 37.705882
| 233
| 0.720749
| 111
| 641
| 3.936937
| 0.324324
| 0.08238
| 0.118993
| 0.146453
| 0.897025
| 0.897025
| 0.791762
| 0.791762
| 0.791762
| 0.791762
| 0
| 0.015238
| 0.180967
| 641
| 16
| 234
| 40.0625
| 0.817143
| 0.907956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
35050e5a210bd99be33c7b7857df299f5d2a8895
| 6,545
|
py
|
Python
|
loldib/getratings/models/NA/na_maokai/na_maokai_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_maokai/na_maokai_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_maokai/na_maokai_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# The NA/Maokai/Mid rating model is one empty Ratings subclass per opposing
# champion.  The original file spelled out all 138 classes by hand; they are
# generated in a loop instead, which keeps the module namespace identical
# (same class names, same base class, same __module__/__qualname__, so
# `from ... import NA_Maokai_Mid_Ahri`, star-imports and pickling all still
# work) while removing ~270 lines of duplication.
_CHAMPIONS = (
    'Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie', 'Ashe',
    'AurelionSol', 'Azir', 'Bard', 'Blitzcrank', 'Brand', 'Braum', 'Caitlyn',
    'Camille', 'Cassiopeia', 'Chogath', 'Corki', 'Darius', 'Diana', 'Draven',
    'DrMundo', 'Ekko', 'Elise', 'Evelynn', 'Ezreal', 'Fiddlesticks', 'Fiora',
    'Fizz', 'Galio', 'Gangplank', 'Garen', 'Gnar', 'Gragas', 'Graves',
    'Hecarim', 'Heimerdinger', 'Illaoi', 'Irelia', 'Ivern', 'Janna',
    'JarvanIV', 'Jax', 'Jayce', 'Jhin', 'Jinx', 'Kalista', 'Karma',
    'Karthus', 'Kassadin', 'Katarina', 'Kayle', 'Kayn', 'Kennen', 'Khazix',
    'Kindred', 'Kled', 'KogMaw', 'Leblanc', 'LeeSin', 'Leona', 'Lissandra',
    'Lucian', 'Lulu', 'Lux', 'Malphite', 'Malzahar', 'Maokai', 'MasterYi',
    'MissFortune', 'MonkeyKing', 'Mordekaiser', 'Morgana', 'Nami', 'Nasus',
    'Nautilus', 'Nidalee', 'Nocturne', 'Nunu', 'Olaf', 'Orianna', 'Ornn',
    'Pantheon', 'Poppy', 'Quinn', 'Rakan', 'Rammus', 'RekSai', 'Renekton',
    'Rengar', 'Riven', 'Rumble', 'Ryze', 'Sejuani', 'Shaco', 'Shen',
    'Shyvana', 'Singed', 'Sion', 'Sivir', 'Skarner', 'Sona', 'Soraka',
    'Swain', 'Syndra', 'TahmKench', 'Taliyah', 'Talon', 'Taric', 'Teemo',
    'Thresh', 'Tristana', 'Trundle', 'Tryndamere', 'TwistedFate', 'Twitch',
    'Udyr', 'Urgot', 'Varus', 'Vayne', 'Veigar', 'Velkoz', 'Vi', 'Viktor',
    'Vladimir', 'Volibear', 'Warwick', 'Xayah', 'Xerath', 'XinZhao',
    'Yasuo', 'Yorick', 'Zac', 'Zed', 'Ziggs', 'Zilean', 'Zyra',
)

for _champion in _CHAMPIONS:
    _cls_name = 'NA_Maokai_Mid_' + _champion
    # type() with an empty namespace matches an empty top-level `class`
    # statement: __name__ == __qualname__ == _cls_name and __module__ is
    # this module's name.
    globals()[_cls_name] = type(_cls_name, (Ratings,), {})

# Keep the module namespace as close to the hand-written original as
# possible: drop the generation scaffolding.
del _champion, _cls_name, _CHAMPIONS
| 15.695444
| 46
| 0.766692
| 972
| 6,545
| 4.736626
| 0.151235
| 0.209818
| 0.389661
| 0.479583
| 0.803432
| 0.803432
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169748
| 6,545
| 416
| 47
| 15.733173
| 0.847258
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
3511fa656a379307ade37209bc4ba6acd69e9010
| 3,391
|
py
|
Python
|
test/enabled/test_impl/test_JobBrowserBFF_query_jobs_admin_ee2_test.py
|
eapearson/kbase-skd-module-job-browser-bff
|
426445f90569adac16632ef4921f174e51abd42f
|
[
"MIT"
] | null | null | null |
test/enabled/test_impl/test_JobBrowserBFF_query_jobs_admin_ee2_test.py
|
eapearson/kbase-skd-module-job-browser-bff
|
426445f90569adac16632ef4921f174e51abd42f
|
[
"MIT"
] | 6
|
2020-05-26T17:40:07.000Z
|
2022-03-11T16:33:11.000Z
|
test/enabled/test_impl/test_JobBrowserBFF_query_jobs_admin_ee2_test.py
|
eapearson/kbase-skd-module-job-browser-bff
|
426445f90569adac16632ef4921f174e51abd42f
|
[
"MIT"
] | 1
|
2020-05-26T17:12:59.000Z
|
2020-05-26T17:12:59.000Z
|
# -*- coding: utf-8 -*-
from JobBrowserBFF.TestBase import TestBase
# Fixture configuration for the admin-level ee2 query_jobs tests below.
UPSTREAM_SERVICE = 'ee2'
TIMEOUT = 10000  # upstream call timeout (presumably milliseconds — confirm)
ENV = 'ci'
USER_CLASS = 'admin'
# Query window as epoch timestamps (presumably milliseconds — confirm).
TIME_FROM = 1585699200000  # 4/1/20
TIME_TO = 1585872000000  # 4/4/20
class JobBrowserBFFTest(TestBase):
    """query_jobs tests run as an admin user against the ee2 upstream."""

    # Uncomment to skip this test
    # @unittest.skip("skipped test_query_jobs_happy")
    def test_query_jobs_happy(self):
        """Plain admin query over the fixture time span.

        Expects 36 jobs found in the window, with the page capped at 5 by
        the 'limit' parameter.
        """
        self.set_config('upstream-service', UPSTREAM_SERVICE)
        try:
            impl, context = self.impl_for(ENV, USER_CLASS)
            ret = impl.query_jobs(context, {
                'time_span': {
                    'from': TIME_FROM,
                    'to': TIME_TO
                },
                'offset': 0,
                'limit': 5,
                'timeout': TIMEOUT,
                'admin': True
            })
            jobs, found_count, total_count = self.assert_job_query_result_with_count(ret)
            self.assertEqual(found_count, 36)
            # TODO: total_count is not implemented in ee2 yet.
            # self.assertEqual(total_count, 19)
            self.assertEqual(len(jobs), 5)
        except Exception as ex:
            # Route unexpected errors through the helper so the test fails
            # with a readable assertion rather than an uncaught traceback.
            self.assert_no_exception(ex)

    # Uncomment to skip this test
    # @unittest.skip("skipped test_query_jobs_with_time_span_happy")
    def test_query_jobs_with_sort_happy(self):
        """Same query sorted ascending by creation time.

        NOTE(review): the sort spec uses key 'created' but the ordering
        check below reads 'create_at' — possibly a typo for the job-record
        field name; confirm against the job schema.
        """
        self.set_config('upstream-service', UPSTREAM_SERVICE)
        try:
            impl, context = self.impl_for(ENV, USER_CLASS)
            ret = impl.query_jobs(context, {
                'sort': [
                    {
                        'key': 'created',
                        'direction': 'ascending'
                    }
                ],
                'time_span': {
                    'from': TIME_FROM,
                    'to': TIME_TO
                },
                'offset': 0,
                'limit': 5,
                'timeout': TIMEOUT,
                'admin': True
            })
            jobs, found_count, total_count = self.assert_job_query_result_with_count(ret)
            # self.assertEqual(total_count, 19)
            self.assertEqual(found_count, 36)
            self.is_in_ascending_order(jobs, ['state', 'create_at'])
        except Exception as ex:
            # Route unexpected errors through the helper (see above).
            self.assert_no_exception(ex)

    # Uncomment to skip this test
    # @unittest.skip("skipped test_query_jobs_with_sort_descending_happy")
    def test_query_jobs_with_sort_descending_happy(self):
        """Same query sorted descending by creation time.

        NOTE(review): same 'created' vs 'create_at' naming discrepancy as
        the ascending variant — confirm against the job schema.
        """
        self.set_config('upstream-service', UPSTREAM_SERVICE)
        try:
            impl, context = self.impl_for(ENV, USER_CLASS)
            ret = impl.query_jobs(context, {
                'sort': [
                    {
                        'key': 'created',
                        'direction': 'descending'
                    }
                ],
                'time_span': {
                    'from': TIME_FROM,
                    'to': TIME_TO
                },
                'offset': 0,
                'limit': 5,
                'timeout': TIMEOUT,
                'admin': True
            })
            jobs, found_count, total_count = self.assert_job_query_result_with_count(ret)
            # self.assertEqual(total_count, 19)
            self.assertEqual(found_count, 36)
            self.is_in_descending_order(jobs, ['state', 'create_at'])
        except Exception as ex:
            # Route unexpected errors through the helper (see above).
            self.assert_no_exception(ex)
| 35.694737
| 89
| 0.519021
| 347
| 3,391
| 4.786744
| 0.233429
| 0.048766
| 0.04696
| 0.040939
| 0.838651
| 0.818784
| 0.818784
| 0.74413
| 0.74413
| 0.74413
| 0
| 0.029089
| 0.381598
| 3,391
| 94
| 90
| 36.074468
| 0.762995
| 0.132704
| 0
| 0.701299
| 0
| 0
| 0.090536
| 0
| 0
| 0
| 0
| 0.010638
| 0.12987
| 1
| 0.038961
| false
| 0
| 0.012987
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1041fc440ac4f119233609607fe39270c43eb3ba
| 27,233
|
py
|
Python
|
sdk/python/pulumi_lxd/container.py
|
soupdiver/pulumi-lxd
|
258395aefd6a4cf138d470d7de70babed3310063
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_lxd/container.py
|
soupdiver/pulumi-lxd
|
258395aefd6a4cf138d470d7de70babed3310063
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_lxd/container.py
|
soupdiver/pulumi-lxd
|
258395aefd6a4cf138d470d7de70babed3310063
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ContainerArgs', 'Container']
@pulumi.input_type
class ContainerArgs:
    """Input arguments for constructing a Container resource.

    Only ``image`` is required; every other input is recorded on the
    instance only when a non-None value is supplied.  ``privileged`` is
    deprecated in favour of the ``security.privileged=1`` config setting.
    (Auto-generated by the Pulumi Terraform Bridge — do not edit by hand.)
    """
    def __init__(__self__, *,
                 image: pulumi.Input[str],
                 config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 devices: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerDeviceArgs']]]] = None,
                 ephemeral: Optional[pulumi.Input[bool]] = None,
                 files: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerFileArgs']]]] = None,
                 limits: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 privileged: Optional[pulumi.Input[bool]] = None,
                 profiles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 remote: Optional[pulumi.Input[str]] = None,
                 start_container: Optional[pulumi.Input[bool]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 wait_for_network: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Container resource.
        """
        pulumi.set(__self__, "image", image)
        if config is not None:
            pulumi.set(__self__, "config", config)
        if devices is not None:
            pulumi.set(__self__, "devices", devices)
        if ephemeral is not None:
            pulumi.set(__self__, "ephemeral", ephemeral)
        if files is not None:
            pulumi.set(__self__, "files", files)
        if limits is not None:
            pulumi.set(__self__, "limits", limits)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if privileged is not None:
            # Deprecated input: warn at construction time (generated code
            # emits the warning and the assignment as two separate checks).
            warnings.warn("""Use a config setting of security.privileged=1 instead""", DeprecationWarning)
            pulumi.log.warn("""privileged is deprecated: Use a config setting of security.privileged=1 instead""")
        if privileged is not None:
            pulumi.set(__self__, "privileged", privileged)
        if profiles is not None:
            pulumi.set(__self__, "profiles", profiles)
        if remote is not None:
            pulumi.set(__self__, "remote", remote)
        if start_container is not None:
            pulumi.set(__self__, "start_container", start_container)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if wait_for_network is not None:
            pulumi.set(__self__, "wait_for_network", wait_for_network)

    # Generated getter/setter pairs below: Pulumi stores and retrieves every
    # value through pulumi.set/pulumi.get rather than plain attributes.
    @property
    @pulumi.getter
    def image(self) -> pulumi.Input[str]:
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: pulumi.Input[str]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerDeviceArgs']]]]:
        return pulumi.get(self, "devices")

    @devices.setter
    def devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerDeviceArgs']]]]):
        pulumi.set(self, "devices", value)

    @property
    @pulumi.getter
    def ephemeral(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ephemeral")

    @ephemeral.setter
    def ephemeral(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ephemeral", value)

    @property
    @pulumi.getter
    def files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerFileArgs']]]]:
        return pulumi.get(self, "files")

    @files.setter
    def files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerFileArgs']]]]):
        pulumi.set(self, "files", value)

    @property
    @pulumi.getter
    def limits(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "limits")

    @limits.setter
    def limits(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "limits", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    # Deprecated — see the warning emitted in __init__.
    @property
    @pulumi.getter
    def privileged(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "privileged")

    @privileged.setter
    def privileged(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "privileged", value)

    @property
    @pulumi.getter
    def profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "profiles")

    @profiles.setter
    def profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "profiles", value)

    @property
    @pulumi.getter
    def remote(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "remote")

    @remote.setter
    def remote(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remote", value)

    # camelCase wire name differs from the Python attribute name.
    @property
    @pulumi.getter(name="startContainer")
    def start_container(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "start_container")

    @start_container.setter
    def start_container(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "start_container", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    # camelCase wire name differs from the Python attribute name.
    @property
    @pulumi.getter(name="waitForNetwork")
    def wait_for_network(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "wait_for_network")

    @wait_for_network.setter
    def wait_for_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "wait_for_network", value)
@pulumi.input_type
class _ContainerState:
    """Full observable state of a Container resource.

    Superset of ContainerArgs that also carries provider-computed outputs
    (ip_address, ipv4_address, ipv6_address, mac_address, status); used for
    looking up and filtering existing resources.  Every input is recorded
    only when a non-None value is supplied.
    (Auto-generated by the Pulumi Terraform Bridge — do not edit by hand.)
    """
    def __init__(__self__, *,
                 config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 devices: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerDeviceArgs']]]] = None,
                 ephemeral: Optional[pulumi.Input[bool]] = None,
                 files: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerFileArgs']]]] = None,
                 image: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 ipv4_address: Optional[pulumi.Input[str]] = None,
                 ipv6_address: Optional[pulumi.Input[str]] = None,
                 limits: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 mac_address: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 privileged: Optional[pulumi.Input[bool]] = None,
                 profiles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 remote: Optional[pulumi.Input[str]] = None,
                 start_container: Optional[pulumi.Input[bool]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 wait_for_network: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering Container resources.
        """
        if config is not None:
            pulumi.set(__self__, "config", config)
        if devices is not None:
            pulumi.set(__self__, "devices", devices)
        if ephemeral is not None:
            pulumi.set(__self__, "ephemeral", ephemeral)
        if files is not None:
            pulumi.set(__self__, "files", files)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if ipv4_address is not None:
            pulumi.set(__self__, "ipv4_address", ipv4_address)
        if ipv6_address is not None:
            pulumi.set(__self__, "ipv6_address", ipv6_address)
        if limits is not None:
            pulumi.set(__self__, "limits", limits)
        if mac_address is not None:
            pulumi.set(__self__, "mac_address", mac_address)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if privileged is not None:
            # Deprecated input: warn at construction time (generated code
            # emits the warning and the assignment as two separate checks).
            warnings.warn("""Use a config setting of security.privileged=1 instead""", DeprecationWarning)
            pulumi.log.warn("""privileged is deprecated: Use a config setting of security.privileged=1 instead""")
        if privileged is not None:
            pulumi.set(__self__, "privileged", privileged)
        if profiles is not None:
            pulumi.set(__self__, "profiles", profiles)
        if remote is not None:
            pulumi.set(__self__, "remote", remote)
        if start_container is not None:
            pulumi.set(__self__, "start_container", start_container)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if wait_for_network is not None:
            pulumi.set(__self__, "wait_for_network", wait_for_network)

    # Generated getter/setter pairs below: Pulumi stores and retrieves every
    # value through pulumi.set/pulumi.get rather than plain attributes.
    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "config", value)

    @property
    @pulumi.getter
    def devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerDeviceArgs']]]]:
        return pulumi.get(self, "devices")

    @devices.setter
    def devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerDeviceArgs']]]]):
        pulumi.set(self, "devices", value)

    @property
    @pulumi.getter
    def ephemeral(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "ephemeral")

    @ephemeral.setter
    def ephemeral(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ephemeral", value)

    @property
    @pulumi.getter
    def files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerFileArgs']]]]:
        return pulumi.get(self, "files")

    @files.setter
    def files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerFileArgs']]]]):
        pulumi.set(self, "files", value)

    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image", value)

    # camelCase wire name differs from the Python attribute name.
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="ipv4Address")
    def ipv4_address(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ipv4_address")

    @ipv4_address.setter
    def ipv4_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ipv4_address", value)

    @property
    @pulumi.getter(name="ipv6Address")
    def ipv6_address(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ipv6_address")

    @ipv6_address.setter
    def ipv6_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ipv6_address", value)

    @property
    @pulumi.getter
    def limits(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        return pulumi.get(self, "limits")

    @limits.setter
    def limits(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "limits", value)

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "mac_address")

    @mac_address.setter
    def mac_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mac_address", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    # Deprecated — see the warning emitted in __init__.
    @property
    @pulumi.getter
    def privileged(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "privileged")

    @privileged.setter
    def privileged(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "privileged", value)

    @property
    @pulumi.getter
    def profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "profiles")

    @profiles.setter
    def profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "profiles", value)

    @property
    @pulumi.getter
    def remote(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "remote")

    @remote.setter
    def remote(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remote", value)

    @property
    @pulumi.getter(name="startContainer")
    def start_container(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "start_container")

    @start_container.setter
    def start_container(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "start_container", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="waitForNetwork")
    def wait_for_network(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "wait_for_network")

    @wait_for_network.setter
    def wait_for_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "wait_for_network", value)
class Container(pulumi.CustomResource):
    """Pulumi custom resource 'lxd:index/container:Container'.

    Codegen-style resource wrapper: ``__init__`` accepts either a
    ``ContainerArgs`` bundle or individual keyword arguments (the two
    ``@overload`` signatures), and ``get`` rehydrates an existing resource
    from provider state via ``_ContainerState``.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerDeviceArgs']]]]] = None,
                 ephemeral: Optional[pulumi.Input[bool]] = None,
                 files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerFileArgs']]]]] = None,
                 image: Optional[pulumi.Input[str]] = None,
                 limits: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 privileged: Optional[pulumi.Input[bool]] = None,
                 profiles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 remote: Optional[pulumi.Input[str]] = None,
                 start_container: Optional[pulumi.Input[bool]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 wait_for_network: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        Create a Container resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ContainerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a Container resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param ContainerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: if a ContainerArgs was passed,
        # explode it into keyword arguments for _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerDeviceArgs']]]]] = None,
                       ephemeral: Optional[pulumi.Input[bool]] = None,
                       files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerFileArgs']]]]] = None,
                       image: Optional[pulumi.Input[str]] = None,
                       limits: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       privileged: Optional[pulumi.Input[bool]] = None,
                       profiles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       remote: Optional[pulumi.Input[str]] = None,
                       start_container: Optional[pulumi.Input[bool]] = None,
                       target: Optional[pulumi.Input[str]] = None,
                       type: Optional[pulumi.Input[str]] = None,
                       wait_for_network: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ is reserved for get().
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ContainerArgs.__new__(ContainerArgs)
            __props__.__dict__["config"] = config
            __props__.__dict__["devices"] = devices
            __props__.__dict__["ephemeral"] = ephemeral
            __props__.__dict__["files"] = files
            # 'image' is the only required input.
            if image is None and not opts.urn:
                raise TypeError("Missing required property 'image'")
            __props__.__dict__["image"] = image
            __props__.__dict__["limits"] = limits
            __props__.__dict__["name"] = name
            if privileged is not None and not opts.urn:
                warnings.warn("""Use a config setting of security.privileged=1 instead""", DeprecationWarning)
                pulumi.log.warn("""privileged is deprecated: Use a config setting of security.privileged=1 instead""")
            __props__.__dict__["privileged"] = privileged
            __props__.__dict__["profiles"] = profiles
            __props__.__dict__["remote"] = remote
            __props__.__dict__["start_container"] = start_container
            __props__.__dict__["target"] = target
            __props__.__dict__["type"] = type
            __props__.__dict__["wait_for_network"] = wait_for_network
            # Provider-computed outputs start as None.
            __props__.__dict__["ip_address"] = None
            __props__.__dict__["ipv4_address"] = None
            __props__.__dict__["ipv6_address"] = None
            __props__.__dict__["mac_address"] = None
            __props__.__dict__["status"] = None
        super(Container, __self__).__init__(
            'lxd:index/container:Container',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerDeviceArgs']]]]] = None,
            ephemeral: Optional[pulumi.Input[bool]] = None,
            files: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerFileArgs']]]]] = None,
            image: Optional[pulumi.Input[str]] = None,
            ip_address: Optional[pulumi.Input[str]] = None,
            ipv4_address: Optional[pulumi.Input[str]] = None,
            ipv6_address: Optional[pulumi.Input[str]] = None,
            limits: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            mac_address: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            privileged: Optional[pulumi.Input[bool]] = None,
            profiles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            remote: Optional[pulumi.Input[str]] = None,
            start_container: Optional[pulumi.Input[bool]] = None,
            status: Optional[pulumi.Input[str]] = None,
            target: Optional[pulumi.Input[str]] = None,
            type: Optional[pulumi.Input[str]] = None,
            wait_for_network: Optional[pulumi.Input[bool]] = None) -> 'Container':
        """
        Get an existing Container resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ContainerState.__new__(_ContainerState)
        __props__.__dict__["config"] = config
        __props__.__dict__["devices"] = devices
        __props__.__dict__["ephemeral"] = ephemeral
        __props__.__dict__["files"] = files
        __props__.__dict__["image"] = image
        __props__.__dict__["ip_address"] = ip_address
        __props__.__dict__["ipv4_address"] = ipv4_address
        __props__.__dict__["ipv6_address"] = ipv6_address
        __props__.__dict__["limits"] = limits
        __props__.__dict__["mac_address"] = mac_address
        __props__.__dict__["name"] = name
        __props__.__dict__["privileged"] = privileged
        __props__.__dict__["profiles"] = profiles
        __props__.__dict__["remote"] = remote
        __props__.__dict__["start_container"] = start_container
        __props__.__dict__["status"] = status
        __props__.__dict__["target"] = target
        __props__.__dict__["type"] = type
        __props__.__dict__["wait_for_network"] = wait_for_network
        return Container(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def config(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        return pulumi.get(self, "config")

    @property
    @pulumi.getter
    def devices(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerDevice']]]:
        return pulumi.get(self, "devices")

    @property
    @pulumi.getter
    def ephemeral(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "ephemeral")

    @property
    @pulumi.getter
    def files(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerFile']]]:
        return pulumi.get(self, "files")

    @property
    @pulumi.getter
    def image(self) -> pulumi.Output[str]:
        return pulumi.get(self, "image")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Output[str]:
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="ipv4Address")
    def ipv4_address(self) -> pulumi.Output[str]:
        return pulumi.get(self, "ipv4_address")

    @property
    @pulumi.getter(name="ipv6Address")
    def ipv6_address(self) -> pulumi.Output[str]:
        return pulumi.get(self, "ipv6_address")

    @property
    @pulumi.getter
    def limits(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        return pulumi.get(self, "limits")

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> pulumi.Output[str]:
        return pulumi.get(self, "mac_address")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def privileged(self) -> pulumi.Output[Optional[bool]]:
        # Deprecated input; see the DeprecationWarning in _internal_init.
        return pulumi.get(self, "privileged")

    @property
    @pulumi.getter
    def profiles(self) -> pulumi.Output[Sequence[str]]:
        return pulumi.get(self, "profiles")

    @property
    @pulumi.getter
    def remote(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "remote")

    @property
    @pulumi.getter(name="startContainer")
    def start_container(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "start_container")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def target(self) -> pulumi.Output[str]:
        return pulumi.get(self, "target")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="waitForNetwork")
    def wait_for_network(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "wait_for_network")
| 39.640466
| 134
| 0.62931
| 3,063
| 27,233
| 5.359125
| 0.054195
| 0.118611
| 0.165519
| 0.083095
| 0.874079
| 0.853244
| 0.805605
| 0.783856
| 0.761681
| 0.712214
| 0
| 0.001992
| 0.244189
| 27,233
| 686
| 135
| 39.698251
| 0.795511
| 0.040943
| 0
| 0.803571
| 1
| 0
| 0.094458
| 0.007675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164286
| false
| 0.001786
| 0.0125
| 0.092857
| 0.276786
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
105e26a321db3e638ee833fa80fee750cccb0980
| 13,352
|
py
|
Python
|
bio_rtd_test/unit_test/test_pdf.py
|
open-biotech/bio-rtd
|
c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0
|
[
"MIT"
] | 5
|
2020-03-30T13:26:12.000Z
|
2021-04-02T07:10:49.000Z
|
bio_rtd_test/unit_test/test_pdf.py
|
open-biotech/bio-rtd
|
c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0
|
[
"MIT"
] | null | null | null |
bio_rtd_test/unit_test/test_pdf.py
|
open-biotech/bio-rtd
|
c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0
|
[
"MIT"
] | 1
|
2020-06-03T07:50:56.000Z
|
2020-06-03T07:50:56.000Z
|
import unittest
import numpy as np
from bio_rtd.utils.vectors import true_start, true_end
from bio_rtd import pdf, peak_shapes, logger
class GaussianTest(unittest.TestCase):
    """Tests for the Gaussian probability distribution functions in bio_rtd.pdf."""

    @staticmethod
    def get_gauss_pdf_with_cutoff(t, rt_mean, sigma, cutoff) -> np.ndarray:
        """Reference implementation: Gaussian peak trimmed at `cutoff` of its
        maximum and re-normalized so its integral over `t` equals 1."""
        p: np.ndarray = peak_shapes.gaussian(t, rt_mean, sigma)
        # cut at end
        p = p[:true_end(p >= cutoff * p.max())]
        # set to 0 at front
        p[:true_start(p >= cutoff * p.max())] = 0
        # return normalized profile (integral = 1)
        return p / p.sum() / t[1]

    # define inlet with single specie
    def run_gauss_fixed_dispersion(self, t, rt_mean_list, sigma_list, cutoff) -> None:
        """Compare GaussianFixedDispersion against the reference for each
        (rt_mean, sigma) pair, including an rt_mean / 2 re-update where the
        fixed dispersion index implies sigma / sqrt(2)."""
        for rt_mean in rt_mean_list:
            for sigma in sigma_list:
                p = self.get_gauss_pdf_with_cutoff(t, rt_mean, sigma, cutoff)
                pd = pdf.GaussianFixedDispersion(t, dispersion_index=sigma ** 2 / rt_mean, cutoff=cutoff)
                # Raise the log threshold above ERROR to silence warnings/errors.
                pd.log.log_level = pd.log.ERROR + 10
                pd.update_pdf(rt_mean=rt_mean)
                np.testing.assert_array_equal(pd.get_p(), p)
                pd.update_pdf(rt_mean=rt_mean / 2)
                p = self.get_gauss_pdf_with_cutoff(t, rt_mean / 2, sigma / np.sqrt(2), cutoff)
                np.testing.assert_array_almost_equal(pd.get_p(), p, 5)

    # define inlet with single specie
    def test_gauss_fixed_dispersion(self) -> None:
        t = np.linspace(0, 100, 1500)
        self.run_gauss_fixed_dispersion(t,
                                        sigma_list=[20, 5.5, 30, 80],
                                        rt_mean_list=[5, 10, 0.4, 100],
                                        cutoff=0.001)
        t = np.linspace(0, 900, 901)
        self.run_gauss_fixed_dispersion(t,
                                        sigma_list=[200, 5.5, 30, 880],
                                        rt_mean_list=[5, 0.4, 100],
                                        cutoff=0.001)
        # At ERROR log level the same update must raise; above it, it must not.
        pd = pdf.GaussianFixedDispersion(t, dispersion_index=5, cutoff=0.03)
        pd.log.log_level = pd.log.ERROR
        with self.assertRaises(RuntimeError):
            pd.update_pdf(rt_mean=6.6)
        pd.log.log_level = pd.log.ERROR + 10
        pd.update_pdf(rt_mean=6.6)
        p1 = pd.get_p().copy()
        # Without trim-and-normalize the profile is longer and its integral < 1.
        pd.trim_and_normalize = False
        pd.update_pdf(rt_mean=6.6)
        p2 = pd.get_p()
        self.assertTrue(p1.size < p2.size)
        self.assertAlmostEqual(0.892, p2.sum() * t[1], 3)

    # define inlet with single specie
    def run_gauss_fixed_relative_with(self, t, rt_mean_list, sigma_list, cutoff, ignore_logger=True) -> None:
        """Compare GaussianFixedRelativeWidth against the reference; with
        ignore_logger=False a StrictLogger is attached so logged issues raise."""
        for rt_mean in rt_mean_list:
            for sigma in sigma_list:
                p = self.get_gauss_pdf_with_cutoff(t, rt_mean, sigma, cutoff)
                pd = pdf.GaussianFixedRelativeWidth(t, relative_sigma=sigma / rt_mean, cutoff=cutoff)
                if ignore_logger:
                    pd.log.log_level = pd.log.ERROR + 10
                else:
                    pd.log = logger.StrictLogger()
                pd.update_pdf(rt_mean=rt_mean)
                np.testing.assert_array_equal(pd.get_p(), p)
                pd.update_pdf(rt_mean=rt_mean / 2)
                # Fixed relative width: halving rt_mean halves sigma.
                p = self.get_gauss_pdf_with_cutoff(t, rt_mean / 2, sigma / 2, cutoff)
                np.testing.assert_array_almost_equal(pd.get_p(), p, 5)

    # define inlet with single specie
    def test_gauss_fixed_relative_with(self) -> None:
        t = np.linspace(0, 100, 1500)
        self.run_gauss_fixed_relative_with(t,
                                           sigma_list=[20, 5.5, 30, 80],
                                           rt_mean_list=[5, 10, 0.4, 100],
                                           cutoff=0.001)
        t = np.linspace(0, 900, 901)
        self.run_gauss_fixed_relative_with(t,
                                           sigma_list=[200, 5.5, 30, 80],
                                           rt_mean_list=[5, 0.4, 100],
                                           cutoff=0.001)
        # Test logger binding.
        with self.assertRaises(RuntimeError):
            self.run_gauss_fixed_relative_with(t,
                                               sigma_list=[5.5],
                                               rt_mean_list=[5],
                                               cutoff=0.001,
                                               ignore_logger=False)
class EmgTest(unittest.TestCase):
    """Tests for the exponentially-modified Gaussian (EMG) pdfs in bio_rtd.pdf."""

    @staticmethod
    def get_emg_pdf_with_cutoff(t, rt_mean, sigma, skew, cutoff) -> np.ndarray:
        """Reference implementation: EMG peak trimmed at `cutoff` of its
        maximum and re-normalized so its integral over `t` equals 1."""
        p: np.ndarray = peak_shapes.emg(t, rt_mean, sigma, skew)
        # cut at end
        p = p[:true_end(p >= cutoff * p.max())]
        # set to 0 at front
        p[:true_start(p >= cutoff * p.max())] = 0
        # return normalized profile (integral = 1)
        return p / p.sum() / t[1]

    # define inlet with single specie
    # noinspection DuplicatedCode
    def run_emg_fixed_dispersion(self, t, rt_mean_list, sigma_list, skew_list, cutoff, ignore_logger=True) -> None:
        """Compare ExpModGaussianFixedDispersion against the reference over a
        grid of (skew, sigma, rt_mean); skips combinations whose peak max
        would land at t < 0."""
        for skew in skew_list:
            for sigma in sigma_list:
                for rt_mean in rt_mean_list:
                    if rt_mean < 1 / skew:  # peak max would be at t < 0
                        continue
                    p = self.get_emg_pdf_with_cutoff(t, rt_mean, sigma, skew, cutoff)
                    pd = pdf.ExpModGaussianFixedDispersion(t,
                                                           dispersion_index=sigma ** 2 / rt_mean,
                                                           skew=skew)
                    if ignore_logger:
                        pd.log.log_level = pd.log.ERROR + 10
                    else:
                        pd.log = logger.StrictLogger()
                    pd.cutoff_relative_to_max = cutoff
                    pd.update_pdf(rt_mean=rt_mean)
                    np.testing.assert_array_almost_equal(pd.get_p(), p, 3)
                    # Fixed dispersion index: halving rt_mean gives sigma / sqrt(2).
                    pd.update_pdf(rt_mean=rt_mean / 2)
                    p = self.get_emg_pdf_with_cutoff(t, rt_mean / 2, sigma / np.sqrt(2), skew, cutoff)
                    np.testing.assert_array_almost_equal(pd.get_p(), p, 3)
                    pd.update_pdf(rt_mean=rt_mean / 2, skew=skew / 2)
                    p = self.get_emg_pdf_with_cutoff(t, rt_mean / 2, sigma / np.sqrt(2), skew / 2, cutoff)
                    np.testing.assert_array_almost_equal(pd.get_p(), p, 3)

    # define inlet with single specie
    def test_emg_fixed_dispersion(self) -> None:
        t = np.linspace(0, 100, 1500)
        self.run_emg_fixed_dispersion(t,
                                      rt_mean_list=[5, 10, 0.4, 100],
                                      sigma_list=[2, 5.5, 30],
                                      skew_list=[1, 1 / 2, 1 / 4, 1 / 20],
                                      cutoff=0.001)
        t = np.linspace(0, 900, 901)
        self.run_emg_fixed_dispersion(t,
                                      rt_mean_list=[5, 0.4, 100],
                                      sigma_list=[2, 5.5, 30],
                                      skew_list=[1, 1 / 2, 1 / 4, 1 / 20],
                                      cutoff=0.001)
        # Make sure logger gets passed to emg.
        with self.assertRaises(RuntimeError):
            self.run_emg_fixed_dispersion(t,
                                          rt_mean_list=[5],
                                          sigma_list=[2],
                                          skew_list=[1],
                                          cutoff=0.001,
                                          ignore_logger=False)

    # define inlet with single specie
    # noinspection DuplicatedCode
    def run_emg_fixed_relative_width(self, t, rt_mean_list, sigma_list,
                                     skew_list, cutoff, ignore_logger=True) -> None:
        """Compare ExpModGaussianFixedRelativeWidth against the reference over
        a grid of (skew, sigma, rt_mean); skips combinations whose peak max
        would land at t < 0."""
        for skew in skew_list:
            for sigma in sigma_list:
                for rt_mean in rt_mean_list:
                    if rt_mean < 1 / skew:  # peak max would be at t < 0
                        continue
                    p = self.get_emg_pdf_with_cutoff(t,
                                                     rt_mean,
                                                     sigma,
                                                     skew,
                                                     cutoff)
                    pd = pdf.ExpModGaussianFixedRelativeWidth(
                        t,
                        sigma_relative=sigma / rt_mean,
                        tau_relative=1 / skew / rt_mean)
                    if ignore_logger:
                        pd.log.log_level = pd.log.ERROR + 10
                    else:
                        pd.log = logger.StrictLogger()
                    pd.cutoff_relative_to_max = cutoff
                    pd.update_pdf(rt_mean=rt_mean)
                    np.testing.assert_array_almost_equal(pd.get_p(), p, 3)
                    # Fixed relative width: halving rt_mean halves sigma and doubles skew.
                    pd.update_pdf(rt_mean=rt_mean / 2)
                    p = self.get_emg_pdf_with_cutoff(t,
                                                     rt_mean / 2,
                                                     sigma / 2,
                                                     skew * 2,
                                                     cutoff)
                    np.testing.assert_array_almost_equal(pd.get_p(), p, 3)
                    pd.update_pdf(rt_mean=rt_mean / 2, skew=skew / 3)
                    p = self.get_emg_pdf_with_cutoff(t,
                                                     rt_mean / 2,
                                                     sigma / 2,
                                                     skew / 3,
                                                     cutoff)
                    np.testing.assert_array_almost_equal(pd.get_p(), p, 3)

    # define inlet with single specie
    def test_emg_fixed_relative_width(self) -> None:
        t = np.linspace(0, 100, 1500)
        self.run_emg_fixed_relative_width(t,
                                          rt_mean_list=[5, 10, 0.4, 100],
                                          sigma_list=[2, 5.5, 30],
                                          skew_list=[1, 1 / 2, 1 / 4, 1 / 20],
                                          cutoff=0.001)
        t = np.linspace(0, 900, 901)
        self.run_emg_fixed_relative_width(t,
                                          rt_mean_list=[5, 0.4, 100],
                                          sigma_list=[2, 5.5, 30],
                                          skew_list=[1, 1 / 2, 1 / 4, 1 / 20],
                                          cutoff=0.001)
        # Test binding with logger.
        with self.assertRaises(RuntimeError):
            self.run_emg_fixed_relative_width(t,
                                              rt_mean_list=[5],
                                              sigma_list=[2],
                                              skew_list=[1],
                                              cutoff=0.001,
                                              ignore_logger=False)
class TanksInSeriesTest(unittest.TestCase):
    """Tests for the tanks-in-series pdf in bio_rtd.pdf."""

    @staticmethod
    def get_tanks_in_series_pdf_with_cutoff(t, rt_mean, n_tanks, cutoff) -> np.ndarray:
        """Reference implementation: tanks-in-series peak trimmed at `cutoff`
        of its maximum and re-normalized so its integral over `t` equals 1."""
        p: np.ndarray = peak_shapes.tanks_in_series(t, rt_mean, n_tanks)
        # cut at end
        p = p[:true_end(p >= cutoff * p.max())]
        # set to 0 at front
        p[:true_start(p >= cutoff * p.max())] = 0
        # return normalized profile (integral = 1)
        return p / p.sum() / t[1]

    # define inlet with single specie
    # noinspection DuplicatedCode
    def run_tanks_in_series(self, t, rt_mean_list, n_tank_list, cutoff, ignore_logger=True) -> None:
        """Compare pdf.TanksInSeries against the reference for each
        (n_tanks, rt_mean) pair; with ignore_logger=False a StrictLogger is
        attached so logged issues raise."""
        for n_tanks in n_tank_list:
            for rt_mean in rt_mean_list:
                p = self.get_tanks_in_series_pdf_with_cutoff(t, rt_mean, n_tanks, cutoff)
                pd = pdf.TanksInSeries(t, n_tanks)
                if ignore_logger:
                    pd.log.log_level = pd.log.ERROR + 10
                else:
                    pd.log = logger.StrictLogger()
                pd.cutoff_relative_to_max = cutoff
                pd.update_pdf(rt_mean=rt_mean)
                np.testing.assert_array_almost_equal(pd.get_p(), p, 3)
                pd.update_pdf(rt_mean=rt_mean / 2)
                p = self.get_tanks_in_series_pdf_with_cutoff(t, rt_mean / 2, n_tanks, cutoff)
                np.testing.assert_array_almost_equal(pd.get_p(), p, 3)

    # define inlet with single specie
    def test_tanks_in_series(self) -> None:
        t = np.linspace(0, 100, 1500)
        self.run_tanks_in_series(t,
                                 rt_mean_list=[5, 10, 0.4, 100],
                                 n_tank_list=[1, 2, 15, 30],
                                 cutoff=0.001)
        t = np.linspace(0, 900, 901)
        self.run_tanks_in_series(t,
                                 rt_mean_list=[5, 10, 400, 100],
                                 n_tank_list=[1, 2, 15, 30],
                                 cutoff=0.001)
        # rt_mean far beyond the time grid must raise via the strict logger.
        with self.assertRaises(RuntimeError):
            self.run_tanks_in_series(t,
                                     rt_mean_list=[20000],
                                     n_tank_list=[2],
                                     cutoff=0.001,
                                     ignore_logger=False)
| 49.820896
| 115
| 0.468919
| 1,576
| 13,352
| 3.71066
| 0.08566
| 0.083105
| 0.038304
| 0.03591
| 0.883037
| 0.852257
| 0.821819
| 0.784713
| 0.742305
| 0.706566
| 0
| 0.054531
| 0.443754
| 13,352
| 267
| 116
| 50.007491
| 0.732867
| 0.056246
| 0
| 0.720721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085586
| 1
| 0.058559
| false
| 0
| 0.018018
| 0
| 0.103604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
107622d2d82b21cf8a5297cfe93d5e246ea8e7b8
| 129
|
py
|
Python
|
visualize/grid_attention_visualization/__init__.py
|
rentainhe/visualization
|
d9a8aac82ac6049b3fd59754c6ccf3bd60206218
|
[
"MIT"
] | 169
|
2021-03-14T13:35:07.000Z
|
2022-03-29T12:21:18.000Z
|
visualize/grid_attention_visualization/__init__.py
|
liwanzhao/visualization
|
0cd047a02957019f9b551f016db21b5117e1cfcd
|
[
"MIT"
] | 6
|
2021-09-29T08:15:53.000Z
|
2022-03-13T10:08:29.000Z
|
visualize/grid_attention_visualization/__init__.py
|
liwanzhao/visualization
|
0cd047a02957019f9b551f016db21b5117e1cfcd
|
[
"MIT"
] | 31
|
2021-03-14T13:35:11.000Z
|
2022-03-25T11:10:45.000Z
|
from .visualize_attention_map import visualize_grid_attention
from .visualize_attention_map_V2 import visualize_grid_attention_v2
| 64.5
| 67
| 0.930233
| 18
| 129
| 6.111111
| 0.388889
| 0.236364
| 0.4
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.054264
| 129
| 2
| 67
| 64.5
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
107bb5ad6c81f8a82ea1bcbd831d2bbb0aa38796
| 127
|
py
|
Python
|
procuret/integrate/xero/__init__.py
|
Procuret/procuret-python
|
2f49cbd3454e33986c84a6c32c0f0ab8f60d4b82
|
[
"MIT"
] | null | null | null |
procuret/integrate/xero/__init__.py
|
Procuret/procuret-python
|
2f49cbd3454e33986c84a6c32c0f0ab8f60d4b82
|
[
"MIT"
] | null | null | null |
procuret/integrate/xero/__init__.py
|
Procuret/procuret-python
|
2f49cbd3454e33986c84a6c32c0f0ab8f60d4b82
|
[
"MIT"
] | 1
|
2020-10-28T14:26:21.000Z
|
2020-10-28T14:26:21.000Z
|
from procuret.integrate.xero.entity_map import XeroEntityMap
from procuret.integrate.xero.organisation import XeroOrganisation
| 42.333333
| 65
| 0.889764
| 15
| 127
| 7.466667
| 0.666667
| 0.214286
| 0.375
| 0.446429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062992
| 127
| 2
| 66
| 63.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
10b52531bd78d3cd984398a99729052f4c8bca39
| 1,070
|
py
|
Python
|
avencoding/__init__.py
|
CraftYun83/AVEncoding
|
a6dc603f29e35071c25a1c48cd5464c15ddcafd1
|
[
"MIT"
] | null | null | null |
avencoding/__init__.py
|
CraftYun83/AVEncoding
|
a6dc603f29e35071c25a1c48cd5464c15ddcafd1
|
[
"MIT"
] | null | null | null |
avencoding/__init__.py
|
CraftYun83/AVEncoding
|
a6dc603f29e35071c25a1c48cd5464c15ddcafd1
|
[
"MIT"
] | null | null | null |
def encode(string, level):
    """Base64-encode *string* repeatedly.

    :param string: ASCII text to encode.
    :param level: number of encoding passes. Values of 1 or less behave as a
        single pass — this matches the original implementation, whose
        ``if level == 1 or 0:`` condition was effectively ``level == 1``
        (the ``or 0`` operand is always falsy) and whose ``range(level - 1)``
        was empty for ``level <= 1``.
    :return: the (repeatedly) base64-encoded ASCII string.
    """
    import base64
    message = string
    # Always apply at least one pass; each pass re-encodes the previous result.
    for _ in range(max(level, 1)):
        message_bytes = message.encode('ascii')
        base64_bytes = base64.b64encode(message_bytes)
        message = base64_bytes.decode('ascii')
    return message
def decode(string, level):
    """Base64-decode *string* repeatedly (inverse of :func:`encode`).

    :param string: base64 text to decode.
    :param level: number of decoding passes. Values of 1 or less behave as a
        single pass — this matches the original implementation, whose
        ``if level == 1 or 0:`` condition was effectively ``level == 1``
        (the ``or 0`` operand is always falsy) and whose ``range(level - 1)``
        was empty for ``level <= 1``.
    :return: the (repeatedly) decoded ASCII string.
    """
    import base64
    message = string
    # Always apply at least one pass; each pass decodes the previous result.
    for _ in range(max(level, 1)):
        message_bytes = message.encode('ascii')
        base64_bytes = base64.b64decode(message_bytes)
        message = base64_bytes.decode('ascii')
    return message
| 32.424242
| 59
| 0.635514
| 122
| 1,070
| 5.360656
| 0.180328
| 0.238532
| 0.165138
| 0.229358
| 0.972477
| 0.972477
| 0.972477
| 0.972477
| 0.972477
| 0.972477
| 0
| 0.079692
| 0.272897
| 1,070
| 33
| 60
| 32.424242
| 0.760925
| 0
| 0
| 0.928571
| 0
| 0
| 0.038499
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
52f2e80c2369df89ec3d58dc0d2f2b00731ef0c1
| 31,077
|
py
|
Python
|
test/TestCopyDataToPersonFormEventTree.py
|
ruchidesai/redi
|
562706805fb6bb472f38ba79094a57c72848492e
|
[
"BSD-3-Clause"
] | null | null | null |
test/TestCopyDataToPersonFormEventTree.py
|
ruchidesai/redi
|
562706805fb6bb472f38ba79094a57c72848492e
|
[
"BSD-3-Clause"
] | null | null | null |
test/TestCopyDataToPersonFormEventTree.py
|
ruchidesai/redi
|
562706805fb6bb472f38ba79094a57c72848492e
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import os
from lxml import etree
import redi
# Directory of this test file.
file_dir = os.path.dirname(os.path.realpath(__file__))
# One level up from the test directory.
goal_dir = os.path.join(file_dir, "../")
# Absolute project root with a trailing separator.
proj_root = os.path.abspath(goal_dir)+'/'
# Working directory used as the default data/log directory for redi.
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCopyDataToPersonFormEventTree(unittest.TestCase):
    def setUp(self):
        """Configure redi logging and build the shared form/event metadata
        tree: three forms (cbc, chemistry, inr), each mapped to event
        '1_arm_1'.

        NOTE(review): leading whitespace inside the XML literals may have
        been lost upstream; whitespace in text nodes affects the
        etree.tostring equality checks in the tests — confirm against the
        original file.
        """
        redi.configure_logging(DEFAULT_DATA_DIRECTORY, False)
        self.form_event_tree = """<?xml version='1.0' encoding='US-ASCII'?>
<redcapProject>
<name>Project</name>
<form>
<name>cbc</name>
<formDateField>cbc_lbdtc</formDateField>
<formCompletedFieldName>cbc_complete</formCompletedFieldName>
<formImportedFieldName>cbc_nximport</formImportedFieldName>
<formCompletedFieldValue>2</formCompletedFieldValue>
<formImportedFieldValue>Y</formImportedFieldValue>
<event>
<name>1_arm_1</name>
</event>
</form>
<form>
<name>chemistry</name>
<formDateField>chem_lbdtc</formDateField>
<formCompletedFieldName>chemistry_complete</formCompletedFieldName>
<formImportedFieldName>chem_nximport</formImportedFieldName>
<formCompletedFieldValue>2</formCompletedFieldValue>
<formImportedFieldValue>Y</formImportedFieldValue>
<event>
<name>1_arm_1</name>
</event>
</form>
<form>
<name>inr</name>
<formDateField>inr_lbdtc</formDateField>
<formCompletedFieldName>inr_complete</formCompletedFieldName>
<formImportedFieldName>inr_nximport</formImportedFieldName>
<formCompletedFieldValue>2</formCompletedFieldValue>
<formImportedFieldValue>Y</formImportedFieldValue>
<event>
<name>1_arm_1</name>
</event>
</form>
</redcapProject>
"""
        self.data_form_event_tree = etree.ElementTree(etree.fromstring(self.form_event_tree))
        return()
    def test_copy_data_with_blank_reference_unit(self):
        """A subject with an empty <REFERENCE_UNIT/> must copy the RESULT
        value but leave the units field (hemo_lborresu) blank."""
        self.person_form_event_tree = """<person_form_event><person><study_id>123</study_id><all_form_events><form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>hemo_lborres</name><value/></field>
<field><name>hemo_lborresu</name><value/></field>
<field><name>cbc_complete</name><value/></field>
<field><name>cbc_nximport</name><value/></field></event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        self.one_subject = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT/>
<STUDY_ID>123</STUDY_ID>
<timestamp>1906-12-25</timestamp>
<redcapFormName>cbc</redcapFormName>
<eventName>1_arm_1</eventName>
<formDateField>cbc_lbdtc</formDateField>
<formCompletedFieldName>cbc_complete</formCompletedFieldName>
<formImportedFieldName>cbc_nximport</formImportedFieldName>
<redcapFieldNameValue>hemo_lborres</redcapFieldNameValue>
<redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits>
<redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_one_subject= etree.ElementTree(etree.fromstring(self.one_subject))
        self.result = etree.tostring(redi.copy_data_to_person_form_event_tree(self.data_one_subject,self.data_person_form_event_tree,self.data_form_event_tree))
        # Expected: value filled with 987, units left empty, completion/import
        # markers set from the form metadata (2 / Y).
        self.output = """<person_form_event><person><study_id>123</study_id><all_form_events><form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>hemo_lborres</name><value>987</value></field>
<field><name>hemo_lborresu</name><value/></field>
<field><name>cbc_complete</name><value>2</value></field>
<field><name>cbc_nximport</name><value>Y</value></field></event>
</form>
</all_form_events></person></person_form_event>"""
        self.expect = etree.tostring(etree.fromstring(self.output))
        self.assertEqual(self.expect, self.result)
    def test_copy_data_to_person_form_event_tree_one_person(self):
        """A single subject with a populated REFERENCE_UNIT must have both the
        value (987) and the unit (g/dL) copied into the person tree."""
        self.person_form_event_tree = """<person_form_event><person><study_id>123</study_id><all_form_events><form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>hemo_lborres</name><value/></field>
<field><name>hemo_lborresu</name><value/></field>
<field><name>cbc_complete</name><value/></field>
<field><name>cbc_nximport</name><value/></field></event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        self.one_subject = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>123</STUDY_ID>
<timestamp>1906-12-25</timestamp>
<redcapFormName>cbc</redcapFormName>
<eventName>1_arm_1</eventName>
<formDateField>cbc_lbdtc</formDateField>
<formCompletedFieldName>cbc_complete</formCompletedFieldName>
<formImportedFieldName>cbc_nximport</formImportedFieldName>
<redcapFieldNameValue>hemo_lborres</redcapFieldNameValue>
<redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits>
<redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_one_subject= etree.ElementTree(etree.fromstring(self.one_subject))
        self.result = etree.tostring(redi.copy_data_to_person_form_event_tree(self.data_one_subject,self.data_person_form_event_tree,self.data_form_event_tree))
        self.output = """<person_form_event><person><study_id>123</study_id><all_form_events><form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>hemo_lborres</name><value>987</value></field>
<field><name>hemo_lborresu</name><value>g/dL</value></field>
<field><name>cbc_complete</name><value>2</value></field>
<field><name>cbc_nximport</name><value>Y</value></field></event>
</form>
</all_form_events></person></person_form_event>"""
        self.expect = etree.tostring(etree.fromstring(self.output))
        self.assertEqual(self.expect, self.result)
    def test_copy_data_to_person_form_event_tree_two_persons(self):
        """Two subjects (study_id 456 -> inr form, study_id 123 -> cbc form)
        must each be routed to the matching person/form in the tree; the inr
        date field is filled from the subject timestamp."""
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field>
<field><name>inr_complete</name><value/></field>
<field><name>inr_nximport</name><value/></field></event>
</form>
</all_form_events></person>
<person><study_id>123</study_id><all_form_events><form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>hemo_lborres</name><value></value></field>
<field><name>cbc_complete</name><value/></field>
<field><name>cbc_nximport</name><value/></field></event>
</form>
</all_form_events></person>
</person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        self.two_subjects = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>123</STUDY_ID>
<timestamp>1906-12-25</timestamp><redcapFormName>cbc</redcapFormName><eventName>1_arm_1</eventName><formDateField>cbc_lbdtc</formDateField><formCompletedFieldName>cbc_complete</formCompletedFieldName><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>123</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-25</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formDateField>inr_lbdtc</formDateField><formCompletedFieldName>inr_complete</formCompletedFieldName><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_two_subjects= etree.ElementTree(etree.fromstring(self.two_subjects))
        self.result = etree.tostring(redi.copy_data_to_person_form_event_tree(self.data_two_subjects,self.data_person_form_event_tree,self.data_form_event_tree))
        self.output = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value>1906-12-25</value></field>
<field><name>inr_complete</name><value>2</value></field>
<field><name>inr_nximport</name><value>Y</value></field></event>
</form>
</all_form_events></person>
<person><study_id>123</study_id><all_form_events><form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>hemo_lborres</name><value>987</value></field>
<field><name>cbc_complete</name><value>2</value></field>
<field><name>cbc_nximport</name><value>Y</value></field></event>
</form>
</all_form_events></person>
</person_form_event>"""
        self.expect = etree.tostring(etree.fromstring(self.output))
        self.assertEqual(self.expect, self.result)
    def test_copy_data_to_person_form_event_tree_zero_forms(self):
        """copy_data_to_person_form_event_tree() must raise when the
        person_form_event tree carries no form entries to copy data into."""
        # PFE skeleton for study 456 with no <all_form_events> children.
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id>
</person>
</person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # One raw 'inr' result for subject 456; there is no matching form slot.
        self.zero_form = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>123</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-25</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formDateField>inr_lbdtc</formDateField><formCompletedFieldName>inr_complete</formCompletedFieldName><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_zero_form= etree.ElementTree(etree.fromstring(self.zero_form))
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_zero_form,self.data_person_form_event_tree,self.data_form_event_tree)
    def test_copy_data_to_person_form_event_tree_two_forms(self):
        """Two subject records targeting two different forms ('cbc' and 'inr')
        for the same study id should each fill in their own form's fields."""
        # Empty PFE skeleton containing both forms for event 1_arm_1.
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>cbc_lbdtc</name><value/></field>
<field><name>cbc_complete</name><value/></field>
<field><name>cbc_nximport</name><value/></field></event>
</form>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field>
<field><name>inr_complete</name><value/></field>
<field><name>inr_nximport</name><value/></field>
</event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # Raw data: one 'cbc' record and one 'inr' record for subject 456.
        self.two_subjects = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>cbc</redcapFormName><eventName>1_arm_1</eventName><formDateField>cbc_lbdtc</formDateField><formCompletedFieldName>cbc_complete</formCompletedFieldName><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>123</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-25</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formDateField>inr_lbdtc</formDateField><formCompletedFieldName>inr_complete</formCompletedFieldName><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_two_subjects= etree.ElementTree(etree.fromstring(self.two_subjects))
        self.result = etree.tostring(redi.copy_data_to_person_form_event_tree(self.data_two_subjects,self.data_person_form_event_tree,self.data_form_event_tree))
        # Expected: each form's date/complete/import fields populated.
        self.output = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>cbc</name>
<event>
<name>1_arm_1</name>
<field><name>cbc_lbdtc</name><value>1906-12-26</value></field>
<field><name>cbc_complete</name><value>2</value></field>
<field><name>cbc_nximport</name><value>Y</value></field></event>
</form>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value>1906-12-25</value></field>
<field><name>inr_complete</name><value>2</value></field>
<field><name>inr_nximport</name><value>Y</value></field>
</event>
</form>
</all_form_events></person></person_form_event>"""
        self.expect = etree.tostring(etree.fromstring(self.output))
        self.assertEqual(self.expect, self.result)
    def test_copy_data_to_person_form_event_tree_two_events(self):
        """Two subject records for the same 'inr' form but different events
        (1_arm_1 and 2_arm_1) should each land in their matching event."""
        # Empty PFE skeleton: one form with two event occurrences.
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field>
<field><name>inr_complete</name><value/></field>
<field><name>inr_nximport</name><value/></field>
</event>
<event>
<name>2_arm_1</name>
<field><name>inr_lbdtc</name><value/></field>
<field><name>inr_complete</name><value/></field>
<field><name>inr_nximport</name><value/></field>
</event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # Raw data: same subject/form, differing only in eventName/timestamp.
        self.two_subjects = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formDateField>inr_lbdtc</formDateField><formCompletedFieldName>inr_complete</formCompletedFieldName><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>123</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-25</timestamp><redcapFormName>inr</redcapFormName><eventName>2_arm_1</eventName><formDateField>inr_lbdtc</formDateField><formCompletedFieldName>inr_complete</formCompletedFieldName><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_two_subjects= etree.ElementTree(etree.fromstring(self.two_subjects))
        self.result = etree.tostring(redi.copy_data_to_person_form_event_tree(self.data_two_subjects,self.data_person_form_event_tree,self.data_form_event_tree))
        # Expected: each event block filled with its own record's date.
        self.output = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value>1906-12-26</value></field>
<field><name>inr_complete</name><value>2</value></field>
<field><name>inr_nximport</name><value>Y</value></field>
</event>
<event>
<name>2_arm_1</name>
<field><name>inr_lbdtc</name><value>1906-12-25</value></field>
<field><name>inr_complete</name><value>2</value></field>
<field><name>inr_nximport</name><value>Y</value></field>
</event>
</form>
</all_form_events></person></person_form_event>"""
        self.expect = etree.tostring(etree.fromstring(self.output))
        self.assertEqual(self.expect, self.result)
    def test_form_date_field_pair(self):
        """A subject record must carry <timestamp> and <formDateField> as a
        pair; either one alone should make the copy raise."""
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field></event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # Has <timestamp> but no <formDateField>.
        self.form_date_field = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formCompletedFieldName>cbc_complete</formCompletedFieldName><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_form_date_field= etree.ElementTree(etree.fromstring(self.form_date_field))
        # Has <formDateField> but no <timestamp>.
        self.form_date_value_field = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formCompletedFieldName>cbc_complete</formCompletedFieldName><formDateField>cbc_lbdtc</formDateField><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_form_date_value_field= etree.ElementTree(etree.fromstring(self.form_date_value_field))
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_form_date_field,self.data_person_form_event_tree,self.data_form_event_tree)
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_form_date_value_field,self.data_person_form_event_tree,self.data_form_event_tree)
    def test_redcap_field_name_pair(self):
        """<redcapFieldNameValue> and the raw <RESULT> value must appear as a
        pair; a record missing either one should make the copy raise."""
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field></event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # Has <RESULT> but no <redcapFieldNameValue>.
        self.redcap_field_name = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formCompletedFieldName>cbc_complete</formCompletedFieldName><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_redcap_field_name= etree.ElementTree(etree.fromstring(self.redcap_field_name))
        # Has <redcapFieldNameValue> but no <RESULT>.
        self.redcap_value_field = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formCompletedFieldName>cbc_complete</formCompletedFieldName><formDateField>cbc_lbdtc</formDateField><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_redcap_value_field= etree.ElementTree(etree.fromstring(self.redcap_value_field))
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_redcap_field_name,self.data_person_form_event_tree,self.data_form_event_tree)
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_redcap_value_field,self.data_person_form_event_tree,self.data_form_event_tree)
    def test_redcap_field_units_pair(self):
        """<redcapFieldNameUnits> and the raw <REFERENCE_UNIT> value must
        appear as a pair; a record missing either one should raise."""
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field></event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # Has <REFERENCE_UNIT> but no <redcapFieldNameUnits>.
        self.redcap_field_units_name = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formDateField>cbc_lbdtc</formDateField><formCompletedFieldName>cbc_complete</formCompletedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_redcap_field_units_name= etree.ElementTree(etree.fromstring(self.redcap_field_units_name))
        # Has <redcapFieldNameUnits> but no <REFERENCE_UNIT>.
        self.redcap_units_value_field = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formCompletedFieldName>cbc_complete</formCompletedFieldName><formDateField>cbc_lbdtc</formDateField><formImportedFieldName>cbc_nximport</formImportedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_redcap_units_value_field= etree.ElementTree(etree.fromstring(self.redcap_units_value_field))
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_redcap_field_units_name,self.data_person_form_event_tree,self.data_form_event_tree)
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_redcap_units_value_field,self.data_person_form_event_tree,self.data_form_event_tree)
    def test_Form_Completed_Field(self):
        """An empty <formCompletedFieldValue> in the form-event definition
        should make the copy raise."""
        # Form definition with formCompletedFieldValue deliberately empty.
        self.form_event_tree = """<?xml version='1.0' encoding='US-ASCII'?>
<redcapProject>
<name>Project</name>
<form>
<name>inr</name>
<formDateField>inr_lbdtc</formDateField>
<formCompletedFieldName>inr_complete</formCompletedFieldName>
<formImportedFieldName>inr_nximport</formImportedFieldName>
<formCompletedFieldValue></formCompletedFieldValue>
<formImportedFieldValue>Y</formImportedFieldValue>
<event>
<name>1_arm_1</name>
</event>
</form>
</redcapProject>
"""
        self.data_form_event_tree = etree.ElementTree(etree.fromstring(self.form_event_tree))
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field>
<field><name>inr_complete</name><value/></field>
<field><name>inr_nximport</name><value/></field>
</event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # A well-formed subject record; the defect is in the form definition.
        self.Form_Completed_Field = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formDateField>inr_lbdtc</formDateField><formCompletedFieldName>inr_complete</formCompletedFieldName><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_Form_Completed_Field= etree.ElementTree(etree.fromstring(self.Form_Completed_Field))
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_Form_Completed_Field,self.data_person_form_event_tree,self.data_form_event_tree)
    def test_Form_Imported_Field(self):
        """An empty <formImportedFieldValue> in the form-event definition
        should make the copy raise."""
        # Form definition with formImportedFieldValue deliberately empty.
        self.form_event_tree = """<?xml version='1.0' encoding='US-ASCII'?>
<redcapProject>
<name>Project</name>
<form>
<name>inr</name>
<formDateField>inr_lbdtc</formDateField>
<formCompletedFieldName>inr_complete</formCompletedFieldName>
<formImportedFieldName>inr_nximport</formImportedFieldName>
<formCompletedFieldValue>2</formCompletedFieldValue>
<formImportedFieldValue></formImportedFieldValue>
<event>
<name>1_arm_1</name>
</event>
</form>
</redcapProject>
"""
        self.data_form_event_tree = etree.ElementTree(etree.fromstring(self.form_event_tree))
        self.person_form_event_tree = """<person_form_event><person><study_id>456</study_id><all_form_events>
<form>
<name>inr</name>
<event>
<name>1_arm_1</name>
<field><name>inr_lbdtc</name><value/></field>
<field><name>inr_complete</name><value/></field>
<field><name>inr_nximport</name><value/></field>
</event>
</form>
</all_form_events></person></person_form_event>
"""
        self.data_person_form_event_tree = etree.ElementTree(etree.fromstring(self.person_form_event_tree))
        # A well-formed subject record; the defect is in the form definition.
        self.Form_Imported_Field = """<?xml version='1.0' encoding='US-ASCII'?>
<study>
<subject>
<NAME>TestSubject_2</NAME>
<RESULT>987</RESULT>
<REFERENCE_UNIT>g/dL</REFERENCE_UNIT>
<STUDY_ID>456</STUDY_ID>
<timestamp>1906-12-26</timestamp><redcapFormName>inr</redcapFormName><eventName>1_arm_1</eventName><formCompletedFieldName>inr_complete</formCompletedFieldName><formDateField>inr_lbdtc</formDateField><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><formImportedFieldName>inr_nximport</formImportedFieldName><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits><redcapFieldNameStatus>hemo_lbstat</redcapFieldNameStatus></subject>
</study>
"""
        self.data_Form_Imported_Field = etree.ElementTree(etree.fromstring(self.Form_Imported_Field))
        self.assertRaises(Exception,redi.copy_data_to_person_form_event_tree,self.data_Form_Imported_Field,self.data_person_form_event_tree,self.data_form_event_tree)
def tearDown(self):
return()
if __name__ == '__main__':
    # Allow running this test module directly with the stock unittest runner.
    unittest.main()
| 51.451987
| 450
| 0.678894
| 3,420
| 31,077
| 5.865205
| 0.03538
| 0.05429
| 0.073284
| 0.062516
| 0.961065
| 0.955332
| 0.954335
| 0.946109
| 0.931253
| 0.931253
| 0
| 0.018519
| 0.186826
| 31,077
| 603
| 451
| 51.537313
| 0.775236
| 0.000804
| 0
| 0.851782
| 0
| 0.037523
| 0.731981
| 0.499936
| 0
| 0
| 0
| 0
| 0.026266
| 1
| 0.02439
| false
| 0
| 0.099437
| 0.001876
| 0.125704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5e2cf6d47eccbca0c66d2ea2a9a975b2b504e47b
| 2,780
|
py
|
Python
|
wio/termui.py
|
Seeed-Studio/wio-cli
|
ce83f4c2d30be7f72d1a128acd123dfc5effa563
|
[
"MIT"
] | 29
|
2016-03-28T12:32:51.000Z
|
2020-04-20T08:00:10.000Z
|
wio/termui.py
|
Seeed-Studio/wio-cli
|
ce83f4c2d30be7f72d1a128acd123dfc5effa563
|
[
"MIT"
] | 10
|
2016-05-27T10:35:48.000Z
|
2021-08-20T18:11:08.000Z
|
wio/termui.py
|
Seeed-Studio/wio-cli
|
ce83f4c2d30be7f72d1a128acd123dfc5effa563
|
[
"MIT"
] | 13
|
2016-01-25T03:34:02.000Z
|
2021-11-30T09:05:06.000Z
|
from time import sleep
import click
import threading
class waiting_echo(threading.Thread):
    """Background thread that renders an ASCII spinner next to a status
    message until stop() is called.

    The run loop previously repeated the identical three-echo redraw block
    four times, once per spinner character; it is now a single loop over the
    frame characters with identical output and timing.
    """

    # Spinner frames, drawn in this order at one frame per 0.1 s.
    _FRAMES = '-\\|/'

    def __init__(self, msg):
        threading.Thread.__init__(self)
        self.msg = msg          # current status text (update via message())
        self.exiting = False    # set by stop() to end the run loop
        self.flag = True        # kept for backward compatibility; unused here

    def run(self):
        while not self.exiting:
            # One full spinner cycle; exiting is re-checked per cycle,
            # matching the original behavior.
            for frame in self._FRAMES:
                click.echo("\r%s%s" % (frame, self.msg), nl=False)
                # Blank the rest of an 80-column line, then back the cursor
                # up so the next frame overwrites in place.
                click.echo(" "*(80-len(self.msg)), nl=False)
                click.echo("\b"*(80-len(self.msg)), nl=False)
                sleep(0.1)
        # Clear the line so the final message prints cleanly.
        click.echo('\r' + " "*(80-len(self.msg)), nl=False)
        click.echo('\r', nl=False)

    def message(self, msg):
        """Replace the text shown next to the spinner."""
        self.msg = msg

    def stop(self, msg):
        """Ask the spinner thread to exit, leaving *msg* as the final text."""
        self.exiting = True
        self.msg = msg
def tree(list):
    """Print the given Wio device list as an ASCII tree on the terminal.

    Online devices are shown in green, offline ones in cyan; each device is
    followed by its serial number, token, API url and well-known APIs.

    NOTE(review): the parameter shadows the builtin ``list``; the name is
    kept only to preserve keyword-call compatibility for existing callers.

    The original duplicated the entire per-device printing between the loop
    body and a trailing last-element branch (differing only in the child
    prefix); that is now a single helper.
    """
    if not list:
        click.echo('No Wio devices could be found.')
        return

    def _echo_device(dev, child_prefix):
        # Header line, colored by online state.
        click.echo('|-- ', nl=False)
        label = dev['name'] + ' (%s) [%s]' % (dev['onoff'], dev['board'].split()[1])
        if dev['online']:
            click.secho(label, fg='green')
        else:
            click.secho(label, fg='cyan')
        # Detail lines all share the same child prefix.
        for text in ('sn: ' + dev['node_sn'],
                     'token: ' + dev['node_key'],
                     'API url: ' + dev['resources'],
                     'APIs: '):
            click.echo(child_prefix + '|-- ', nl=False)
            click.echo(text)
        for api in dev['well_known']:
            click.echo(child_prefix + '|-- ', nl=False)
            click.echo(api)

    # Every entry but the last keeps a '|' rail in front of its children;
    # the last entry's children hang under empty space.
    for dev in list[:-1]:
        _echo_device(dev, '| ')
    _echo_device(list[-1], ' ')
| 36.103896
| 98
| 0.481655
| 375
| 2,780
| 3.530667
| 0.170667
| 0.251511
| 0.172205
| 0.229607
| 0.758308
| 0.758308
| 0.758308
| 0.758308
| 0.73716
| 0.73716
| 0
| 0.016194
| 0.289209
| 2,780
| 76
| 99
| 36.578947
| 0.653846
| 0
| 0
| 0.652778
| 0
| 0
| 0.147122
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069444
| false
| 0
| 0.041667
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e3b93cab656e4372bb130c7e09e6b5ac60e21fd
| 9,112
|
py
|
Python
|
Python/REST/rest.py
|
alexanderkrauck/Weatherstation
|
bee84d974ee53ae9700681b9e22898137d95f932
|
[
"WTFPL"
] | null | null | null |
Python/REST/rest.py
|
alexanderkrauck/Weatherstation
|
bee84d974ee53ae9700681b9e22898137d95f932
|
[
"WTFPL"
] | 5
|
2018-01-11T11:53:06.000Z
|
2018-03-01T12:17:57.000Z
|
Python/REST/rest.py
|
alexanderkrauck/Weatherstation
|
bee84d974ee53ae9700681b9e22898137d95f932
|
[
"WTFPL"
] | null | null | null |
from flask import Flask, url_for
from flask import Response
import flask
import web
import json
#import responses
import requests
import mysql.connector
import logging
app = Flask(__name__)
log = logging.getLogger('werkzeug') # flask's underlying WSGI server logger
log.setLevel(logging.CRITICAL)  # silence per-request access logging
# Database connection settings.
# NOTE(review): hard-coded credentials; presumably acceptable for the "db"
# container in a local compose setup — confirm before any other deployment.
mysql_host = "db"
mysql_user = "root"
mysql_password = "passme"
mysql_db = "weather"
class weather_measurement(object):
    """Plain data holder mirroring one WEATHER_MEASUREMENT table row.

    The class-level values are defaults; callers overwrite them attribute by
    attribute, so only assigned attributes appear in the JSON output.
    """
    ambient_temperature = 0.0
    ground_temperature = 0.0
    air_quality = 0.0
    humidity = 0.0
    wind_speed = 0.0
    wind_gust_speed = 0.0
    rainfall = 0.0
    created = ""
    air_pressure = 0.0

    def toJSON(self):
        """Serialize the instance attributes as pretty-printed JSON."""
        encode = lambda obj: obj.__dict__
        return json.dumps(self, default=encode, sort_keys=True, indent=4)
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes weather_measurement via its __dict__."""

    def default(self, obj):
        if isinstance(obj, weather_measurement):
            return obj.__dict__
        # Anything else falls back to the stock encoder (which raises
        # TypeError for unsupported types).
        return super(MyEncoder, self).default(obj)
#get all measurements
@app.route('/all', methods = ['GET'])
def find_all():
    """GET /all -- return every WEATHER_MEASUREMENT row as a JSON array.

    The response carries Access-Control-Allow-Origin: * for browser clients.
    """
    try:
        conn = mysql.connector.connect(host=mysql_host, user=mysql_user, passwd=mysql_password, db=mysql_db)
    except mysql.connector.Error as err:
        # BUG FIX: `errorcode` was a bare, never-imported name and raised
        # NameError whenever the connect failed; use mysql.connector.errorcode.
        if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    cursor = conn.cursor() #create cursor for select
    query = ("SELECT ambient_temperature, ground_temperature, air_quality, humidity, wind_speed, wind_gust_speed,rainfall, created, air_pressure FROM WEATHER_MEASUREMENT")
    cursor.execute(query)
    data = cursor.fetchall()
    measurements = []  # renamed from `list` so the builtin is not shadowed
    for row in data:
        mes = weather_measurement()
        mes.ambient_temperature = float(row[0])
        mes.ground_temperature = float(row[1])
        mes.air_quality = float(row[2])
        mes.humidity = float(row[3])
        mes.wind_speed = float(row[4])
        mes.wind_gust_speed = float(row[5])
        mes.rainfall = float(row[6])
        mes.created = str(row[7])
        mes.air_pressure = float(row[8])
        measurements.append(mes)
    conn.close()  # BUG FIX: was `conn.close` (attribute access, never called)
    cursor.close()
    resp = Response(json.dumps(measurements, cls=MyEncoder), status=200, mimetype='application/json')
    resp.headers["Access-Control-Allow-Origin"] = '*'
    return resp
#get last measurement
@app.route('/last', methods = ['GET'])
def get_last():
    """GET /last -- return the most recent measurement as a JSON object.

    Responds 500 (empty JSON array, text/html) when no single newest row is
    found, mirroring the original behavior.
    """
    try:
        conn = mysql.connector.connect(host=mysql_host, user=mysql_user, passwd=mysql_password, db=mysql_db)
    except mysql.connector.Error as err:
        # BUG FIX: `errorcode` was a bare, never-imported name (NameError);
        # use mysql.connector.errorcode.
        if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    cursor = conn.cursor() #create cursor for select
    query = ("SELECT ambient_temperature, ground_temperature, air_quality, humidity, wind_speed, wind_gust_speed,rainfall, created, air_pressure FROM WEATHER_MEASUREMENT WHERE created = (SELECT MAX(created) FROM WEATHER_MEASUREMENT);")
    cursor.execute(query)
    data = cursor.fetchall()
    if len(data) == 1:
        row = data[0]
        mes = weather_measurement()
        mes.ambient_temperature = float(row[0])
        mes.ground_temperature = float(row[1])
        mes.air_quality = float(row[2])
        mes.humidity = float(row[3])
        mes.wind_speed = float(row[4])
        mes.wind_gust_speed = float(row[5])
        mes.rainfall = float(row[6])
        mes.created = str(row[7])
        mes.air_pressure = float(row[8])
        conn.close()
        cursor.close()
        resp = Response(json.dumps(mes, cls=MyEncoder), status=200, mimetype='application/json')
        resp.headers["Access-Control-Allow-Origin"] = '*'
        return resp
    else:
        conn.close()
        cursor.close()
        # No (or ambiguous) newest row: empty array, error status.
        resp = Response(json.dumps([], cls=MyEncoder), status=500, mimetype='text/html')
        resp.headers["Access-Control-Allow-Origin"] = '*'
        return resp
#get all measurements between two dates
@app.route('/between/<fromDate>/<toDate>', methods = ['GET'])
def between_two_dates(fromDate, toDate):
    """GET /between/<fromDate>/<toDate> -- measurements in [fromDate, toDate].

    Dates are passed straight to MySQL as parameters (no SQL injection risk).
    """
    try:
        conn = mysql.connector.connect(host=mysql_host, user=mysql_user, passwd=mysql_password, db=mysql_db)
    except mysql.connector.Error as err:
        # BUG FIX: `errorcode` was a bare, never-imported name (NameError);
        # use mysql.connector.errorcode.
        if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    cursor = conn.cursor() #create cursor for select
    query = ("SELECT ambient_temperature, ground_temperature, air_quality, humidity, wind_speed, wind_gust_speed,rainfall, created," +
             "air_pressure FROM WEATHER_MEASUREMENT WHERE created >= %s AND created <= %s")
    cursor.execute(query, (fromDate, toDate))
    data = cursor.fetchall()
    measurements = []  # renamed from `list` so the builtin is not shadowed
    for row in data:
        mes = weather_measurement()
        mes.ambient_temperature = float(row[0])
        mes.ground_temperature = float(row[1])
        mes.air_quality = float(row[2])
        mes.humidity = float(row[3])
        mes.wind_speed = float(row[4])
        mes.wind_gust_speed = float(row[5])
        mes.rainfall = float(row[6])
        mes.created = str(row[7])
        mes.air_pressure = float(row[8])
        measurements.append(mes)
    conn.close()
    cursor.close()
    resp = Response(json.dumps(measurements, cls=MyEncoder), status=200, mimetype='application/json')
    resp.headers["Access-Control-Allow-Origin"] = '*'
    return resp
#get all measurements n seconds ago
@app.route('/from_now/<int:seconds>', methods = ['GET'])
def seconds_from_now(seconds):
    """GET /from_now/<seconds> -- measurements from the last *seconds* seconds."""
    try:
        conn = mysql.connector.connect(host=mysql_host, user=mysql_user, passwd=mysql_password, db=mysql_db)
    except mysql.connector.Error as err:
        # BUG FIX: `errorcode` was a bare, never-imported name (NameError);
        # use mysql.connector.errorcode.
        if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    cursor = conn.cursor() #create cursor for select
    query = ("SELECT ambient_temperature, ground_temperature, air_quality, humidity, wind_speed, wind_gust_speed,rainfall, created, air_pressure FROM WEATHER_MEASUREMENT " +
             "WHERE created >= from_unixtime(unix_timestamp(current_timestamp) - %s) AND created <= current_timestamp()")
    cursor.execute(query, (seconds,))
    data = cursor.fetchall()
    measurements = []  # renamed from `list` so the builtin is not shadowed
    for row in data:
        mes = weather_measurement()
        mes.ambient_temperature = float(row[0])
        mes.ground_temperature = float(row[1])
        mes.air_quality = float(row[2])
        mes.humidity = float(row[3])
        mes.wind_speed = float(row[4])
        mes.wind_gust_speed = float(row[5])
        mes.rainfall = float(row[6])
        mes.created = str(row[7])
        mes.air_pressure = float(row[8])
        measurements.append(mes)
    resp = Response(json.dumps(measurements, cls=MyEncoder), status=200, mimetype='application/json')
    resp.headers["Access-Control-Allow-Origin"] = '*'
    conn.close()
    cursor.close()
    return resp
#get the measurement closest to a specific date
@app.route('/at_date/<date>', methods = ['GET'])
def at_date(date):
    """GET /at_date/<date> -- the single measurement closest in time to *date*.

    Returned as a one-element JSON array (LIMIT 1 in the query).
    """
    try:
        conn = mysql.connector.connect(host=mysql_host, user=mysql_user, passwd=mysql_password, db=mysql_db)
    except mysql.connector.Error as err:
        # BUG FIX: `errorcode` was a bare, never-imported name (NameError);
        # use mysql.connector.errorcode.
        if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    cursor = conn.cursor() #create cursor for select
    query = ("SELECT ambient_temperature, ground_temperature, air_quality, humidity, wind_speed, wind_gust_speed,rainfall, created, air_pressure FROM WEATHER_MEASUREMENT " +
             "ORDER BY ABS(unix_timestamp(%s) - unix_timestamp(CREATED)) LIMIT 1")
    cursor.execute(query, (date,))
    data = cursor.fetchall()
    measurements = []  # renamed from `list` so the builtin is not shadowed
    for row in data:
        mes = weather_measurement()
        mes.ambient_temperature = float(row[0])
        mes.ground_temperature = float(row[1])
        mes.air_quality = float(row[2])
        mes.humidity = float(row[3])
        mes.wind_speed = float(row[4])
        mes.wind_gust_speed = float(row[5])
        mes.rainfall = float(row[6])
        mes.created = str(row[7])
        mes.air_pressure = float(row[8])
        measurements.append(mes)
    conn.close()
    cursor.close()
    resp = Response(json.dumps(measurements, cls=MyEncoder), status=200, mimetype='application/json')
    resp.headers["Access-Control-Allow-Origin"] = '*'
    return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
| 38.774468
| 235
| 0.650461
| 1,179
| 9,112
| 4.873622
| 0.142494
| 0.055691
| 0.024887
| 0.033066
| 0.771319
| 0.771319
| 0.771319
| 0.771319
| 0.764184
| 0.764184
| 0
| 0.0118
| 0.228051
| 9,112
| 234
| 236
| 38.940171
| 0.80509
| 0.034021
| 0
| 0.719626
| 0
| 0.004673
| 0.205119
| 0.051422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03271
| false
| 0.051402
| 0.037383
| 0.004673
| 0.163551
| 0.070093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
eaefeec7f641c642150f5c181ac9da9b3bd0a0f5
| 46
|
py
|
Python
|
petrophysics/conversions/__init__.py
|
petroGG/petrophysics
|
953023d89969f7970f584604146118ad7123ce30
|
[
"MIT"
] | 47
|
2016-06-30T20:04:02.000Z
|
2021-12-11T17:01:04.000Z
|
petrophysics/conversions/__init__.py
|
Khalilsqu/petrophysics
|
953023d89969f7970f584604146118ad7123ce30
|
[
"MIT"
] | null | null | null |
petrophysics/conversions/__init__.py
|
Khalilsqu/petrophysics
|
953023d89969f7970f584604146118ad7123ce30
|
[
"MIT"
] | 35
|
2016-09-30T17:28:48.000Z
|
2022-01-06T02:24:53.000Z
|
from . import oil
from . import temperature
| 9.2
| 25
| 0.73913
| 6
| 46
| 5.666667
| 0.666667
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 46
| 4
| 26
| 11.5
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d82405f08b6204f1ab0cb9e96939f8dfafa4abe7
| 484
|
py
|
Python
|
s3_mysql_backup/test/test_accounting_file_patterns.py
|
fogcitymarathoner/s3_mysql_backup
|
f6e821889abae16381e9a7fa49d24a61ffb28ac2
|
[
"MIT"
] | null | null | null |
s3_mysql_backup/test/test_accounting_file_patterns.py
|
fogcitymarathoner/s3_mysql_backup
|
f6e821889abae16381e9a7fa49d24a61ffb28ac2
|
[
"MIT"
] | null | null | null |
s3_mysql_backup/test/test_accounting_file_patterns.py
|
fogcitymarathoner/s3_mysql_backup
|
f6e821889abae16381e9a7fa49d24a61ffb28ac2
|
[
"MIT"
] | null | null | null |
import re
from s3_mysql_backup.scripts.backup_gnucash import pat as gpat
from s3_mysql_backup.scripts.backup_qb import pat as qpat
class Test:
assert re.match(gpat, 'Personal041008.20140819135748.gnucash.20151005135235.gnucash.20160921104022.gnucash')
assert re.match(gpat, 'Personal041008.20140819135748.gnucash.20151005135235.gnucash.20160923091326.gnucash')
assert re.match(gpat, 'Personal041008.20140819135748.gnucash.20151005135235.gnucash.20160923092006.gnucash')
| 48.4
| 112
| 0.834711
| 59
| 484
| 6.745763
| 0.389831
| 0.060302
| 0.09799
| 0.128141
| 0.736181
| 0.736181
| 0.585427
| 0.585427
| 0.585427
| 0.40201
| 0
| 0.32809
| 0.080579
| 484
| 9
| 113
| 53.777778
| 0.566292
| 0
| 0
| 0
| 0
| 0
| 0.514463
| 0.514463
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0
| true
| 0
| 0.428571
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d82bce2aa50e30b42f41478c0ab09e0808fc32d9
| 69,884
|
py
|
Python
|
tests/pytests/unit/pillar/test_netbox.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/pytests/unit/pillar/test_netbox.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/pytests/unit/pillar/test_netbox.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
:codeauthor: Gary T. Giesen <ggiesen@giesen.me>
"""
import pytest
import salt.pillar.netbox as netbox
from tests.support.mock import patch
@pytest.fixture
def default_kwargs():
return {
"minion_id": "minion1",
"pillar": None,
"api_url": "http://netbox.example.com",
"api_token": "yeic5oocizei7owuichoesh8ooqu6oob3uWiey9a",
"api_query_result_limit": 65535,
}
@pytest.fixture
def headers():
return {"Authorization": "Token quin1Di5MoRooChaiph3Aenaxais5EeY1gie6eev"}
@pytest.fixture
def device_results():
return {
"dict": {
"count": 1,
"next": None,
"previous": None,
"results": [
{
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"display_name": "minion1",
"device_type": {
"id": 4,
"url": "https://netbox.example.com/api/dcim/device-types/4/",
"manufacturer": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/manufacturers/1/",
"name": "Cisco",
"slug": "cisco",
},
"model": "ISR2901",
"slug": "isr2901",
"display_name": "Cisco ISR2901",
},
"device_role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"node_type": "device",
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"serial": "",
"asset_tag": None,
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"rack": None,
"position": None,
"face": None,
"parent_device": None,
"status": {"value": "active", "label": "Active"},
"primary_ip": {
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": 4,
"address": "192.0.2.1/24",
},
"primary_ip4": {
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": 4,
"address": "192.0.2.1/24",
},
"primary_ip6": None,
"cluster": None,
"virtual_chassis": None,
"vc_position": None,
"vc_priority": None,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.171105Z",
}
],
}
}
@pytest.fixture
def multiple_device_results():
return {
"dict": {
"count": 2,
"next": None,
"previous": None,
"results": [
{
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"display_name": "minion1",
"device_type": {
"id": 4,
"url": "https://netbox.example.com/api/dcim/device-types/4/",
"manufacturer": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/manufacturers/1/",
"name": "Cisco",
"slug": "cisco",
},
"model": "ISR2901",
"slug": "isr2901",
"display_name": "Cisco ISR2901",
},
"device_role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"node_type": "device",
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"serial": "",
"asset_tag": None,
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"rack": None,
"position": None,
"face": None,
"parent_device": None,
"status": {"value": "active", "label": "Active"},
"primary_ip": {
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": 4,
"address": "192.0.2.1/24",
},
"primary_ip4": {
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": 4,
"address": "192.0.2.1/24",
},
"primary_ip6": None,
"cluster": None,
"virtual_chassis": None,
"vc_position": None,
"vc_priority": None,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.171105Z",
},
{
"id": 512,
"url": "https://netbox.example.com/api/dcim/devices/512/",
"name": "minion1",
"display_name": "minion1",
"device_type": {
"id": 4,
"url": "https://netbox.example.com/api/dcim/device-types/4/",
"manufacturer": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/manufacturers/1/",
"name": "Cisco",
"slug": "cisco",
},
"model": "ISR2901",
"slug": "isr2901",
"display_name": "Cisco ISR2901",
},
"device_role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"node_type": "device",
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"serial": "",
"asset_tag": None,
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"rack": None,
"position": None,
"face": None,
"parent_device": None,
"status": {"value": "active", "label": "Active"},
"primary_ip": {
"id": 1150,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1150/",
"family": 4,
"address": "192.0.2.3/24",
},
"primary_ip4": {
"id": 1150,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1150/",
"family": 4,
"address": "192.0.2.3/24",
},
"primary_ip6": None,
"cluster": None,
"virtual_chassis": None,
"vc_position": None,
"vc_priority": None,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.171105Z",
},
],
}
}
@pytest.fixture
def virtual_machine_results():
return {
"dict": {
"count": 1,
"next": None,
"previous": None,
"results": [
{
"id": 222,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/222/",
"name": "minion1",
"status": {"value": "active", "label": "Active"},
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"cluster": {
"id": 1,
"url": "https://netbox.example.com/api/virtualization/clusters/1/",
"name": "Cluster",
},
"role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"node_type": "virtual-machine",
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"primary_ip": {
"id": 1148,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1148/",
"family": 4,
"address": "192.0.2.2/24",
},
"primary_ip4": {
"id": 1148,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1148/",
"family": 4,
"address": "192.0.2.2/24",
},
"primary_ip6": None,
"vcpus": 1,
"memory": 1024,
"disk": 30,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:05.799541Z",
}
],
}
}
@pytest.fixture
def multiple_virtual_machine_results():
return {
"dict": {
"count": 1,
"next": None,
"previous": None,
"results": [
{
"id": 222,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/222/",
"name": "minion1",
"status": {"value": "active", "label": "Active"},
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"cluster": {
"id": 1,
"url": "https://netbox.example.com/api/virtualization/clusters/1/",
"name": "Cluster",
},
"role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"node_type": "virtual-machine",
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"primary_ip": {
"id": 1148,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1148/",
"family": 4,
"address": "192.0.2.2/24",
},
"primary_ip4": {
"id": 1148,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1148/",
"family": 4,
"address": "192.0.2.2/24",
},
"primary_ip6": None,
"vcpus": 1,
"memory": 1024,
"disk": 30,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:05.799541Z",
},
{
"id": 223,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/223/",
"name": "minion1",
"status": {"value": "active", "label": "Active"},
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"cluster": {
"id": 1,
"url": "https://netbox.example.com/api/virtualization/clusters/1/",
"name": "Cluster",
},
"role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"node_type": "virtual-machine",
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"primary_ip": {
"id": 1152,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1152/",
"family": 4,
"address": "192.0.2.4/24",
},
"primary_ip4": {
"id": 1152,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1152/",
"family": 4,
"address": "192.0.2.4/24",
},
"primary_ip6": None,
"vcpus": 1,
"memory": 1024,
"disk": 30,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:05.799541Z",
},
],
}
}
@pytest.fixture
def no_results():
return {"dict": {"count": 0, "next": None, "previous": None, "results": []}}
@pytest.fixture
def http_error():
return {"error": "HTTP 404: Not Found", "status": 404}
@pytest.fixture
def device_interface_results():
return {
"dict": {
"count": 2,
"next": None,
"previous": None,
"results": [
{
"id": 8158,
"url": "https://netbox.example.com/api/dcim/interfaces/8158/",
"device": {
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"display_name": "minion1",
},
"name": "GigabitEthernet0/0",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
{
"id": 8159,
"url": "https://netbox.example.com/api/dcim/interfaces/8159/",
"device": {
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"display_name": "minion1",
},
"name": "GigabitEthernet0/1",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
],
}
}
@pytest.fixture
def device_interfaces_list():
return [
{
"id": 8158,
"url": "https://netbox.example.com/api/dcim/interfaces/8158/",
"name": "GigabitEthernet0/0",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
{
"id": 8159,
"url": "https://netbox.example.com/api/dcim/interfaces/8159/",
"name": "GigabitEthernet0/1",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
]
@pytest.fixture
def virtual_machine_interface_results():
return {
"dict": {
"count": 2,
"next": None,
"previous": None,
"results": [
{
"id": 668,
"url": "https://netbox.example.com/api/virtualization/interfaces/668/",
"virtual_machine": {
"id": 222,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/222/",
"name": "minion1",
},
"name": "GigabitEthernet0/0",
"enabled": True,
"mtu": None,
"mac_address": None,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"tags": [],
},
{
"id": 669,
"url": "https://netbox.example.com/api/virtualization/interfaces/669/",
"virtual_machine": {
"id": 222,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/222/",
"name": "minion1",
},
"name": "GigabitEthernet0/1",
"enabled": True,
"mtu": None,
"mac_address": None,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"tags": [],
},
],
}
}
@pytest.fixture
def virtual_machine_interfaces_list():
return [
{
"id": 668,
"url": "https://netbox.example.com/api/virtualization/interfaces/668/",
"name": "GigabitEthernet0/0",
"enabled": True,
"mtu": None,
"mac_address": None,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"tags": [],
},
{
"id": 669,
"url": "https://netbox.example.com/api/virtualization/interfaces/669/",
"name": "GigabitEthernet0/1",
"enabled": True,
"mtu": None,
"mac_address": None,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"tags": [],
},
]
@pytest.fixture
def device_ip_results():
return {
"dict": {
"count": 2,
"next": None,
"previous": None,
"results": [
{
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": {"value": 4, "label": "IPv4"},
"address": "192.0.2.1/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"assigned_object_type": "dcim.interface",
"assigned_object_id": 8158,
"assigned_object": {
"id": 8158,
"url": "https://netbox.example.com/api/dcim/interfaces/8158/",
"device": {
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"display_name": "minion1",
},
"name": "GigabitEthernet0/0",
"cable": None,
},
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.153386Z",
},
{
"id": 1147,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1147/",
"family": {"value": 4, "label": "IPv4"},
"address": "198.51.100.1/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"assigned_object_type": "dcim.interface",
"assigned_object_id": 8159,
"assigned_object": {
"id": 8159,
"url": "https://netbox.example.com/api/dcim/interfaces/8159/",
"device": {
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"display_name": "minion1",
},
"name": "GigabitEthernet0/1",
"cable": None,
},
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:40.508154Z",
},
],
}
}
@pytest.fixture
def virtual_machine_ip_results():
return {
"dict": {
"count": 2,
"next": None,
"previous": None,
"results": [
{
"id": 1148,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1148/",
"family": {"value": 4, "label": "IPv4"},
"address": "192.0.2.2/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"assigned_object_type": "virtualization.vminterface",
"assigned_object_id": 668,
"assigned_object": {
"id": 668,
"url": "https://netbox.example.com/api/virtualization/interfaces/668/",
"virtual_machine": {
"id": 222,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/222/",
"name": "minion1",
},
"name": "GigabitEthernet0/0",
},
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:05.784281Z",
},
{
"id": 1149,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1149/",
"family": {"value": 4, "label": "IPv4"},
"address": "198.51.100.2/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"assigned_object_type": "virtualization.vminterface",
"assigned_object_id": 669,
"assigned_object": {
"id": 669,
"url": "https://netbox.example.com/api/virtualization/interfaces/669/",
"virtual_machine": {
"id": 222,
"url": "https://netbox.example.com/api/virtualization/virtual-machines/222/",
"name": "minion1",
},
"name": "GigabitEthernet0/1",
},
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:29.607428Z",
},
],
}
}
@pytest.fixture
def device_interfaces_ip_list():
return [
{
"id": 8158,
"ip_addresses": [
{
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": {"value": 4, "label": "IPv4"},
"address": "192.0.2.1/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.153386Z",
},
],
"url": "https://netbox.example.com/api/dcim/interfaces/8158/",
"name": "GigabitEthernet0/0",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
{
"id": 8159,
"ip_addresses": [
{
"id": 1147,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1147/",
"family": {"value": 4, "label": "IPv4"},
"address": "198.51.100.1/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:40.508154Z",
},
],
"url": "https://netbox.example.com/api/dcim/interfaces/8159/",
"name": "GigabitEthernet0/1",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
]
@pytest.fixture
def virtual_machine_interfaces_ip_list():
return [
{
"id": 668,
"ip_addresses": [
{
"id": 1148,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1148/",
"family": {"value": 4, "label": "IPv4"},
"address": "192.0.2.2/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:05.784281Z",
},
],
"url": "https://netbox.example.com/api/virtualization/interfaces/668/",
"name": "GigabitEthernet0/0",
"enabled": True,
"mtu": None,
"mac_address": None,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"tags": [],
},
{
"id": 669,
"ip_addresses": [
{
"id": 1149,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1149/",
"family": {"value": 4, "label": "IPv4"},
"address": "198.51.100.2/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:23:29.607428Z",
},
],
"url": "https://netbox.example.com/api/virtualization/interfaces/669/",
"name": "GigabitEthernet0/1",
"enabled": True,
"mtu": None,
"mac_address": None,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"tags": [],
},
]
@pytest.fixture
def site_results():
return {
"dict": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
"status": {"value": "active", "label": "Active"},
"region": None,
"tenant": None,
"facility": "",
"asn": None,
"time_zone": None,
"description": "",
"physical_address": "",
"shipping_address": "",
"latitude": None,
"longitude": None,
"contact_name": "",
"contact_phone": "",
"contact_email": "",
"comments": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T14:21:07.898957Z",
"circuit_count": 0,
"device_count": 1,
"prefix_count": 2,
"rack_count": 0,
"virtualmachine_count": 1,
"vlan_count": 0,
}
}
@pytest.fixture
def site_prefixes_results():
return {
"dict": {
"count": 2,
"next": None,
"previous": None,
"results": [
{
"id": 284,
"url": "https://netbox.example.com/api/ipam/prefixes/284/",
"family": {"value": 4, "label": "IPv4"},
"prefix": "192.0.2.0/24",
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"vrf": None,
"tenant": None,
"vlan": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"is_pool": False,
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T15:08:27.136305Z",
},
{
"id": 285,
"url": "https://netbox.example.com/api/ipam/prefixes/285/",
"family": {"value": 4, "label": "IPv4"},
"prefix": "198.51.100.0/24",
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
},
"vrf": None,
"tenant": None,
"vlan": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"is_pool": False,
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T15:08:59.880440Z",
},
],
}
}
@pytest.fixture
def site_prefixes():
return [
{
"id": 284,
"url": "https://netbox.example.com/api/ipam/prefixes/284/",
"family": {"value": 4, "label": "IPv4"},
"prefix": "192.0.2.0/24",
"vrf": None,
"tenant": None,
"vlan": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"is_pool": False,
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T15:08:27.136305Z",
},
{
"id": 285,
"url": "https://netbox.example.com/api/ipam/prefixes/285/",
"family": {"value": 4, "label": "IPv4"},
"prefix": "198.51.100.0/24",
"vrf": None,
"tenant": None,
"vlan": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"is_pool": False,
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T15:08:59.880440Z",
},
]
@pytest.fixture
def proxy_details_results():
return {
"dict": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
"manufacturer": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/manufacturers/1/",
"name": "Cisco",
"slug": "cisco",
},
"napalm_driver": "ios",
"napalm_args": None,
"description": "",
"device_count": 152,
"virtualmachine_count": 1,
}
}
@pytest.fixture
def proxy_details():
return {
"host": "192.0.2.1",
"driver": "ios",
"proxytype": "napalm",
}
@pytest.fixture
def pillar_results():
return {
"netbox": {
"id": 511,
"url": "https://netbox.example.com/api/dcim/devices/511/",
"name": "minion1",
"node_type": "device",
"display_name": "minion1",
"device_type": {
"id": 4,
"url": "https://netbox.example.com/api/dcim/device-types/4/",
"manufacturer": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/manufacturers/1/",
"name": "Cisco",
"slug": "cisco",
},
"model": "ISR2901",
"slug": "isr2901",
"display_name": "Cisco ISR2901",
},
"device_role": {
"id": 45,
"url": "https://netbox.example.com/api/dcim/device-roles/45/",
"name": "Network",
"slug": "network",
},
"interfaces": [
{
"id": 8158,
"ip_addresses": [
{
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": {"value": 4, "label": "IPv4"},
"address": "192.0.2.1/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.153386Z",
},
],
"url": "https://netbox.example.com/api/dcim/interfaces/8158/",
"name": "GigabitEthernet0/0",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
{
"id": 8159,
"ip_addresses": [
{
"id": 1147,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1147/",
"family": {"value": 4, "label": "IPv4"},
"address": "198.51.100.1/24",
"vrf": None,
"tenant": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"nat_inside": None,
"nat_outside": None,
"dns_name": "",
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:40.508154Z",
},
],
"url": "https://netbox.example.com/api/dcim/interfaces/8159/",
"name": "GigabitEthernet0/1",
"label": "",
"type": {"value": "1000base-t", "label": "1000BASE-T (1GE)"},
"enabled": True,
"lag": None,
"mtu": None,
"mac_address": None,
"mgmt_only": False,
"description": "",
"mode": None,
"untagged_vlan": None,
"tagged_vlans": [],
"cable": None,
"cable_peer": None,
"cable_peer_type": None,
"connected_endpoint": None,
"connected_endpoint_type": None,
"connected_endpoint_reachable": None,
"tags": [],
"count_ipaddresses": 1,
},
],
"tenant": None,
"platform": {
"id": 1,
"url": "https://netbox.example.com/api/dcim/platforms/1/",
"name": "Cisco IOS",
"slug": "ios",
},
"serial": "",
"asset_tag": None,
"site": {
"id": 18,
"url": "https://netbox.example.com/api/dcim/sites/18/",
"name": "Site 1",
"slug": "site1",
"status": {"value": "active", "label": "Active"},
"region": None,
"tenant": None,
"facility": "",
"asn": None,
"time_zone": None,
"description": "",
"physical_address": "",
"shipping_address": "",
"latitude": None,
"longitude": None,
"contact_name": "",
"contact_phone": "",
"contact_email": "",
"comments": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T14:21:07.898957Z",
"circuit_count": 0,
"device_count": 1,
"prefix_count": 2,
"rack_count": 0,
"virtualmachine_count": 1,
"vlan_count": 0,
"prefixes": [
{
"id": 284,
"url": "https://netbox.example.com/api/ipam/prefixes/284/",
"family": {"value": 4, "label": "IPv4"},
"prefix": "192.0.2.0/24",
"vrf": None,
"tenant": None,
"vlan": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"is_pool": False,
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T15:08:27.136305Z",
},
{
"id": 285,
"url": "https://netbox.example.com/api/ipam/prefixes/285/",
"family": {"value": 4, "label": "IPv4"},
"prefix": "198.51.100.0/24",
"vrf": None,
"tenant": None,
"vlan": None,
"status": {"value": "active", "label": "Active"},
"role": None,
"is_pool": False,
"description": "",
"tags": [],
"custom_fields": {},
"created": "2021-02-25",
"last_updated": "2021-02-25T15:08:59.880440Z",
},
],
},
"rack": None,
"position": None,
"face": None,
"parent_device": None,
"status": {"value": "active", "label": "Active"},
"primary_ip": {
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": 4,
"address": "192.0.2.1/24",
},
"primary_ip4": {
"id": 1146,
"url": "https://netbox.example.com/api/ipam/ip-addresses/1146/",
"family": 4,
"address": "192.0.2.1/24",
},
"primary_ip6": None,
"cluster": None,
"virtual_chassis": None,
"vc_position": None,
"vc_priority": None,
"comments": "",
"local_context_data": None,
"tags": [],
"custom_fields": {},
"config_context": {},
"created": "2021-02-19",
"last_updated": "2021-02-19T06:12:04.171105Z",
},
"proxy": {"host": "192.0.2.1", "driver": "ios", "proxytype": "napalm"},
}
def test_when_minion_id_is_star_then_result_should_be_empty_dict(default_kwargs):
expected_result = {}
default_kwargs["minion_id"] = "*"
actual_result = netbox.ext_pillar(**default_kwargs)
assert actual_result == expected_result
def test_when_api_url_is_not_http_or_https_then_error_message_should_be_logged(
default_kwargs,
):
default_kwargs["api_url"] = "ftp://netbox.example.com"
with patch("salt.pillar.netbox.log.error", autospec=True) as fake_error:
netbox.ext_pillar(**default_kwargs)
fake_error.assert_called_with(
'Provided URL for api_url "%s" is malformed or is not an http/https URL',
"ftp://netbox.example.com",
)
def test_when_neither_devices_or_virtual_machines_requested_then_error_message_should_be_logged(
default_kwargs,
):
default_kwargs["devices"] = default_kwargs["virtual_machines"] = False
with patch("salt.pillar.netbox.log.error", autospec=True) as fake_error:
netbox.ext_pillar(**default_kwargs)
fake_error.assert_called_with(
"At least one of devices or virtual_machines must be True"
)
def test_when_interface_ips_requested_but_not_interfaces_then_error_message_should_be_logged(
default_kwargs,
):
default_kwargs["interfaces"] = False
default_kwargs["interface_ips"] = True
with patch("salt.pillar.netbox.log.error", autospec=True) as fake_error:
netbox.ext_pillar(**default_kwargs)
fake_error.assert_called_with(
"The value for interfaces must be True if interface_ips is True"
)
def test_when_api_query_result_limit_set_but_not_a_positive_integer_then_error_message_should_be_logged(
default_kwargs,
):
default_kwargs["api_query_result_limit"] = -1
with patch("salt.pillar.netbox.log.error", autospec=True) as fake_error:
netbox.ext_pillar(**default_kwargs)
fake_error.assert_called_with(
"The value for api_query_result_limit must be a postive integer if set"
)
def test_when_api_token_not_set_then_error_message_should_be_logged(
default_kwargs,
):
default_kwargs["api_token"] = ""
with patch("salt.pillar.netbox.log.error", autospec=True) as fake_error:
netbox.ext_pillar(**default_kwargs)
fake_error.assert_called_with("The value for api_token is not set")
def test_when_we_retrieve_a_single_device_then_return_list(
    default_kwargs, headers, device_results
):
    """_get_devices returns the API payload's 'results' list on success."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = device_results
        retrieved = netbox._get_devices(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == device_results["dict"]["results"]
def test_when_we_retrieve_a_device_and_get_http_error_then_return_empty_list(
    default_kwargs, headers, http_error
):
    """An HTTP error from the API makes _get_devices return an empty list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_devices(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_when_we_retrieve_a_single_virtual_machine_then_return_list(
    default_kwargs, headers, virtual_machine_results
):
    """_get_virtual_machines returns the API payload's 'results' list on success."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = virtual_machine_results
        retrieved = netbox._get_virtual_machines(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == virtual_machine_results["dict"]["results"]
def test_when_we_retrieve_a_virtual_machine_and_get_http_error_then_return_empty_dict(
    default_kwargs, headers, http_error
):
    """An HTTP error from the API makes _get_virtual_machines return an empty list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_virtual_machines(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_when_we_retrieve_device_interfaces_then_return_dict(
    default_kwargs, headers, device_interface_results, device_interfaces_list
):
    """_get_interfaces for a device id returns the expected interface list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = device_interface_results
        retrieved = netbox._get_interfaces(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            511,  # arbitrary device id used by the canned fixture
            "device",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == device_interfaces_list
def test_when_we_retrieve_device_interfaces_and_get_http_error_then_return_empty_list(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_interfaces (device flavour) return []."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_interfaces(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            511,
            "device",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_when_we_retrieve_virtual_machine_interfaces_then_return_list(
    default_kwargs,
    headers,
    virtual_machine_interface_results,
    virtual_machine_interfaces_list,
):
    """_get_interfaces for a virtual machine id returns the expected list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = virtual_machine_interface_results
        retrieved = netbox._get_interfaces(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            222,  # arbitrary VM id used by the canned fixture
            "virtual-machine",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == virtual_machine_interfaces_list
def test_when_we_retrieve_virtual_machine_interfaces_and_get_http_error_then_return_empty_list(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_interfaces (VM flavour) return []."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_interfaces(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            222,
            "virtual-machine",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_when_we_retrieve_device_interface_ips_then_return_list(
    default_kwargs, headers, device_ip_results
):
    """_get_interface_ips for a device returns the API 'results' list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = device_ip_results
        retrieved = netbox._get_interface_ips(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            511,
            "device",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == device_ip_results["dict"]["results"]
def test_when_we_retrieve_device_interface_ips_and_get_http_error_then_return_empty_list(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_interface_ips (device flavour) return []."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_interface_ips(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            511,
            "device",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_when_we_retrieve_virtual_machine_interface_ips_then_return_list(
    default_kwargs, headers, virtual_machine_ip_results
):
    """_get_interface_ips for a virtual machine returns the API 'results' list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = virtual_machine_ip_results
        retrieved = netbox._get_interface_ips(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            222,
            "virtual-machine",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == virtual_machine_ip_results["dict"]["results"]
def test_when_we_retrieve_virtual_machine_interface_ips_and_get_http_error_then_return_empty_list(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_interface_ips (VM flavour) return []."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_interface_ips(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            222,
            "virtual-machine",
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_associate_ips_to_interfaces_then_return_list(
    default_kwargs, device_interfaces_list, device_ip_results, device_interfaces_ip_list
):
    """IPs are merged onto their owning interfaces; no mocking is required."""
    combined = netbox._associate_ips_to_interfaces(
        device_interfaces_list, device_ip_results["dict"]["results"]
    )
    assert combined == device_interfaces_ip_list
def test_associate_empty_ip_list_to_interfaces_then_return_list(
    default_kwargs, device_interfaces_list, device_ip_results
):
    """With no IPs to merge, the interface list comes back unchanged."""
    combined = netbox._associate_ips_to_interfaces(device_interfaces_list, [])
    assert combined == device_interfaces_list
def test_when_we_retrieve_site_details_then_return_dict(
    default_kwargs, headers, site_results
):
    """_get_site_details returns the whole decoded payload dict on success."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = site_results
        retrieved = netbox._get_site_details(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            "Site 1",  # site name used only for log messages
            18,        # site id used by the canned fixture
            headers,
        )
    assert retrieved == site_results["dict"]
def test_when_we_retrieve_site_details_and_get_http_error_then_return_empty_dict(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_site_details return an empty dict."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_site_details(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            "Site 1",
            18,
            headers,
        )
    assert retrieved == {}
def test_when_we_retrieve_site_prefixes_then_return_list(
    default_kwargs, headers, site_prefixes_results, site_prefixes
):
    """_get_site_prefixes returns the fixture's prefix list on success."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = site_prefixes_results
        retrieved = netbox._get_site_prefixes(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            "Site 1",
            18,
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == site_prefixes
def test_when_we_retrieve_site_prefixes_and_get_http_error_then_return_empty_list(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_site_prefixes return an empty list."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_site_prefixes(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            "Site 1",
            18,
            headers,
            default_kwargs["api_query_result_limit"],
        )
    assert retrieved == []
def test_when_we_retrieve_proxy_details_then_return_dict(
    default_kwargs, headers, proxy_details_results, proxy_details
):
    """_get_proxy_details returns the expected proxy dict on success."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = proxy_details_results
        retrieved = netbox._get_proxy_details(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            "192.0.2.1/24",  # primary IP of the fixture node
            1,               # platform id of the fixture node
            headers,
        )
    assert retrieved == proxy_details
def test_when_we_retrieve_proxy_details_and_get_http_error_then_dont_return(
    default_kwargs, headers, http_error
):
    """An HTTP error makes _get_proxy_details return None (implicit return)."""
    with patch("salt.utils.http.query", autospec=True) as fake_query:
        fake_query.return_value = http_error
        retrieved = netbox._get_proxy_details(
            default_kwargs["api_url"],
            default_kwargs["minion_id"],
            "192.0.2.1/24",
            1,
            headers,
        )
    assert retrieved is None
def test_when_we_retrieve_multiple_devices_then_error_message_should_be_logged(
    default_kwargs, multiple_device_results
):
    """More than one matching device must abort with a logged error."""
    with patch(
        "salt.pillar.netbox._get_devices", autospec=True
    ) as fake_get_devices, patch(
        "salt.pillar.netbox.log.error", autospec=True
    ) as log_error:
        fake_get_devices.return_value = multiple_device_results["dict"]["results"]
        netbox.ext_pillar(**default_kwargs)
    log_error.assert_called_with(
        'More than one node found for "%s"',
        "minion1",
    )
def test_when_we_retrieve_multiple_virtual_machines_then_error_message_should_be_logged(
    default_kwargs, multiple_virtual_machine_results
):
    """More than one matching VM must abort with a logged error."""
    default_kwargs["devices"] = False
    default_kwargs["virtual_machines"] = True
    with patch(
        "salt.pillar.netbox._get_virtual_machines", autospec=True
    ) as fake_get_vms, patch(
        "salt.pillar.netbox.log.error", autospec=True
    ) as log_error:
        fake_get_vms.return_value = multiple_virtual_machine_results["dict"]["results"]
        netbox.ext_pillar(**default_kwargs)
    log_error.assert_called_with(
        'More than one node found for "%s"',
        "minion1",
    )
def test_when_we_retrieve_a_device_and_a_virtual_machine_then_error_message_should_be_logged(
    default_kwargs, device_results, virtual_machine_results
):
    """One device plus one VM still counts as an ambiguous (>1) match."""
    default_kwargs["virtual_machines"] = True
    with patch("salt.pillar.netbox._get_devices", autospec=True) as fake_device, patch(
        "salt.pillar.netbox._get_virtual_machines", autospec=True
    ) as fake_vm, patch(
        "salt.pillar.netbox.log.error", autospec=True
    ) as log_error:
        fake_device.return_value = device_results["dict"]["results"]
        fake_vm.return_value = virtual_machine_results["dict"]["results"]
        netbox.ext_pillar(**default_kwargs)
    log_error.assert_called_with(
        'More than one node found for "%s"',
        "minion1",
    )
def test_when_we_retrieve_no_devices_then_error_message_should_be_logged(
    default_kwargs, no_results
):
    """Zero matching devices must be reported as an inability to pull data."""
    with patch("salt.pillar.netbox._get_devices", autospec=True) as fake_get_devices, patch(
        "salt.pillar.netbox.log.error", autospec=True
    ) as log_error:
        fake_get_devices.return_value = no_results["dict"]["results"]
        netbox.ext_pillar(**default_kwargs)
    log_error.assert_called_with(
        'Unable to pull NetBox data for "%s"',
        "minion1",
    )
def test_when_we_retrieve_no_virtual_machines_then_error_message_should_be_logged(
    default_kwargs, no_results
):
    """Zero matching VMs must be reported as an inability to pull data."""
    default_kwargs["devices"] = False
    default_kwargs["virtual_machines"] = True
    with patch(
        "salt.pillar.netbox._get_virtual_machines", autospec=True
    ) as fake_get_vms, patch(
        "salt.pillar.netbox.log.error", autospec=True
    ) as log_error:
        fake_get_vms.return_value = no_results["dict"]["results"]
        netbox.ext_pillar(**default_kwargs)
    log_error.assert_called_with(
        'Unable to pull NetBox data for "%s"',
        "minion1",
    )
def test_when_we_retrieve_everything_successfully_then_return_dict(
    default_kwargs,
    device_results,
    no_results,
    device_interfaces_list,
    device_ip_results,
    site_results,
    site_prefixes,
    proxy_details,
    pillar_results,
):
    """Happy path: every helper is mocked and ext_pillar assembles the full pillar."""
    default_kwargs["virtual_machines"] = False
    default_kwargs["interfaces"] = True
    default_kwargs["interface_ips"] = True
    default_kwargs["site_details"] = True
    default_kwargs["site_prefixes"] = True
    default_kwargs["proxy_return"] = True
    with patch("salt.pillar.netbox._get_devices", autospec=True) as fake_devices, patch(
        "salt.pillar.netbox._get_virtual_machines", autospec=True
    ) as fake_vms, patch(
        "salt.pillar.netbox._get_interfaces", autospec=True
    ) as fake_interfaces, patch(
        "salt.pillar.netbox._get_interface_ips", autospec=True
    ) as fake_interface_ips, patch(
        "salt.pillar.netbox._get_site_details", autospec=True
    ) as fake_site_details, patch(
        "salt.pillar.netbox._get_site_prefixes", autospec=True
    ) as fake_site_prefixes, patch(
        "salt.pillar.netbox._get_proxy_details", autospec=True
    ) as fake_proxy_details:
        fake_devices.return_value = device_results["dict"]["results"]
        fake_vms.return_value = no_results["dict"]["results"]
        fake_interfaces.return_value = device_interfaces_list
        fake_interface_ips.return_value = device_ip_results["dict"]["results"]
        fake_site_details.return_value = site_results["dict"]
        fake_site_prefixes.return_value = site_prefixes
        fake_proxy_details.return_value = proxy_details
        pillar = netbox.ext_pillar(**default_kwargs)
    assert pillar == pillar_results
| 34.837488
| 105
| 0.439357
| 5,950
| 69,884
| 4.920672
| 0.049076
| 0.050618
| 0.056288
| 0.065544
| 0.916627
| 0.888619
| 0.874377
| 0.867853
| 0.855386
| 0.844832
| 0
| 0.048523
| 0.427308
| 69,884
| 2,005
| 106
| 34.854863
| 0.683024
| 0.000673
| 0
| 0.790541
| 0
| 0
| 0.280215
| 0.039527
| 0
| 0
| 0
| 0
| 0.018018
| 1
| 0.030405
| false
| 0
| 0.001689
| 0.012387
| 0.044482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dc1a45a58143402a7de65343e1fe4d7f58ce7198
| 4,861
|
py
|
Python
|
tests/test_rebuild_app.py
|
gkzz/mydemo_pack
|
1accb8a270a6c08ac598da81522648e4cfb1abc1
|
[
"MIT"
] | null | null | null |
tests/test_rebuild_app.py
|
gkzz/mydemo_pack
|
1accb8a270a6c08ac598da81522648e4cfb1abc1
|
[
"MIT"
] | null | null | null |
tests/test_rebuild_app.py
|
gkzz/mydemo_pack
|
1accb8a270a6c08ac598da81522648e4cfb1abc1
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from st2tests.base import BaseActionTestCase
from mock import MagicMock, patch
import json
import yaml
import os
import sys
import re
# The action module under test lives inside the StackStorm pack, outside the
# normal import path, so extend sys.path before importing it below.
# NOTE(review): paths assume a python2.7 StackStorm install on this host —
# confirm they match the deployment before running these tests elsewhere.
BASE_DIR = '/opt/stackstorm/packs/mydemo_pack'
sys.path.append(BASE_DIR)
sys.path.append(BASE_DIR + '/actions/scripts')
sys.path.append('/opt/stackstorm/virtualenvs/mydemo_pack/lib/python2.7/site-packages')
sys.path.append('/opt/stackstorm/st2/lib/python2.7/site-packages')
# Fixture file names: action input parameters and canned command responses.
input_file = "rebuild_app.yaml"
res_file = BASE_DIR + "/tests/rebuild_app/response.yaml"
# Must be imported after the sys.path manipulation above.
from rebuild_app import RebuildAppAction
class TestRebuildAppAction(BaseActionTestCase):
    """Tests for RebuildAppAction, with and without mocking execute_command.

    Fixes over the previous revision:
    - the second ``test01_mock_st2_rebuild`` silently shadowed the first
      (duplicate method name) — renamed to ``test02_...`` so both run;
    - ``raise Error(...)`` referenced an undefined name (would raise
      ``NameError`` instead of the intended error) — now ``RuntimeError``;
    - the duplicated side-effect builder is factored into one helper;
    - ``assertEquals`` (removed in Python 3.12) replaced by ``assertEqual``;
    - the response fixture file is opened via ``with`` so it is closed;
    - ``input`` no longer shadows the builtin.
    """

    action_cls = RebuildAppAction
    class_common = "common_mydemo.Common"
    method_execute = class_common + ".execute_command"

    def _load_params(self):
        """Load the action's input parameters from the YAML fixture."""
        return yaml.load(
            self.get_fixture_content(input_file), Loader=yaml.FullLoader
        )

    def _make_execute_command(self, executer, ls_key=None):
        """Build a side_effect callable for the mocked execute_command.

        ``executer`` is the mock itself (its call_count selects the canned
        'rm' response). ``ls_key`` optionally picks a sub-key of the canned
        'ls' stdout (e.g. "exists" for the container-already-present case).
        """

        def _execute_command(_cmd):
            with open(res_file) as handle:
                _res = yaml.load(handle, Loader=yaml.FullLoader)
            if 'ls' in _cmd and 'grep' in _cmd:
                _stdout = _res["succeeded"]["ls"]["stdout"]
                if ls_key is not None:
                    _stdout = _stdout[ls_key]
                _stderr = _res["succeeded"]["ls"]["stderr"]
            elif 'stop' in _cmd and 'rm' in _cmd:
                _stderr = _res["succeeded"]["rm"]["stderr"]
                # The 2nd/3rd calls in the action's flow are the two removals.
                if executer.call_count == 2:
                    _stdout = _res["succeeded"]["rm"]["stdout"]["former"]
                elif executer.call_count == 3:
                    _stdout = _res["succeeded"]["rm"]["stdout"]["latter"]
                else:
                    raise RuntimeError("docker_container_rm_err")
            elif '--build' in _cmd:
                _stdout = _res["succeeded"]["build"]["stdout"]
                _stderr = _res["succeeded"]["build"]["stderr"]
            else:
                raise RuntimeError("_excute_command_err")
            return True, _stdout, _stderr

        return _execute_command

    def _assert_rebuild_result(self, result, executer):
        """Common assertions on a successful mocked rebuild run."""
        self.assertEqual(len(result), 4)
        self.assertEqual(result["bool"], True)
        self.assertIn("docker-compose", result["command"])
        self.assertNotEqual(result["stdout"], "")
        self.assertNotEqual(result["stderr"], "")
        self.assertEqual(executer.call_count, 4)

    def test00_no_mock_st2(self):
        """Run the action end-to-end (no mocks) and sanity-check the result."""
        params = self._load_params()
        action = self.get_action_instance()
        result = action.run(**params)
        self.assertEqual(len(result), 4)
        self.assertEqual(result["bool"], True)

    @patch(method_execute)
    def test01_mock_st2_rebuild(self, executer):
        """Mocked rebuild where the 'ls' check returns the plain stdout."""
        params = self._load_params()
        executer.side_effect = self._make_execute_command(executer)
        action = self.get_action_instance()
        result = action.run(**params)
        print('result: {r}'.format(r=result))
        self._assert_rebuild_result(result, executer)

    @patch(method_execute)
    def test02_mock_st2_rebuild(self, executer):
        """Mocked rebuild where 'ls' reports an existing container ("exists")."""
        params = self._load_params()
        executer.side_effect = self._make_execute_command(executer, ls_key="exists")
        action = self.get_action_instance()
        result = action.run(**params)
        print('result: {r}'.format(r=result))
        self._assert_rebuild_result(result, executer)
| 32.624161
| 86
| 0.565521
| 512
| 4,861
| 5.113281
| 0.21875
| 0.064171
| 0.055004
| 0.029794
| 0.818946
| 0.766234
| 0.766234
| 0.766234
| 0.766234
| 0.766234
| 0
| 0.007383
| 0.303436
| 4,861
| 148
| 87
| 32.844595
| 0.7658
| 0.016458
| 0
| 0.770642
| 0
| 0
| 0.15676
| 0.047091
| 0
| 0
| 0
| 0
| 0.12844
| 1
| 0.045872
| false
| 0
| 0.073395
| 0
| 0.174312
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f495ad24bbb5cb96c88aaddfeb6c63928fe380ee
| 6,512
|
py
|
Python
|
playground/learning-face-3d-movement-detection.py
|
porcelainruler/Driver-Drowsiness-Detection
|
a93ccde1c871786178f7c03f4cd793007c017c68
|
[
"Apache-2.0"
] | null | null | null |
playground/learning-face-3d-movement-detection.py
|
porcelainruler/Driver-Drowsiness-Detection
|
a93ccde1c871786178f7c03f4cd793007c017c68
|
[
"Apache-2.0"
] | null | null | null |
playground/learning-face-3d-movement-detection.py
|
porcelainruler/Driver-Drowsiness-Detection
|
a93ccde1c871786178f7c03f4cd793007c017c68
|
[
"Apache-2.0"
] | 1
|
2020-11-19T14:17:51.000Z
|
2020-11-19T14:17:51.000Z
|
import cv2
import numpy as np
import dlib
from imutils import face_utils
# Capture frames from the default webcam and estimate head pose.
#
# Fix: the original mixed Python 2 print statements (print "...") with
# Python 3 calls (print('Face not detected')), so it was a syntax error on
# either interpreter; all prints are now Python 3 function calls. The bare
# `except:` is narrowed to `except Exception:` so Ctrl-C still works.
capture = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('../shape_predictor_68_face_landmarks.dat')

while True:
    ret, im = capture.read()
    size = im.shape
    rects = detector(im, 0)
    try:
        # 68-point landmark array for the first detected face; raises
        # IndexError (caught below) when no face is present.
        shape = face_utils.shape_to_np(predictor(im, rects[0]))
        # 2D image points. If you change the image, you need to change vector.
        image_points = np.array([
            shape[33],  # Nose tip
            shape[8],   # Chin
            shape[45],  # Left eye left corner
            shape[36],  # Right eye right corner
            shape[54],  # Left mouth corner
            shape[48],  # Right mouth corner
        ], dtype="double")
        # 3D model points of a generic face, in model coordinates.
        model_points = np.array([
            (0.0, 0.0, 0.0),          # Nose tip
            (0.0, -330.0, -65.0),     # Chin
            (-225.0, 170.0, -135.0),  # Left eye left corner
            (225.0, 170.0, -135.0),   # Right eye right corner
            (-150.0, -150.0, -125.0), # Left mouth corner
            (150.0, -150.0, -125.0),  # Right mouth corner
        ])
        # Approximate camera intrinsics from the frame size (no calibration).
        focal_length = size[1]
        center = (size[1] / 2, size[0] / 2)
        camera_matrix = np.array(
            [[focal_length, 0, center[0]],
             [0, focal_length, center[1]],
             [0, 0, 1]], dtype="double"
        )
        dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        (success, rotation_vector, translation_vector) = cv2.solvePnP(
            model_points, image_points, camera_matrix, dist_coeffs,
            flags=cv2.SOLVEPNP_ITERATIVE)
        print("Rotation Vector:\n {0}".format(rotation_vector))
        print("Translation Vector:\n {0}".format(translation_vector))
        # Project a 3D point (0, 0, 1000.0) onto the image plane.
        # We use this to draw a line sticking out of the nose.
        (nose_end_point2D, jacobian) = cv2.projectPoints(
            np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
            translation_vector, camera_matrix, dist_coeffs)
        for p in image_points:
            cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        cv2.line(im, p1, p2, (255, 0, 0), 2)
    except Exception:
        print('Face not detected')
    # Display image; ESC (27) exits the loop.
    cv2.imshow("Output", im)
    if cv2.waitKey(1) == 27:
        break
# Read Image
# im = cv2.imread("headpose.jpg")
# size = im.shape
# detector = dlib.get_frontal_face_detector()
# rects = detector(im, 0)
# predictor = dlib.shape_predictor('../shape_predictor_68_face_landmarks.dat')
# (leStart, leEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
# (reStart, reEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# shape = face_utils.shape_to_np(predictor(im, rects[0]))
# leftEye = shape[leStart:leEnd]
# rightEye = shape[reStart:reEnd]
# leftEyeHull = cv2.convexHull(leftEye)
# rightEyeHull = cv2.convexHull(rightEye)
# cv2.drawContours(im, [leftEyeHull], -1, (255, 255, 255), 1)
# # cv2.drawContours(im, [rightEyeHull], -1, (255, 255, 255), 1)
# print(rightEye, shape[3])
# #2D image points. If you change the image, you need to change vector
# image_points = np.array([
# shape[33], #(359, 391), # Nose tip
# shape[8],# (399, 561), # Chin
# shape[45],# (337, 297), # Left eye left corner
# shape[36],# (513, 301), # Right eye right corne
# shape[54],# (345, 465), # Left Mouth corner
# shape[48]# (453, 469) # Right mouth corner
# ], dtype="double")
# # 3D model points.
# model_points = np.array([
# (0.0, 0.0, 0.0), # Nose tip
# (0.0, -330.0, -65.0), # Chin
# (-225.0, 170.0, -135.0), # Left eye left corner
# (225.0, 170.0, -135.0), # Right eye right corne
# (-150.0, -150.0, -125.0), # Left Mouth corner
# (150.0, -150.0, -125.0) # Right mouth corner
# ])
# # Camera internals
# focal_length = size[1]
# center = (size[1]/2, size[0]/2)
# camera_matrix = np.array(
# [[focal_length, 0, center[0]],
# [0, focal_length, center[1]],
# [0, 0, 1]], dtype = "double"
# )
# print "Camera Matrix :\n {0}".format(camera_matrix)
# dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
# (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
# print "Rotation Vector:\n {0}".format(rotation_vector)
# print "Translation Vector:\n {0}".format(translation_vector)
# # Project a 3D point (0, 0, 1000.0) onto the image plane.
# # We use this to draw a line sticking out of the nose
# (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
# for p in image_points:
# cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
# p1 = ( int(image_points[0][0]), int(image_points[0][1]))
# p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
# cv2.line(im, p1, p2, (255,0,0), 2)
# # Display image
# cv2.imshow("Output", im)
# cv2.waitKey(0)
| 42.562092
| 155
| 0.506143
| 798
| 6,512
| 4.015038
| 0.199248
| 0.022472
| 0.013109
| 0.009988
| 0.836454
| 0.81211
| 0.772784
| 0.772784
| 0.772784
| 0.772784
| 0
| 0.099095
| 0.355344
| 6,512
| 153
| 156
| 42.562092
| 0.664126
| 0.556972
| 0
| 0
| 0
| 0
| 0.04398
| 0.01442
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.078431
| null | null | 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f4ee91128297ea09493c01d983cb0ec3d9c2d756
| 326
|
py
|
Python
|
gltools/main/__init__.py
|
jvzantvoort/gltools
|
e6b013540983ccc70d23d05279164832047bb966
|
[
"MIT"
] | null | null | null |
gltools/main/__init__.py
|
jvzantvoort/gltools
|
e6b013540983ccc70d23d05279164832047bb966
|
[
"MIT"
] | 1
|
2019-11-12T08:11:14.000Z
|
2019-11-12T08:11:14.000Z
|
gltools/main/__init__.py
|
jvzantvoort/gltools
|
e6b013540983ccc70d23d05279164832047bb966
|
[
"MIT"
] | null | null | null |
from gltools.main.exportgroup import ExportGroup
from gltools.main.workongroup import WorkOnGroup
from gltools.main.syncgroup import SyncGroup
from gltools.main.syncgroup import SyncGroupLocal
from gltools.main.groups import ListGroups
from gltools.main.projects import ListProjects
from gltools.main.config import InitConfig
| 40.75
| 49
| 0.871166
| 42
| 326
| 6.761905
| 0.333333
| 0.271127
| 0.369718
| 0.169014
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08589
| 326
| 7
| 50
| 46.571429
| 0.95302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
52007d4b7eb014b60c4469384c473c38f1c6535b
| 7,742
|
py
|
Python
|
django_social_launch/tests/test_urls.py
|
elricgit/django-social-launch
|
2c005c26986f97c363978b58cc1ce24c1a4d7ed9
|
[
"BSD-3-Clause"
] | 1
|
2018-06-19T10:45:22.000Z
|
2018-06-19T10:45:22.000Z
|
django_social_launch/tests/test_urls.py
|
elricgit/django-social-launch
|
2c005c26986f97c363978b58cc1ce24c1a4d7ed9
|
[
"BSD-3-Clause"
] | null | null | null |
django_social_launch/tests/test_urls.py
|
elricgit/django-social-launch
|
2c005c26986f97c363978b58cc1ce24c1a4d7ed9
|
[
"BSD-3-Clause"
] | null | null | null |
#Django imports
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.sessions.backends.db import SessionStore
#App imports
from .. import user_successfully_created_msg, referrer_url_session_key, referring_user_id_session_key
from ..models import SocialLaunchProfile
#Test imports
from .util import BaseTestCase
class IndexTestCase(BaseTestCase):
    """Tests for the social_launch_index signup view.

    Fix: ``assertEquals`` (deprecated alias, removed in Python 3.12's
    unittest) replaced with ``assertEqual`` throughout; behavior unchanged.
    BaseTestCase presumably creates one user (self.user1) in setUp — the
    baseline counts below assume exactly one pre-existing User.
    """

    def test_get(self):
        """GET renders the index page."""
        response = self.client.get(reverse('social_launch_index'))
        self.assertEqual(response.status_code, 200)

    def test_get_with_referrer(self):
        """GET with an HTTP referrer stores the referrer URL in the session."""
        referrer_url = 'http://facebook.com'
        response = self.client.get(reverse('social_launch_index'), HTTP_REFERER=referrer_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)

    def test_post_success_creates_new_user(self):
        """POSTing a valid email creates a passwordless user and its profile."""
        post_data = {'email' : 'foo@example.com'}
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEqual(len(users), 2)
        self.assertEqual(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEqual(user.email, post_data['email'])
        self.assertEqual(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEqual(slp.user, user)
        self.assertEqual(slp.referrer_url, '')
        self.assertEqual(slp.referring_user, None)

    def test_post_success_creates_new_user_with_referrer(self):
        """A stored session referrer URL ends up on the created profile."""
        referrer_url = 'http://facebook.com'
        post_data = {'email' : 'foo@example.com'}
        # Seed the session the same way the GET view would have.
        session = SessionStore()
        session[referrer_url_session_key] = referrer_url
        session[referring_user_id_session_key] = ''
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEqual(len(users), 2)
        self.assertEqual(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEqual(user.email, post_data['email'])
        self.assertEqual(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEqual(slp.user, user)
        self.assertEqual(slp.referrer_url, referrer_url)
        self.assertEqual(slp.referring_user, None)

    def test_post_fails_invalid_email(self):
        """An invalid email creates nothing and re-renders the form."""
        post_data = {'email' : 'fooexample.com'}
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)

    def test_post_fails_invalid_email_with_referrer(self):
        """A failed POST must keep the referrer URL in the session."""
        referrer_url = 'http://facebook.com'
        post_data = {'email' : 'fooexample.com'}
        session = SessionStore()
        session[referrer_url_session_key] = referrer_url
        session[referring_user_id_session_key] = ''
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)
        self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)

    def test_post_fails_no_email(self):
        """A POST without an email field creates nothing."""
        post_data = {}
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)
class ReferralTestCase(BaseTestCase):
    """Tests for the social_launch_referral view.

    Fix: ``assertEquals`` (deprecated alias, removed in Python 3.12's
    unittest) replaced with ``assertEqual`` throughout; behavior unchanged.
    """

    def test_get_success(self):
        """GET with a valid referring user id renders the referral page."""
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}))
        self.assertEqual(response.status_code, 200)

    def test_get_fails_invalid_id(self):
        """A non-numeric referring user id yields 404."""
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 'foo'}))
        self.assertEqual(response.status_code, 404)

    def test_get_fails_no_such_user(self):
        """A numeric id with no matching user yields 404."""
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 1000}))
        self.assertEqual(response.status_code, 404)

    def test_post_success_creates_new_user(self):
        """A valid referral POST links the new profile to the referring user."""
        post_data = {'email' : 'foo@example.com'}
        session = SessionStore()
        session[referring_user_id_session_key] = self.user1.id
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEqual(len(users), 2)
        self.assertEqual(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEqual(user.email, post_data['email'])
        self.assertEqual(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEqual(slp.user, user)
        self.assertEqual(slp.referrer_url, '')
        self.assertEqual(slp.referring_user, self.user1)

    def test_post_success_creates_new_user_bad_referring_used_id(self):
        """A stale/bad referring id in the session still creates the user, unlinked."""
        post_data = {'email' : 'foo@example.com'}
        session = SessionStore()
        session[referring_user_id_session_key] = 1000
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEqual(len(users), 2)
        self.assertEqual(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEqual(user.email, post_data['email'])
        self.assertEqual(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEqual(slp.user, user)
        self.assertEqual(slp.referrer_url, '')
        self.assertEqual(slp.referring_user, None)
| 35.351598
| 134
| 0.761431
| 988
| 7,742
| 5.731781
| 0.098178
| 0.079463
| 0.053682
| 0.045912
| 0.907823
| 0.897581
| 0.889634
| 0.873918
| 0.835953
| 0.817941
| 0
| 0.010632
| 0.113149
| 7,742
| 218
| 135
| 35.513761
| 0.814157
| 0.004779
| 0
| 0.806667
| 0
| 0
| 0.091145
| 0.025708
| 0
| 0
| 0
| 0
| 0.486667
| 1
| 0.08
| false
| 0.026667
| 0.046667
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
520578bfe44ca1433a5ffc231025dbc758c7d26a
| 4,363
|
py
|
Python
|
reference_quality_predictor/fleiss_kappa.py
|
Aliossandro/WD_references_analysis
|
ee054182dd398fed90fce49fdfa4b8cd6192f38e
|
[
"MIT"
] | 5
|
2017-05-10T08:40:30.000Z
|
2020-04-27T07:28:48.000Z
|
reference_quality_predictor/fleiss_kappa.py
|
Aliossandro/WD_references_analysis
|
ee054182dd398fed90fce49fdfa4b8cd6192f38e
|
[
"MIT"
] | null | null | null |
reference_quality_predictor/fleiss_kappa.py
|
Aliossandro/WD_references_analysis
|
ee054182dd398fed90fce49fdfa4b8cd6192f38e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on May 15 2017
@author: Alessandro
"""
import pandas as pd
from reference_quality_predictor.metrics import computeFleissKappa
###T1
def _fleiss_kappa_for_task(file_name, group_column, rating_column, categories):
    """Compute Fleiss' kappa for one crowdsourcing task from a CSV export.

    Rows are grouped by ``group_column`` (one group per rated unit) and, for
    each unit, the number of raters who chose each value in ``categories`` is
    counted. Only units with exactly 5 judgements (the expected number of
    raters per unit) are kept; any other count is printed for inspection and
    the unit is skipped.

    Returns the kappa value from computeFleissKappa.
    """
    file_pd = pd.read_csv(file_name)
    fleiss_ratings = []
    for _, group in file_pd.groupby(group_column):
        ratings = [len(group[group[rating_column] == category])
                   for category in categories]
        if sum(ratings) == 5:
            fleiss_ratings.append(ratings)
        else:
            # Unit with a missing/extra judgement: report and skip it.
            print(ratings)
    return computeFleissKappa(fleiss_ratings)


def main():
    """Compute and print Fleiss' kappa for the four annotation tasks."""
    # T1: reference relevance judgements.
    print(_fleiss_kappa_for_task(
        '~/Documents/PhD/relevance_results_new.csv', 'X_unit_id', 'response',
        ['yes', 'no', 'no_item', 'nw_item', 'no_property', 'ne_item']))
    # T2: author type classification.
    print(_fleiss_kappa_for_task(
        '~/Documents/PhD/author_all_new.csv', 'ref_value', 'author_type',
        ['organisation', 'collective', 'nw', 'individual', 'ne', 'dn']))
    # T3.A: publisher type classification.
    print(_fleiss_kappa_for_task(
        '~/Documents/PhD/publisher_all_new.csv', 'domain', 'publisher_type',
        ['news', 'company', 'nw', 'sp_source', 'academia', 'other', 'govt', 'ne']))
    # T3.B: publisher verification results.
    print(_fleiss_kappa_for_task(
        '~/Documents/PhD/publisher_verify_full_new.csv', 'domain', 'results',
        ['vendor', 'no_profit', 'nw', 'cultural', 'political', 'non_trad_news',
         'academia_pub', 'trad_news', 'academia_uni', 'academia_other',
         'ne', 'no', 'yes']))
print T3B_fleiss
# Allow running this analysis directly as a script.
if __name__ == "__main__":
    main()
| 35.185484
| 80
| 0.60463
| 548
| 4,363
| 4.653285
| 0.164234
| 0.155294
| 0.207059
| 0.271765
| 0.841176
| 0.752549
| 0.752549
| 0.752549
| 0.628627
| 0.323137
| 0
| 0.025671
| 0.205363
| 4,363
| 123
| 81
| 35.471545
| 0.709836
| 0.007564
| 0
| 0.422222
| 0
| 0
| 0.170926
| 0.036811
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.022222
| null | null | 0.088889
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5290b3d15375bd5b480dd84c4d97744524b60592
| 5,815
|
py
|
Python
|
whatsbomber.py
|
arabind-007/Whatshack
|
e68cf6c54745c36d920ff2660ee519cc68abee09
|
[
"Apache-2.0"
] | 4
|
2021-05-02T12:56:17.000Z
|
2022-02-13T12:59:44.000Z
|
whatsbomber.py
|
arabind-007/Whatshack
|
e68cf6c54745c36d920ff2660ee519cc68abee09
|
[
"Apache-2.0"
] | null | null | null |
whatsbomber.py
|
arabind-007/Whatshack
|
e68cf6c54745c36d920ff2660ee519cc68abee09
|
[
"Apache-2.0"
] | null | null | null |
from pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x08\x00\x55\x0d\x0d\x0a\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x55\x05\x00\x00\x00\x00\x00\x18\x65\xe2\xec\xfb\xc1\x8a\xbd\xf9\xe5\x65\x24\xbf\xf2\x1d\x9c\x3c\x00\x00\x00\x00\x00\x00\x00\x00\xd3\x39\x61\x98\x38\x13\xe5\xd7\xed\x7b\x8d\xe1\xa1\x5d\x9c\x2a\xdf\xdf\x76\x8f\xa9\xaf\xce\x9b\xf1\x15\x30\xc1\x27\x95\xfe\x89\xc5\x9e\x88\x96\xa6\x3d\xc2\x85\x45\xf5\x17\xc5\x90\x0c\x0c\x10\xf0\x9a\xd9\xb9\x2f\x60\x25\x76\xe8\xe0\xa0\xd7\xc1\x46\x80\x38\x85\xe7\xa0\x1f\x40\x74\x71\x23\x2a\x9d\x70\x70\xf6\x17\x7e\x69\xab\x5d\xf4\xe6\x54\x14\xdf\x78\x72\x7e\xd2\x59\x8e\xd6\x1e\x14\xd2\x02\x93\x6d\xfd\x60\x41\x67\x1a\x5f\xf1\xc0\x12\xa1\x36\x2d\xbf\x0d\x07\x8e\x46\x5e\xf9\xb9\xb6\x94\xa0\x5b\xe6\x90\xe3\xbc\xb3\xce\xea\x57\x2a\xd6\x06\xdb\xe3\x92\xff\xdc\xff\xc1\xb0\x4c\xfb\xa8\xc1\xe2\x1c\xad\x4f\x4f\x70\x86\x04\x32\x2e\x52\x1d\x0a\x6a\xed\x17\x66\x34\x74\x7b\xe7\xe6\xc7\x2d\x29\xc8\xe8\x29\xf8\x7e\x5d\xfa\x5d\xad\xbd\x8d\x66\xf1\x53\xd8\x2e\xe3\x75\x3e\x71\xd5\x6c\x28\x07\x84\x48\xd1\xe9\x7d\xfc\x67\x1a\x75\x70\xcd\xc0\xc1\xc3\xf5\x87\x38\x9e\x23\x6a\xd7\xa5\x97\x4f\x85\xfa\x95\x27\x3d\xee\x3e\xd9\x28\x07\x40\x19\x22\xd8\xa1\x14\x9b\xdb\x23\x94\x01\x41\x84\x84\x34\xdd\xe2\x2c\xce\xa4\xe3\x85\x23\xad\xf4\x84\xd8\x82\x06\x3b\x86\x9e\x34\xa7\x7b\x62\xe7\x9f\x73\x57\xbb\x70\x7e\x51\x88\x3a\x8d\xca\x6e\xb7\xc7\xcd\x25\x60\xca\xcc\xec\xc8\xf3\xb9\x3e\x1f\xf1\x71\xd2\x07\xfa\x8d\x1a\x1b\xd8\x5e\x6d\xe9\x68\x95\x7c\x1a\x28\xaa\x14\xed\x1c\x7d\xea\xa6\xad\xa5\xe7\x0e\x08\x1e\x72\x0d\xe5\x14\x27\xb6\xa9\x72\x27\xd2\xd0\xe4\xbd\x65\xf5\x11\xa0\x5c\xe5\x96\xf0\x66\xbe\x92\xb0\xb1\x71\x74\x1d\x9b\x34\x5b\x35\x43\x4d\x18\x59\x69\x46\xb3\x7a\xb4\x39\x6a\xc3\x3b\xf1\xdc\x07\x9f\xc0\x80\x72\x63\xd4\x86\x62\x89\x9a\x7a\xcc\x53\x0a\xfa\x48\xf1\xa6\xb4\x4d\x89\x4e\xfc\xfd\x28\x4c\x4c\x91\xb2\x3c\x95\xb5\xb5\x8a\x7d\x48\x8d\x2b\xd1\xa8\x14\xb4\xed\x4a\x38\x37\xde\xc8\xfe\x65\x11\x73\x7e\x82\x7f\x33\x
ff\xa1\xd4\xce\xbc\x35\xc0\xaf\xe3\x5d\x12\x84\xc4\xbf\xe0\x78\xb8\x3f\xe1\x20\x57\x22\x44\x8d\x80\x75\xf8\x92\xdd\xf9\xdf\xeb\x0d\xe1\x3b\x53\x80\x72\xb5\xf0\x1c\x4b\xb7\x2b\xab\x8e\x0f\xdf\xbc\x4f\x87\x0c\x43\xd1\xf8\x13\x36\x22\x4f\x54\x1e\x99\x93\x4f\xa7\xbd\x72\x20\xa1\x8c\xf2\x8a\x99\x17\x27\x4c\xc3\x91\x33\x07\xbe\xf3\x87\xbb\x44\xe9\xc5\xef\x24\xc7\x7f\xa9\x88\x69\x52\x00\x5a\xcc\x28\x1f\xac\xbd\xf9\xe7\x36\x11\xda\x66\x19\x78\x42\x1b\xab\xcd\x44\xc2\x55\xb7\x82\x2e\x62\x15\x4e\xb2\x77\x11\x8d\xa8\xc2\x4e\x3d\x59\x7f\x0a\xf7\x58\x23\x6e\xc0\x9b\x08\x00\x82\xce\x27\x7e\xa3\x07\xd2\xb7\x23\xb2\x08\x33\x17\xae\x28\xe9\x19\x25\x19\xa2\x9b\xab\x2f\x60\x7a\x55\x68\x70\xee\xaa\x62\xe1\xb0\x08\xcf\x73\xfa\xa3\x57\xab\x49\x12\x38\x85\x0b\x86\xf6\xc6\x8f\x7b\x64\x32\x5a\x27\xe4\x5a\xca\x82\x24\x74\x36\x29\x97\xc5\x62\x24\x15\x28\x2e\xd4\x91\x24\xed\x4b\xcf\xf4\x3e\xbe\x5f\x81\x42\xd7\x23\xd5\x5f\x4e\x87\xac\x1a\x14\x57\xd7\x8a\xd3\x83\x9b\x8d\xb3\x7b\xf2\x63\x48\xdd\x20\x9c\x88\xc1\xe3\x36\xbb\x3f\x65\x21\x3d\xd6\x81\x6d\x42\xae\xa7\x45\xe3\xb0\xc1\x67\x8a\xa7\x7d\x14\xb1\xb0\xb1\xc3\xc8\x34\xd4\x02\x69\xea\x50\x43\xa9\x23\x4c\x63\xfb\xc5\x2e\xc7\x51\x51\x4b\x60\xcc\x35\x89\x92\xf0\x4e\x03\xfd\x5f\x5e\x7e\xe0\xe3\xd2\xcc\x2b\x91\x19\xbe\xf4\xf1\x1f\xf6\x56\x6f\xb1\x39\xf9\x8b\x31\xf8\xe3\x9c\xbe\x98\x71\xf7\xb7\xf2\x32\xc2\x21\x55\x8b\x7a\x06\xe8\x5a\x9d\x82\x31\xbf\x7d\x5d\x00\xd6\x6c\x2b\x70\xc2\x46\x61\xa3\x72\x25\xf5\xa9\x13\x61\x18\x09\x4a\x29\xd8\x1b\xde\x87\xd9\xfe\x7a\xb1\xca\xbe\x32\x33\x97\x94\x48\x96\xb6\x5b\x4b\xe7\x74\xaf\x35\x42\x34\xd0\xde\x57\xec\xa7\x0e\x93\x58\xb0\xc3\xc9\x55\xbc\x77\x38\x3a\x4a\xb9\xc6\x04\x72\x98\x83\xda\xb5\xcd\x42\x14\x2e\x33\x6f\xf1\xc1\x63\xf9\x0b\x8b\xb3\x62\xb0\x17\x35\x87\x07\xe4\xb9\x54\xd3\x0c\xfc\xa5\x87\xdf\xce\x07\x91\x66\xcc\xfb\xf0\x1a\x87\xa8\x31\x1d\x9e\xe0\xc9\xcd\xa3\xb1\xbc\x9a\x9c\x2b\x1a\x77\xc8\x86\x48\xe8\x60\x28\x23\x5c\x97\x44\x09\xf8\x82\x86\xb6\xb1\xbc\x45\xd4\x20\x29\x83\x4f\x35\x91\xb8\xdf\x46\x57\x15\x
91\x04\x42\x88\xaf\xa3\xff\xaa\x52\x6d\xa8\xb3\x7a\xea\xac\xa6\x72\xcf\xa7\x2b\x74\xf3\x24\xce\x96\x9e\x24\x72\x19\x0b\x05\x81\x1a\xb6\xae\x66\x58\x26\x86\xb2\x5a\x39\x0a\x46\x45\x18\xcd\x8d\x7f\x7f\x20\x94\xe0\x03\x85\x2f\x62\xdb\x30\xb0\xc9\x4e\x8d\x50\xb2\xde\xf8\xf2\x2c\x7c\xb0\x65\xa7\x91\x4b\xd8\xfe\xae\x9d\x96\x4e\x4d\x9a\x16\x4c\x02\x03\xae\x36\x1d\x00\xba\xe3\x19\x58\x23\xa1\xfb\x8d\x29\x59\xe1\x38\x86\x04\x65\xcf\x61\x4b\x2e\x27\x3a\x8c\xcf\xf6\x9e\x32\xdc\x89\xb4\xf4\x00\x5f\x6d\x72\xa2\x50\x2b\x9d\x7b\xd3\x2c\xd9\xb8\x36\xc0\x1d\x24\xb4\x59\xc3\xf6\xe7\x9a\x77\x21\x40\x1e\x24\x3f\x78\x9c\xa5\x2a\x62\x98\x93\xc0\x06\x35\x2d\x39\x95\xd2\x90\x0a\xc2\x4d\x00\xca\xab\xf3\x01\x0d\x65\xc1\x31\x7b\x46\x6a\x44\x97\x75\xae\x7e\xfb\x44\x45\x88\xf1\xfd\x5f\xfd\x91\x5b\x3a\x55\xc1\xc5\xe0\x1f\x58\x44\x53\xbf\xb3\x78\x27\x9e\x2d\x81\xcf\x36\x30\xcd\x7c\xff\x17\xc3\xeb\xe0\x7f\x8b\x4d\x33\x3b\x8c\x46\xcf\xc9\x76\x04\x84\x27\xbc\xf0\x5e\x99\x85\xd7\x57\xd5\xd8\xb2\x3d\xfd\x4e\x13\xcf\x7c\x41\xb5\x4c\x13\xc1\x97\x09\xd6\x1d\xe7\x52\x65\x66\x4b\x66\x2f\x60\x92\x07\xaf\xba\xb4\x71\xb9\x45\xc7\xe7\x18\x23\x65\x14\x05\xf3\x1c\x7c\xae\x31\x32\x00\xbb\xf7\x7f\x95\xa1\x85\x00\xf3\x4f\xb1\xe5\xa9\x52\x1b\x43\x12\xe1\x9e\xa3\xd8\x74\xf0\x3e\x37\xef\xde\x75\xa7\x93\x75\x54\xe0\xa0\x81\x67\x0d\xfc\x76\x49\x15\xeb\xd6\x47\xc8\x44\x8a\x62\x4c\x87\xa4\xe7\x8f\xa4\x26\x9a\xc8\x39\x92\x09\x8b\x1e\xd3\x98\xff\x67\xcb\x1e\x28\x53\x43\x28\x9c\x77\xaa\x33\x31\xd5\xf6\x94\x24\x66\x0a\x06\xc9\xe1\x24\x09\x12\xae\x70\x2f\x14\xcc\x05\xcb\x4f\xe6\xa0\x04\x3e\x5f\xbd\x7c\x23\xdd\x9d\x53\xe2\x51\x7c\x1f\x04\x63\x5e\xc9\x96\x4e\x0a\x4f\xaf\x82\x3a\x52\x51\x92\x0f\x5e\x67\x44\x81\xe5\x78\xb6\x7f\x2b\x06\x30\x32\x57\x8a\xc5\x17\x6a\xc8\x97\x4b\x23\x22\xa2', 2)
| 1,938.333333
| 5,755
| 0.750989
| 1,441
| 5,815
| 3.020819
| 0.182512
| 0.030324
| 0.033081
| 0.030324
| 0.013784
| 0.010338
| 0.010338
| 0
| 0
| 0
| 0
| 0.320758
| 0.00172
| 5,815
| 3
| 5,755
| 1,938.333333
| 0.429113
| 0
| 0
| 0
| 0
| 0.333333
| 0.983144
| 0.983144
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
8713897fdfc24f214ec8a737bb5f6b2e209d77da
| 522
|
py
|
Python
|
test/conftest.py
|
tchar/ulauncher-calculate-anything
|
ee0903174c8b87cd1f7c3b6c1acef10702547507
|
[
"MIT"
] | 41
|
2021-07-12T08:40:28.000Z
|
2022-03-11T03:03:05.000Z
|
test/conftest.py
|
tchar/ulauncher-calculate-anything
|
ee0903174c8b87cd1f7c3b6c1acef10702547507
|
[
"MIT"
] | 28
|
2021-07-09T22:36:09.000Z
|
2022-03-28T08:54:15.000Z
|
test/conftest.py
|
tchar/ulauncher-calculate-anything
|
ee0903174c8b87cd1f7c3b6c1acef10702547507
|
[
"MIT"
] | 3
|
2021-07-12T04:52:20.000Z
|
2022-03-03T20:08:11.000Z
|
# Re-export the shared fixtures so pytest discovers them project-wide via
# this conftest module.
from test.fixtures import (
    log_filepath,
    httpserver_listen_address,
    httpserver_ssl_context,
    mock_currency_provider,
    in_memory_cache,
    mock_currency_service,
    ecb_data,
    coinbase_data,
    fixerio_data,
    mycurrencynet_data,
)

# Explicit public API: exactly the fixture names re-exported above (also
# keeps linters from flagging the imports as unused).
__all__ = [
    'log_filepath',
    'httpserver_listen_address',
    'httpserver_ssl_context',
    'mock_currency_provider',
    'in_memory_cache',
    'mock_currency_service',
    'ecb_data',
    'coinbase_data',
    'fixerio_data',
    'mycurrencynet_data',
]
| 19.333333
| 32
| 0.699234
| 55
| 522
| 6.018182
| 0.436364
| 0.145015
| 0.126888
| 0.163142
| 0.924471
| 0.924471
| 0.924471
| 0.924471
| 0.924471
| 0.924471
| 0
| 0
| 0.212644
| 522
| 26
| 33
| 20.076923
| 0.805353
| 0
| 0
| 0
| 0
| 0
| 0.321839
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
873dcb892e8fe8e39c9caa68c564970d81302e77
| 167
|
py
|
Python
|
tests/test_youtube.py
|
AlexWillCode/rick-roll-detector
|
ef74e7d49d1cea8d848c2e832b3c9842f1331ed9
|
[
"MIT"
] | 12
|
2021-04-23T23:43:55.000Z
|
2021-08-22T21:08:35.000Z
|
tests/test_youtube.py
|
AlexWillCode/rick-roll-detector
|
ef74e7d49d1cea8d848c2e832b3c9842f1331ed9
|
[
"MIT"
] | 3
|
2021-04-22T21:47:51.000Z
|
2021-06-01T18:04:56.000Z
|
tests/test_youtube.py
|
AlexWillCode/rick-roll-detector
|
ef74e7d49d1cea8d848c2e832b3c9842f1331ed9
|
[
"MIT"
] | 1
|
2021-06-24T18:21:31.000Z
|
2021-06-24T18:21:31.000Z
|
from rick_roll_detector import verify_youtube_video
def test_youtube_video():
    """The canonical Rick Astley video URL is detected as a rick-roll."""
    rick_roll_url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
    assert verify_youtube_video(rick_roll_url) is True
| 27.833333
| 86
| 0.814371
| 25
| 167
| 5.12
| 0.76
| 0.28125
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013245
| 0.095808
| 167
| 5
| 87
| 33.4
| 0.834437
| 0
| 0
| 0
| 0
| 0
| 0.257485
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8749d186b25b2085b064684422cc0e685a247803
| 3,496
|
py
|
Python
|
scpl/parser/operators/bitwise.py
|
jesopo/scpl
|
1fa5acfb468ab212276781fa1760bb5eda438c23
|
[
"MIT"
] | null | null | null |
scpl/parser/operators/bitwise.py
|
jesopo/scpl
|
1fa5acfb468ab212276781fa1760bb5eda438c23
|
[
"MIT"
] | 2
|
2021-11-15T11:12:14.000Z
|
2021-11-15T17:35:27.000Z
|
scpl/parser/operators/bitwise.py
|
jesopo/scpl
|
1fa5acfb468ab212276781fa1760bb5eda438c23
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional
from .common import ParseBinaryOperator
from ..operands import ParseAtom, ParseInteger
class ParseBinaryAndIntegerInteger(ParseBinaryOperator, ParseInteger):
    """AST node for bitwise AND of two integer operands."""

    def __init__(self, left: ParseInteger, right: ParseInteger):
        super().__init__(left, right)
        self._left = left
        self._right = right

    def __repr__(self) -> str:
        return f"And({self._left!r}, {self._right!r})"

    def eval(self, vars: Dict[str, ParseAtom]) -> int:
        # 'vars' shadows the builtin, but it is part of the eval() interface
        # shared across node types, so it stays.
        return self._left.eval(vars) & self._right.eval(vars)
def find_binary_and(left: ParseAtom, right: ParseAtom) -> Optional[ParseAtom]:
    """Return an integer AND node when both operands are integers, else None."""
    if not (isinstance(left, ParseInteger) and isinstance(right, ParseInteger)):
        return None
    return ParseBinaryAndIntegerInteger(left, right)
class ParseBinaryOrIntegerInteger(ParseBinaryOperator, ParseInteger):
    """AST node for bitwise OR of two integer operands."""

    def __init__(self, left: ParseInteger, right: ParseInteger):
        super().__init__(left, right)
        self._left = left
        self._right = right

    def __repr__(self) -> str:
        return f"Or({self._left!r}, {self._right!r})"

    def eval(self, vars: Dict[str, ParseAtom]) -> int:
        return self._left.eval(vars) | self._right.eval(vars)
def find_binary_or(left: ParseAtom, right: ParseAtom) -> Optional[ParseAtom]:
    """Return an integer OR node when both operands are integers, else None."""
    if not (isinstance(left, ParseInteger) and isinstance(right, ParseInteger)):
        return None
    return ParseBinaryOrIntegerInteger(left, right)
class ParseBinaryXorIntegerInteger(ParseBinaryOperator, ParseInteger):
    """AST node for bitwise XOR of two integer operands."""

    def __init__(self, left: ParseInteger, right: ParseInteger):
        super().__init__(left, right)
        self._left = left
        self._right = right

    def __repr__(self) -> str:
        return f"Xor({self._left!r}, {self._right!r})"

    def eval(self, vars: Dict[str, ParseAtom]) -> int:
        return self._left.eval(vars) ^ self._right.eval(vars)
def find_binary_xor(left: ParseAtom, right: ParseAtom) -> Optional[ParseAtom]:
    """Return an integer XOR node when both operands are integers, else None."""
    if not (isinstance(left, ParseInteger) and isinstance(right, ParseInteger)):
        return None
    return ParseBinaryXorIntegerInteger(left, right)
class ParseBinaryLeftIntegerInteger(ParseBinaryOperator, ParseInteger):
    """AST node for left-shifting an integer by an integer amount."""

    def __init__(self, left: ParseInteger, right: ParseInteger):
        super().__init__(left, right)
        self._left = left
        self._right = right

    def __repr__(self) -> str:
        return f"Left({self._left!r}, {self._right!r})"

    def eval(self, vars: Dict[str, ParseAtom]) -> int:
        return self._left.eval(vars) << self._right.eval(vars)
def find_binary_left(left: ParseAtom, right: ParseAtom) -> Optional[ParseAtom]:
    """Return an integer left-shift node when both operands are integers, else None."""
    if not (isinstance(left, ParseInteger) and isinstance(right, ParseInteger)):
        return None
    return ParseBinaryLeftIntegerInteger(left, right)
class ParseBinaryRightIntegerInteger(ParseBinaryOperator, ParseInteger):
    """AST node for right-shifting an integer by an integer amount."""

    def __init__(self, left: ParseInteger, right: ParseInteger):
        super().__init__(left, right)
        self._left = left
        self._right = right

    def __repr__(self) -> str:
        return f"Right({self._left!r}, {self._right!r})"

    def eval(self, vars: Dict[str, ParseAtom]) -> int:
        return self._left.eval(vars) >> self._right.eval(vars)
def find_binary_right(left: ParseAtom, right: ParseAtom) -> Optional[ParseAtom]:
    """Return an integer right-shift node when both operands are integers, else None."""
    if not (isinstance(left, ParseInteger) and isinstance(right, ParseInteger)):
        return None
    return ParseBinaryRightIntegerInteger(left, right)
| 44.253165
| 80
| 0.69365
| 394
| 3,496
| 5.901015
| 0.098985
| 0.068817
| 0.033548
| 0.08172
| 0.817204
| 0.807312
| 0.75914
| 0.75914
| 0.75914
| 0.75914
| 0
| 0
| 0.191362
| 3,496
| 78
| 81
| 44.820513
| 0.822427
| 0
| 0
| 0.616438
| 0
| 0
| 0.05206
| 0.006007
| 0
| 0
| 0
| 0
| 0
| 1
| 0.273973
| false
| 0
| 0.041096
| 0.136986
| 0.657534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
876a42d5727e7dee42ff2f57509183a3a3aa1296
| 135
|
py
|
Python
|
repocribro_file/controllers/__init__.py
|
MarekSuchanek/repocribro-file
|
0fa04d56702525f87652506ff7aedb17f5cd8fc1
|
[
"MIT"
] | null | null | null |
repocribro_file/controllers/__init__.py
|
MarekSuchanek/repocribro-file
|
0fa04d56702525f87652506ff7aedb17f5cd8fc1
|
[
"MIT"
] | 1
|
2018-12-03T12:17:27.000Z
|
2018-12-03T12:17:27.000Z
|
repocribro_file/controllers/__init__.py
|
MarekSuchanek/repocribro-file
|
0fa04d56702525f87652506ff7aedb17f5cd8fc1
|
[
"MIT"
] | null | null | null |
# Aggregate this extension's Flask blueprints so they can be registered
# in one place.
from repocribro_file.controllers.admin import admin_files

# All blueprints the extension exposes.
all_blueprints = [admin_files]

__all__ = ['all_blueprints', 'admin_files']
| 22.5
| 57
| 0.807407
| 17
| 135
| 5.823529
| 0.529412
| 0.30303
| 0.262626
| 0.464646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096296
| 135
| 5
| 58
| 27
| 0.811475
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
0d584ec5a4a7891e1127def5b1cb05c38044800c
| 1,455
|
py
|
Python
|
setup.py
|
isccarrasco/microblog
|
85d95cf047e16cf188c9280b8bb00f32c07195c9
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
isccarrasco/microblog
|
85d95cf047e16cf188c9280b8bb00f32c07195c9
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
isccarrasco/microblog
|
85d95cf047e16cf188c9280b8bb00f32c07195c9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
"""Bootstrap a 'flask' virtualenv and install the microblog dependencies."""
import os
import subprocess
import sys

# Packages to install/upgrade inside the virtualenv, in install order.
PACKAGES = [
    'flask',
    'flask-login',
    'flask-openid',
    'flask-mail',
    'flask-sqlalchemy',
    'sqlalchemy-migrate',
    'flask-whooshAlchemy',
    'flask-wtf',
    'flask-babel',
    'guess_language',
    'flipflop',
    'coverage',
    'psycopg2',
    'rauth',
]

subprocess.call(['virtualenv', 'flask'])

# Windows virtualenvs place executables under 'Scripts'; POSIX under 'bin'.
# (Renamed from 'bin' to avoid shadowing the builtin.)
bin_dir = 'Scripts' if sys.platform == 'win32' else 'bin'

pip_path = os.path.join('flask', bin_dir, 'pip')
for package in PACKAGES:
    subprocess.call([pip_path, 'install', '--upgrade', package])
| 53.888889
| 99
| 0.648797
| 185
| 1,455
| 5.097297
| 0.183784
| 0.222694
| 0.23754
| 0.296925
| 0.769883
| 0.769883
| 0.769883
| 0.769883
| 0.769883
| 0.769883
| 0
| 0.002232
| 0.076289
| 1,455
| 26
| 100
| 55.961538
| 0.699405
| 0.010997
| 0
| 0
| 0
| 0
| 0.361865
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0d59fe432a2c79570cac3932184aad4b28e507cc
| 156,591
|
py
|
Python
|
nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py
|
SidneyAn/nfv
|
5f0262a5b6ea4be59f977b9c587c483cbe0e373d
|
[
"Apache-2.0"
] | null | null | null |
nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py
|
SidneyAn/nfv
|
5f0262a5b6ea4be59f977b9c587c483cbe0e373d
|
[
"Apache-2.0"
] | null | null | null |
nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py
|
SidneyAn/nfv
|
5f0262a5b6ea4be59f977b9c587c483cbe0e373d
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2016-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
import uuid
from nfv_common import strategy as common_strategy
from nfv_vim import nfvi
from nfv_vim.objects import HOST_PERSONALITY
from nfv_vim.objects import SW_UPDATE_ALARM_RESTRICTION
from nfv_vim.objects import SW_UPDATE_APPLY_TYPE
from nfv_vim.objects import SW_UPDATE_INSTANCE_ACTION
from nfv_vim.objects import SwPatch
from nfv_vim.strategy._strategy import SwPatchStrategy
from . import sw_update_testcase # noqa: H304
def create_sw_patch_strategy(
        controller_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
        storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
        swift_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
        worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
        max_parallel_worker_hosts=10,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
        alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT,
        single_controller=False):
    """Build a SwPatchStrategy with test-friendly defaults.

    Every apply type defaults to IGNORE so individual tests only override
    the host personality they exercise.
    """
    # Gather the constructor arguments first to keep the call site flat.
    strategy_args = {
        'uuid': str(uuid.uuid4()),
        'controller_apply_type': controller_apply_type,
        'storage_apply_type': storage_apply_type,
        'swift_apply_type': swift_apply_type,
        'worker_apply_type': worker_apply_type,
        'max_parallel_worker_hosts': max_parallel_worker_hosts,
        'default_instance_action': default_instance_action,
        'alarm_restrictions': alarm_restrictions,
        'ignore_alarms': [],
        'single_controller': single_controller,
    }
    return SwPatchStrategy(**strategy_args)
@mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', sw_update_testcase.fake_save)
@mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', sw_update_testcase.fake_timer)
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name', sw_update_testcase.fake_host_name)
@mock.patch('nfv_vim.event_log._instance._event_issue', sw_update_testcase.fake_event_issue)
@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', sw_update_testcase.fake_nfvi_compute_plugin_disabled)
class TestSwPatchStrategy(sw_update_testcase.SwUpdateStrategyTestCase):
"""
Software Patch Strategy Unit Tests
"""
    def test_sw_patch_strategy_worker_stages_ignore(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - ignore apply
        - stop start instance action
        Verify:
        - stages not created
        """
        # Four worker hosts, two of which carry an instance.
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        self.create_instance('small',
                             "test_instance_1",
                             'compute-1')
        self.create_instance_group('instance_group_1',
                                   ['test_instance_0', 'test_instance_1'],
                                   [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
        # Collect every host with the WORKER personality.
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
        )
        success, reason = strategy._add_worker_strategy_stages(
            worker_hosts=sorted_worker_hosts,
            reboot=True)
        assert success is True, "Strategy creation failed"
        apply_phase = strategy.apply_phase.as_dict()
        # IGNORE apply type must produce no stages at all.
        expected_results = {
            'total_stages': 0
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_migrate_anti_affinity(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - migrate instance action
        Verify:
        - hosts with no instances patched first
        - anti-affinity policy enforced
        """
        # Four workers; the two hosting anti-affinity instances must be
        # patched in separate stages.
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        self.create_instance('small',
                             "test_instance_1",
                             'compute-1')
        self.create_instance_group('instance_group_1',
                                   ['test_instance_0', 'test_instance_1'],
                                   [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
        # Collect every host with the WORKER personality.
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
            max_parallel_worker_hosts=2
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Stage 1: the instance-free hosts in parallel; stages 2 and 3:
        # one anti-affinity host each, with its instance migrated first.
        expected_results = {
            'total_stages': 3,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_1']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                }
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_migrate_ten_hosts(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - migrate instance action
        Verify:
        - hosts with no instances patched first
        - instances migrated
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_host('compute-4')
        self.create_host('compute-5')
        self.create_host('compute-6')
        self.create_host('compute-7')
        self.create_host('compute-8')
        self.create_host('compute-9')
        # Note: compute-1 and compute-5 deliberately get no instances, so
        # they can be patched first without any migrations.
        self.create_instance('small', "test_instance_0", 'compute-0')
        self.create_instance('small', "test_instance_2", 'compute-2')
        self.create_instance('small', "test_instance_3", 'compute-3')
        self.create_instance('small', "test_instance_4", 'compute-4')
        self.create_instance('small', "test_instance_6", 'compute-6')
        self.create_instance('small', "test_instance_7", 'compute-7')
        self.create_instance('small', "test_instance_8", 'compute-8')
        self.create_instance('small', "test_instance_9", 'compute-9')
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
            max_parallel_worker_hosts=2
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Five stages of two hosts each (max_parallel_worker_hosts=2); the
        # instance-free hosts (compute-1, compute-5) form stage 1, and each
        # remaining stage migrates instances off its two hosts before the
        # lock/patch/unlock sequence.
        expected_results = {
            'total_stages': 5,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_0',
                                       'test_instance_2']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0', 'compute-2']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0', 'compute-2']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0', 'compute-2']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_3',
                                       'test_instance_4']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-3', 'compute-4']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-3', 'compute-4']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-3', 'compute-4']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_6',
                                       'test_instance_7']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-6', 'compute-7']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-6', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-6', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_8',
                                       'test_instance_9']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-8', 'compute-9']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-8', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-8', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_migrate_host_aggregate(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - migrate instance action
        Verify:
        - hosts with no instances patched first
        - host aggregate limits enforced
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_host('compute-4')
        self.create_host('compute-5')
        self.create_host('compute-6')
        self.create_host('compute-7')
        self.create_host('compute-8')
        self.create_host('compute-9')
        # Two disjoint aggregates of five hosts each.
        self.create_host_aggregate('aggregate-1', ['compute-0',
                                                   'compute-1',
                                                   'compute-2',
                                                   'compute-3',
                                                   'compute-4'])
        self.create_host_aggregate('aggregate-2', ['compute-5',
                                                   'compute-6',
                                                   'compute-7',
                                                   'compute-8',
                                                   'compute-9'])
        # compute-1 and compute-5 (one per aggregate) get no instances.
        self.create_instance('small', "test_instance_0", 'compute-0')
        self.create_instance('small', "test_instance_2", 'compute-2')
        self.create_instance('small', "test_instance_3", 'compute-3')
        self.create_instance('small', "test_instance_4", 'compute-4')
        self.create_instance('small', "test_instance_6", 'compute-6')
        self.create_instance('small', "test_instance_7", 'compute-7')
        self.create_instance('small', "test_instance_8", 'compute-8')
        self.create_instance('small', "test_instance_9", 'compute-9')
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
            max_parallel_worker_hosts=2
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Each stage pairs one host from aggregate-1 with one from
        # aggregate-2 (e.g. compute-0 with compute-6), so no aggregate
        # loses more than one host per stage.
        expected_results = {
            'total_stages': 5,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_0',
                                       'test_instance_6']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0', 'compute-6']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0', 'compute-6']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0', 'compute-6']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_2',
                                       'test_instance_7']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2', 'compute-7']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_3',
                                       'test_instance_8']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-3', 'compute-8']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-3', 'compute-8']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-3', 'compute-8']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_4',
                                       'test_instance_9']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-4', 'compute-9']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-4', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-4', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_migrate_overlap_host_aggregate(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - migrate instance action
        Verify:
        - hosts with no instances patched first
        - host aggregate limits enforced
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_host('compute-4')
        self.create_host('compute-5')
        self.create_host('compute-6')
        self.create_host('compute-7')
        self.create_host('compute-8')
        self.create_host('compute-9')
        self.create_host_aggregate('aggregate-1', ['compute-0',
                                                   'compute-1',
                                                   'compute-2',
                                                   'compute-3',
                                                   'compute-4'])
        self.create_host_aggregate('aggregate-2', ['compute-5',
                                                   'compute-6',
                                                   'compute-7',
                                                   'compute-8',
                                                   'compute-9'])
        # aggregate-3 overlaps both of the above, containing every host.
        self.create_host_aggregate('aggregate-3', ['compute-0',
                                                   'compute-1',
                                                   'compute-2',
                                                   'compute-3',
                                                   'compute-4',
                                                   'compute-5',
                                                   'compute-6',
                                                   'compute-7',
                                                   'compute-8',
                                                   'compute-9'])
        self.create_instance('small', "test_instance_0", 'compute-0')
        self.create_instance('small', "test_instance_2", 'compute-2')
        self.create_instance('small', "test_instance_3", 'compute-3')
        self.create_instance('small', "test_instance_4", 'compute-4')
        self.create_instance('small', "test_instance_6", 'compute-6')
        self.create_instance('small', "test_instance_7", 'compute-7')
        self.create_instance('small', "test_instance_8", 'compute-8')
        self.create_instance('small', "test_instance_9", 'compute-9')
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
            max_parallel_worker_hosts=2
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Expected stage layout matches the non-overlapping aggregate case:
        # each stage still pairs one host from aggregate-1 with one from
        # aggregate-2, even though aggregate-3 spans all ten hosts.
        expected_results = {
            'total_stages': 5,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1', 'compute-5']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_0',
                                       'test_instance_6']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0', 'compute-6']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0', 'compute-6']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0', 'compute-6']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_2',
                                       'test_instance_7']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2', 'compute-7']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_3',
                                       'test_instance_8']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-3', 'compute-8']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-3', 'compute-8']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-3', 'compute-8']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_4',
                                       'test_instance_9']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-4', 'compute-9']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-4', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-4', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_migrate_small_host_aggregate(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - migrate instance action
        Verify:
        - hosts with no instances patched first
        - small host aggregate handled
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_host('compute-4')
        self.create_host('compute-5')
        self.create_host('compute-6')
        self.create_host('compute-7')
        self.create_host('compute-8')
        self.create_host('compute-9')
        # Aggregates of uneven size: 2, 5 and 3 hosts.
        self.create_host_aggregate('aggregate-1', ['compute-0',
                                                   'compute-1'])
        self.create_host_aggregate('aggregate-2', ['compute-2',
                                                   'compute-3',
                                                   'compute-4',
                                                   'compute-5',
                                                   'compute-6'])
        self.create_host_aggregate('aggregate-3', ['compute-7',
                                                   'compute-8',
                                                   'compute-9'])
        # Every host has an instance, so there is no instance-free
        # first stage in this test.
        self.create_instance('small', "test_instance_0", 'compute-0')
        self.create_instance('small', "test_instance_1", 'compute-1')
        self.create_instance('small', "test_instance_2", 'compute-2')
        self.create_instance('small', "test_instance_3", 'compute-3')
        self.create_instance('small', "test_instance_4", 'compute-4')
        self.create_instance('small', "test_instance_5", 'compute-5')
        self.create_instance('small', "test_instance_6", 'compute-6')
        self.create_instance('small', "test_instance_7", 'compute-7')
        self.create_instance('small', "test_instance_8", 'compute-8')
        self.create_instance('small', "test_instance_9", 'compute-9')
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
            max_parallel_worker_hosts=2
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Five stages of two hosts each; each stage draws its two hosts
        # from different aggregates (e.g. compute-0 from aggregate-1 with
        # compute-2 from aggregate-2).
        expected_results = {
            'total_stages': 5,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_0',
                                       'test_instance_2']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0', 'compute-2']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0', 'compute-2']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0', 'compute-2']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_1',
                                       'test_instance_3']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1', 'compute-3']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_4',
                                       'test_instance_7']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-4', 'compute-7']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-4', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-4', 'compute-7']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_5',
                                       'test_instance_8']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-5', 'compute-8']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-5', 'compute-8']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-5', 'compute-8']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'disable-host-services'},
                     {'name': 'migrate-instances',
                      'entity_names': ['test_instance_6',
                                       'test_instance_9']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-6', 'compute-9']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-6', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-6', 'compute-9']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - stop start instance action
        Verify:
        - hosts with no instances patched first
        - anti-affinity policy enforced
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        self.create_instance('small',
                             "test_instance_1",
                             'compute-1')
        # The two instances share an anti-affinity group, so their hosts
        # must not be patched in the same stage.
        self.create_instance_group('instance_group_1',
                                   ['test_instance_0', 'test_instance_1'],
                                   [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Instance-free hosts (compute-2, compute-3) go first; compute-0
        # and compute-1 land in separate stages due to anti-affinity.
        expected_results = {
            'total_stages': 3,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_1']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_1']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 }
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity_locked_instance(self):
"""
Test the sw_patch strategy add worker strategy stages:
- parallel apply
- stop start instance action
- locked instance in instance group
Verify:
- stage creation fails
"""
self.create_host('compute-0')
self.create_host('compute-1')
self.create_host('compute-2')
self.create_host('compute-3')
self.create_instance('small',
"test_instance_0",
'compute-0')
self.create_instance('small',
"test_instance_1",
'compute-1',
admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED)
self.create_instance_group('instance_group_1',
['test_instance_0', 'test_instance_1'],
[nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
success, reason = strategy._add_worker_strategy_stages(
worker_hosts=sorted_worker_hosts,
reboot=True)
assert success is False, "Strategy creation did not fail"
    def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - stop start instance action
        - test both reboot and no reboot cases
        Verify:
        - hosts with no instances patched first
        - host aggregate limits enforced
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-1'])
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        self.create_instance('small',
                             "test_instance_1",
                             'compute-1')
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        # Test reboot patches
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # With reboot: instance-free hosts first, then compute-0 and
        # compute-1 in separate stages (both are in aggregate-1).
        expected_results = {
            'total_stages': 3,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_1']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_1']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 }
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
        # Test no reboot patches.
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
            max_parallel_worker_hosts=3,
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=False)
        apply_phase = strategy.apply_phase.as_dict()
        # Perform no-reboot parallel worker patches without any
        # grouping by aggregates or determining which hosts have VMs
        # max_parallel_worker_hosts is 3 (for 4 hosts) resulting in 2 stages
        expected_results = {
            'total_stages': 2,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 3,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0', 'compute-1', 'compute-2']},
                     {'name': 'system-stabilize', 'timeout': 30}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 3,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-3']},
                     {'name': 'system-stabilize', 'timeout': 30}
                 ]
                 }
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_stop_start_locked_host(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - stop start instance action
        - locked host
        Verify:
        - hosts with no instances patched first
        - locked host patched and rebooted
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        # compute-3 is administratively locked before the strategy is built.
        self.create_host('compute-3',
                         admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED)
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        self.create_instance('small',
                             "test_instance_1",
                             'compute-1')
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        # Test reboot patches
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Stage 1: only compute-2 is locked/unlocked; the already-locked
        # compute-3 is still patched but gets a reboot-hosts step instead.
        expected_results = {
            'total_stages': 2,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 7,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize', 'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2']},
                     {'name': 'reboot-hosts',
                      'entity_names': ['compute-3']},
                     {'name': 'system-stabilize', 'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_0', 'test_instance_1']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0', 'compute-1']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0', 'compute-1']},
                     {'name': 'system-stabilize', 'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0', 'compute-1']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_0', 'test_instance_1']},
                     {'name': 'system-stabilize', 'timeout': 60}
                 ]
                 },
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate_locked_instance(self):
        """
        Test the sw_patch strategy add worker strategy stages:
        - parallel apply
        - stop start instance action
        - locked instance not in an instance group
        Verify:
        - hosts with no instances patched first
        - host aggregate limits enforced
        - locked instance not stopped or started
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        self.create_host('compute-2')
        self.create_host('compute-3')
        self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-1'])
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        # test_instance_1 is locked; it is not in any instance group, so
        # strategy creation should still succeed.
        self.create_instance('small',
                             "test_instance_1",
                             'compute-1',
                             admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED)
        worker_hosts = []
        for host in self._host_table.values():
            if HOST_PERSONALITY.WORKER in host.personality:
                worker_hosts.append(host)
        # Sort worker hosts so the order of the steps is deterministic
        sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
        # Test reboot patches
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
        )
        strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                             reboot=True)
        apply_phase = strategy.apply_phase.as_dict()
        # Stage 3 (compute-1) has no stop-instances/start-instances steps:
        # its only instance is locked.
        expected_results = {
            'total_stages': 3,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-2', 'compute-3']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 }
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate_single_host(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - parallel apply
    - stop start instance action
    Verify:
    - host aggregates with a single host are patched in parallel
    """
    # Two aggregates, one host each; aggregates must not prevent the
    # two hosts from being patched together in a single stage.
    for host_name in ('compute-0', 'compute-1'):
        self.create_host(host_name)
    self.create_host_aggregate('aggregate-1', ['compute-0'])
    self.create_host_aggregate('aggregate-2', ['compute-1'])
    self.create_instance('small', "test_instance_0", 'compute-0')
    self.create_instance('small', "test_instance_1", 'compute-1')
    # Collect worker hosts sorted by name so step order is deterministic.
    sorted_worker_hosts = sorted(
        (host for host in self._host_table.values()
         if HOST_PERSONALITY.WORKER in host.personality),
        key=lambda host: host.name)
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # One parallel stage covering both hosts and both instances.
    expected_results = {
        'total_stages': 1,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_0', 'test_instance_1']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-0', 'compute-1']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0', 'compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-0', 'compute-1']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_0', 'test_instance_1']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity_host_aggregate(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - parallel apply
    - stop start instance action
    Verify:
    - anti-affinity policy and host aggregates enforced at same time:
      compute-1 cannot share a stage with compute-0 (anti-affinity group
      across their instances) nor with compute-2 (same host aggregate),
      so it is patched alone in a second stage
    """
    # Fixture: four workers, each hosting one instance; compute-1 and
    # compute-2 share an aggregate; instances on compute-0/compute-1
    # form an anti-affinity group.
    self.create_host('compute-0')
    self.create_host('compute-1')
    self.create_host('compute-2')
    self.create_host('compute-3')
    self.create_host_aggregate('aggregate-1', ['compute-1', 'compute-2'])
    self.create_instance('small',
                         "test_instance_0",
                         'compute-0')
    self.create_instance('small',
                         "test_instance_1",
                         'compute-1')
    self.create_instance('small',
                         "test_instance_2",
                         'compute-2')
    self.create_instance('small',
                         "test_instance_3",
                         'compute-3')
    self.create_instance_group('instance_group_1',
                               ['test_instance_0', 'test_instance_1'],
                               [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
    worker_hosts = []
    for host in self._host_table.values():
        if HOST_PERSONALITY.WORKER in host.personality:
            worker_hosts.append(host)
    # Sort worker hosts so the order of the steps is deterministic
    sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # Stage 1: compute-0/2/3 patched in parallel; stage 2: compute-1 alone.
    expected_results = {
        'total_stages': 2,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_0', 'test_instance_2', 'test_instance_3']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-0', 'compute-2', 'compute-3']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0', 'compute-2', 'compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-0', 'compute-2', 'compute-3']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_0', 'test_instance_2', 'test_instance_3']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_stop_start(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - serial apply
    - stop start instance action
    - test both reboot and no reboot cases
    Verify:
    - hosts with no instances patched first
    """
    # Fixture: compute-2 and compute-3 have no instances, so serial apply
    # should patch them before compute-0 and compute-1.
    self.create_host('compute-0')
    self.create_host('compute-1')
    self.create_host('compute-2')
    self.create_host('compute-3')
    self.create_instance('small',
                         "test_instance_0",
                         'compute-0')
    self.create_instance('small',
                         "test_instance_1",
                         'compute-1')
    self.create_instance_group('instance_group_1',
                               ['test_instance_0', 'test_instance_1'],
                               [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
    worker_hosts = []
    for host in self._host_table.values():
        if HOST_PERSONALITY.WORKER in host.personality:
            worker_hosts.append(host)
    # Sort worker hosts so the order of the steps is deterministic
    sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # One stage per host: instance-free hosts (6 steps) come first,
    # instance-bearing hosts (8 steps, with stop/start) follow.
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-2']},
                 # NOTE(review): no 'timeout' key here, unlike the final
                 # stage's system-stabilize — presumably validate_phase
                 # only checks the keys that are present; confirm.
                 {'name': 'system-stabilize'}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize'}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_0']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_0']},
                 {'name': 'system-stabilize'}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
    # Test no reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=False)
    apply_phase = strategy.apply_phase.as_dict()
    # No reboot: no lock/unlock or instance stop/start — just patch and
    # stabilize, still one stage per host.
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_stop_start_locked_host(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - serial apply
    - stop start instance action
    - locked host
    - test both reboot and no reboot cases
    Verify:
    - hosts with no instances patched first
    - locked host patched and rebooted
    """
    # Fixture: compute-2 is administratively locked and has no instances;
    # the other three workers each host one instance.
    self.create_host('compute-0')
    self.create_host('compute-1')
    self.create_host('compute-2',
                     admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED)
    self.create_host('compute-3')
    self.create_instance('small',
                         "test_instance_0",
                         'compute-0')
    self.create_instance('small',
                         "test_instance_1",
                         'compute-1')
    self.create_instance('small',
                         "test_instance_2",
                         'compute-3')
    self.create_instance_group('instance_group_1',
                               ['test_instance_0', 'test_instance_1'],
                               [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
    worker_hosts = []
    for host in self._host_table.values():
        if HOST_PERSONALITY.WORKER in host.personality:
            worker_hosts.append(host)
    # Sort worker hosts so the order of the steps is deterministic
    sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # The locked host gets a reboot-hosts step instead of the usual
    # lock/unlock pair; unlocked hosts get the full 8-step sequence.
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 5,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'reboot-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_0']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_0']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 8,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'stop-instances',
                  'entity_names': ['test_instance_2']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'start-instances',
                  'entity_names': ['test_instance_2']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
    # Test no reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=False)
    apply_phase = strategy.apply_phase.as_dict()
    # No reboot: every host (locked or not) gets the minimal 3-step stage.
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_parallel_stop_start_max_hosts(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - parallel apply
    - stop start instance action
    Verify:
    - maximum host limit enforced
    """
    # Thirteen instance-free workers; with a parallel limit of five they
    # must be batched 5 + 5 + 3 across three stages.
    host_names = ['compute-%02d' % index for index in range(13)]
    for host_name in host_names:
        self.create_host(host_name)
    # Collect worker hosts sorted by name so step order is deterministic.
    sorted_worker_hosts = sorted(
        [host for host in self._host_table.values()
         if HOST_PERSONALITY.WORKER in host.personality],
        key=lambda host: host.name)
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
        max_parallel_worker_hosts=5
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # Each batch yields an identical 6-step stage over its hosts.
    batches = [host_names[0:5], host_names[5:10], host_names[10:13]]
    expected_results = {
        'total_stages': 3,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': batch},
                 {'name': 'sw-patch-hosts',
                  'entity_names': batch},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': batch},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             }
            for batch in batches
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_migrate(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - serial apply
    - migrate instance action
    - test both reboot and no reboot cases
    Verify:
    - hosts with no instances patched first
    """
    # Fixture: compute-2 and compute-3 have no instances, so serial apply
    # should patch them before the instance-bearing compute-0/compute-1.
    self.create_host('compute-0')
    self.create_host('compute-1')
    self.create_host('compute-2')
    self.create_host('compute-3')
    self.create_instance('small',
                         "test_instance_0",
                         'compute-0')
    self.create_instance('small',
                         "test_instance_1",
                         'compute-1')
    self.create_instance_group('instance_group_1',
                               ['test_instance_0', 'test_instance_1'],
                               [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
    worker_hosts = []
    for host in self._host_table.values():
        if HOST_PERSONALITY.WORKER in host.personality:
            worker_hosts.append(host)
    # Sort worker hosts so the order of the steps is deterministic
    sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # Migrate action: instances are moved off a host before lock (7-step
    # stage) instead of stop/start around it (the 8-step stop-start form).
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 7,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'migrate-instances',
                  'entity_names': ['test_instance_0']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 7,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'migrate-instances',
                  'entity_names': ['test_instance_1']},
                 {'name': 'lock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 60}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
    # Test no reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=False)
    apply_phase = strategy.apply_phase.as_dict()
    # No reboot: no migration or lock/unlock needed — minimal 3-step stages.
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_worker_stages_serial_migrate_locked_instance(self):
    """
    Test the sw_patch strategy add worker strategy stages:
    - serial apply
    - migrate instance action
    - locked instance in instance group
    - test both reboot and no reboot cases
    Verify:
    - stages not created for reboot case
    - for no reboot case:
      - hosts with no instances patched first
      - locked instance is not migrated
    """
    # Fixture: test_instance_0 is administratively locked, which blocks
    # the migrate action for the reboot case.
    self.create_host('compute-0')
    self.create_host('compute-1')
    self.create_host('compute-2')
    self.create_host('compute-3')
    self.create_instance('small',
                         "test_instance_0",
                         'compute-0',
                         admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED)
    self.create_instance('small',
                         "test_instance_1",
                         'compute-1')
    self.create_instance_group('instance_group_1',
                               ['test_instance_0', 'test_instance_1'],
                               [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY])
    worker_hosts = []
    for host in self._host_table.values():
        if HOST_PERSONALITY.WORKER in host.personality:
            worker_hosts.append(host)
    # Sort worker hosts so the order of the steps is deterministic
    sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
    )
    # A locked instance cannot be migrated, so stage creation must fail.
    success, reason = strategy._add_worker_strategy_stages(
        worker_hosts=sorted_worker_hosts,
        reboot=True)
    assert success is False, "Strategy creation did not fail"
    # Test no reboot patches
    strategy = create_sw_patch_strategy(
        worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
        default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE
    )
    strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
                                         reboot=False)
    apply_phase = strategy.apply_phase.as_dict()
    # No reboot: no migration required, so the locked instance is fine
    # and every host gets the minimal 3-step stage.
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-2']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-3']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-0']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             },
            {'name': 'sw-patch-worker-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['compute-1']},
                 {'name': 'system-stabilize',
                  'timeout': 30},
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_storage_stages_ignore(self):
    """
    Test the sw_patch strategy add storage strategy stages:
    - ignore apply
    Verify:
    - stages not created
    """
    # Four storage hosts in two replication groups; with the IGNORE
    # apply type no stages should be produced for any of them.
    for host_name in ('storage-0', 'storage-1', 'storage-2', 'storage-3'):
        self.create_host(host_name)
    self.create_host_group('group-0',
                           ['storage-0', 'storage-1'],
                           [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
    self.create_host_group('group-1',
                           ['storage-2', 'storage-3'],
                           [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
    # Collect storage hosts sorted by name so step order is deterministic.
    sorted_storage_hosts = sorted(
        (host for host in self._host_table.values()
         if HOST_PERSONALITY.STORAGE in host.personality),
        key=lambda host: host.name)
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE
    )
    success, reason = strategy._add_storage_strategy_stages(
        storage_hosts=sorted_storage_hosts,
        reboot=True)
    assert success is True, "Strategy creation failed"
    apply_phase = strategy.apply_phase.as_dict()
    expected_results = {
        'total_stages': 0
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_storage_stages_parallel_host_group(self):
    """
    Test the sw_patch strategy add storage strategy stages:
    - parallel apply
    - test both reboot and no reboot cases
    Verify:
    - host groups enforced
    """
    # Fixture: two replication groups; hosts in the same group must not
    # be patched in the same stage, so stages pair one host per group.
    self.create_host('storage-0')
    self.create_host('storage-1')
    self.create_host('storage-2')
    self.create_host('storage-3')
    self.create_host_group('group-0',
                           ['storage-0', 'storage-1'],
                           [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
    self.create_host_group('group-1',
                           ['storage-2', 'storage-3'],
                           [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
    storage_hosts = []
    for host in self._host_table.values():
        if HOST_PERSONALITY.STORAGE in host.personality:
            storage_hosts.append(host)
    # Sort hosts so the order of the steps is deterministic
    sorted_storage_hosts = sorted(storage_hosts, key=lambda host: host.name)
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        storage_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL
    )
    strategy._add_storage_strategy_stages(storage_hosts=sorted_storage_hosts,
                                          reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # Each stage ends with wait-data-sync (storage resync) rather than
    # the plain system-stabilize used for worker hosts.
    expected_results = {
        'total_stages': 2,
        'stages': [
            {'name': 'sw-patch-storage-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': ['storage-0', 'storage-2']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['storage-0', 'storage-2']},
                 {'name': 'system-stabilize', 'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['storage-0', 'storage-2']},
                 {'name': 'wait-data-sync',
                  'ignore_alarms': ['900.001',
                                    '900.005',
                                    '900.101',
                                    '200.001',
                                    '700.004',
                                    '280.002'],
                  'timeout': 1800}
             ]
             },
            {'name': 'sw-patch-storage-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': ['storage-1', 'storage-3']},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['storage-1', 'storage-3']},
                 {'name': 'system-stabilize', 'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': ['storage-1', 'storage-3']},
                 {'name': 'wait-data-sync',
                  'ignore_alarms': ['900.001',
                                    '900.005',
                                    '900.101',
                                    '200.001',
                                    '700.004',
                                    '280.002'],
                  'timeout': 1800}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
    # Test no reboot patches
    strategy = create_sw_patch_strategy(
        storage_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL
    )
    strategy._add_storage_strategy_stages(storage_hosts=sorted_storage_hosts,
                                          reboot=False)
    apply_phase = strategy.apply_phase.as_dict()
    # No reboot: minimal 3-step stages, still split by host group.
    expected_results = {
        'total_stages': 2,
        'stages': [
            {'name': 'sw-patch-storage-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['storage-0', 'storage-2']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             },
            {'name': 'sw-patch-storage-hosts',
             'total_steps': 3,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'sw-patch-hosts',
                  'entity_names': ['storage-1', 'storage-3']},
                 {'name': 'system-stabilize',
                  'timeout': 30}
             ]
             }
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_storage_stages_serial(self):
    """
    Test the sw_patch strategy add storage strategy stages:
    - serial apply
    """
    storage_host_names = ('storage-0', 'storage-1', 'storage-2', 'storage-3')
    for host_name in storage_host_names:
        self.create_host(host_name)
    self.create_host_group('group-0',
                           ['storage-0', 'storage-1'],
                           [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
    self.create_host_group('group-1',
                           ['storage-2', 'storage-3'],
                           [nfvi.objects.v1.HOST_GROUP_POLICY.STORAGE_REPLICATION])
    # Collect storage hosts sorted by name so step order is deterministic.
    sorted_storage_hosts = sorted(
        [host for host in self._host_table.values()
         if HOST_PERSONALITY.STORAGE in host.personality],
        key=lambda host: host.name)
    strategy = create_sw_patch_strategy(
        storage_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
    )
    strategy._add_storage_strategy_stages(storage_hosts=sorted_storage_hosts,
                                          reboot=True)
    apply_phase = strategy.apply_phase.as_dict()
    # Serial apply: one identical 6-step stage per storage host, each
    # ending with a wait-data-sync step that ignores the standard alarms.
    ignored_alarm_ids = ['900.001',
                         '900.005',
                         '900.101',
                         '200.001',
                         '700.004',
                         '280.002']
    expected_results = {
        'total_stages': 4,
        'stages': [
            {'name': 'sw-patch-storage-hosts',
             'total_steps': 6,
             'steps': [
                 {'name': 'query-alarms'},
                 {'name': 'lock-hosts',
                  'entity_names': [host_name]},
                 {'name': 'sw-patch-hosts',
                  'entity_names': [host_name]},
                 {'name': 'system-stabilize',
                  'timeout': 15},
                 {'name': 'unlock-hosts',
                  'entity_names': [host_name]},
                 {'name': 'wait-data-sync',
                  'ignore_alarms': ignored_alarm_ids,
                  'timeout': 1800}
             ]
             }
            for host_name in storage_host_names
        ]
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_controller_stages_ignore(self):
    """
    Test the sw_patch strategy add controller strategy stages:
    - ignore apply
    Verify:
    - stages not created
    """
    for host_name in ('controller-0', 'controller-1'):
        self.create_host(host_name)
    # Collect the controller hosts from the host table.
    controller_hosts = [host for host in self._host_table.values()
                        if HOST_PERSONALITY.CONTROLLER in host.personality]
    # Test reboot patches
    strategy = create_sw_patch_strategy(
        controller_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE
    )
    success, reason = strategy._add_controller_strategy_stages(
        controllers=controller_hosts,
        reboot=True)
    assert success is True, "Strategy creation failed"
    apply_phase = strategy.apply_phase.as_dict()
    # IGNORE apply type: no stages at all.
    expected_results = {
        'total_stages': 0
    }
    sw_update_testcase.validate_strategy_persists(strategy)
    sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_controller_stages_serial(self):
"""
Test the sw_patch strategy add controller strategy stages:
- serial apply
- test both reboot and no reboot cases
Verify:
- patch mate controller first
"""
self.create_host('controller-0')
self.create_host('controller-1')
controller_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.CONTROLLER in host.personality:
controller_hosts.append(host)
# Test reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 600},
]
},
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 600},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_controller_stages_serial_openstack_not_installed(self):
"""
Test the sw_patch strategy add controller strategy stages:
- serial apply
- test both reboot and no reboot cases
Verify:
- patch mate controller first
"""
self.create_host('controller-0', openstack_installed=False)
self.create_host('controller-1', openstack_installed=False)
controller_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.CONTROLLER in host.personality:
controller_hosts.append(host)
# Test reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 60},
]
},
{'name': 'sw-patch-controllers',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 60},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
controller_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL
)
strategy._add_controller_strategy_stages(controllers=controller_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
{'name': 'sw-patch-controllers',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 30}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_stages_parallel_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts
- parallel apply treated as serial
- stop start instance action
- test both reboot and no reboot cases
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize'}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_stages_serial_stop_start_no_instances(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts
- no instances
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 2,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_plus_stages_parallel_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts plus workers
- parallel apply treated as serial
- stop start instance action
- test both reboot and no reboot cases
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
self.create_host('compute-0')
self.create_host('compute-1')
self.create_instance('small',
"test_instance_2",
'compute-0')
self.create_instance('small',
"test_instance_3",
'compute-1')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
# Test reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600},
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_2', 'test_instance_3']},
{'name': 'lock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_2', 'test_instance_3']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
# Test no reboot patches
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=False)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 3,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize'}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 3,
'steps': [
{'name': 'query-alarms'},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0', 'compute-1']},
{'name': 'system-stabilize'}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_plus_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts plus workers
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-1')
self.create_host('compute-0')
self.create_host('compute-1')
self.create_instance('small',
"test_instance_2",
'compute-0')
self.create_instance('small',
"test_instance_3",
'compute-1')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 9,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'stop-instances',
'entity_names': ['test_instance_1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_2']},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_2']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_3']},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'start-instances',
'entity_names': ['test_instance_3']},
{'name': 'system-stabilize',
'timeout': 60}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_plus_stages_serial_stop_start_no_instances(
self):
"""
Test the sw_patch strategy add worker strategy stages:
- aio hosts plus workers
- no instances
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_host('controller-1', aio=True)
self.create_host('compute-0')
self.create_host('compute-1')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
# Sort worker hosts so the order of the steps is deterministic
sorted_worker_hosts = sorted(worker_hosts, key=lambda host: host.name)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
)
strategy._add_worker_strategy_stages(worker_hosts=sorted_worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 4,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 7,
'steps': [
{'name': 'query-alarms'},
{'name': 'swact-hosts',
'entity_names': ['controller-1']},
{'name': 'lock-hosts',
'entity_names': ['controller-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-1']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-0']},
{'name': 'system-stabilize',
'timeout': 60}
]
},
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['compute-1']},
{'name': 'sw-patch-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['compute-1']},
{'name': 'system-stabilize',
'timeout': 60}
]
}
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_simplex_stages_serial_migrate(self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host
- serial apply
- migrate instance action
Verify:
- stage creation fails
"""
self.create_host('controller-0', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
self.create_instance('small',
"test_instance_1",
'controller-0')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
single_controller=True
)
success, reason = strategy._add_worker_strategy_stages(
worker_hosts=worker_hosts,
reboot=True)
assert success is False, "Strategy creation did not fail"
def test_sw_patch_strategy_aio_simplex_stages_serial_migrate_no_openstack(
self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host (no openstack)
- serial apply
- migrate instance action
"""
self.create_host('controller-0', aio=True, openstack_installed=False)
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE,
single_controller=True
)
strategy._add_worker_strategy_stages(worker_hosts=worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 60},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_simplex_stages_serial_stop_start(self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
self.create_instance('small',
"test_instance_0",
'controller-0')
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
single_controller=True
)
strategy._add_worker_strategy_stages(worker_hosts=worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 8,
'steps': [
{'name': 'query-alarms'},
{'name': 'stop-instances',
'entity_names': ['test_instance_0']},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'start-instances',
'entity_names': ['test_instance_0']},
{'name': 'wait-alarms-clear',
'timeout': 600},
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
def test_sw_patch_strategy_aio_simplex_stages_serial_stop_start_no_instances(self):
"""
Test the sw_patch strategy add worker strategy stages:
- simplex aio host
- no instances
- serial apply
- stop start instance action
"""
self.create_host('controller-0', aio=True)
worker_hosts = []
for host in self._host_table.values():
if HOST_PERSONALITY.WORKER in host.personality:
worker_hosts.append(host)
strategy = create_sw_patch_strategy(
worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL,
default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START,
single_controller=True
)
strategy._add_worker_strategy_stages(worker_hosts=worker_hosts,
reboot=True)
apply_phase = strategy.apply_phase.as_dict()
expected_results = {
'total_stages': 1,
'stages': [
{'name': 'sw-patch-worker-hosts',
'total_steps': 6,
'steps': [
{'name': 'query-alarms'},
{'name': 'lock-hosts',
'entity_names': ['controller-0']},
{'name': 'sw-patch-hosts',
'entity_names': ['controller-0']},
{'name': 'system-stabilize',
'timeout': 15},
{'name': 'unlock-hosts',
'entity_names': ['controller-0']},
{'name': 'wait-alarms-clear',
'timeout': 600}
]
},
]
}
sw_update_testcase.validate_strategy_persists(strategy)
sw_update_testcase.validate_phase(apply_phase, expected_results)
    def test_sw_patch_strategy_build_complete_parallel_stop_start(self):
        """
        Test the sw_patch strategy build_complete:
        - parallel apply
        - stop start instance action
        Verify:
        - hosts with no instances patched first
        - anti-affinity policy enforced
        """
        self.create_host('compute-0')
        self.create_host('compute-1')
        # Only compute-0 hosts an instance; compute-1 is empty and is
        # therefore expected to be patched in the first stage.
        self.create_instance('small',
                             "test_instance_0",
                             'compute-0')
        strategy = create_sw_patch_strategy(
            worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL,
            default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START
        )
        # Attach a fake software-update object so build_complete can run
        # outside the full orchestration context.
        fake_patch_obj = SwPatch()
        strategy.sw_update_obj = fake_patch_obj
        # One applied-but-not-installed patch...
        nfvi_sw_patches = list()
        sw_patch = nfvi.objects.v1.SwPatch(
            'PATCH_0001', '12.01', 'Applied', 'Available')
        nfvi_sw_patches.append(sw_patch)
        strategy.nfvi_sw_patches = nfvi_sw_patches
        # ...reported as pending on both worker hosts.
        nfvi_sw_patch_hosts = list()
        for host_name in ['compute-0', 'compute-1']:
            host = nfvi.objects.v1.HostSwPatch(
                host_name, 'worker', '12.01', True, False, 'idle', False,
                False)
            nfvi_sw_patch_hosts.append(host)
        strategy.nfvi_sw_patch_hosts = nfvi_sw_patch_hosts
        # Drive the strategy build to completion and inspect the resulting
        # apply phase.
        strategy.build_complete(common_strategy.STRATEGY_RESULT.SUCCESS, "")
        apply_phase = strategy.apply_phase.as_dict()
        expected_results = {
            'total_stages': 2,
            'stages': [
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 6,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-1']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 },
                {'name': 'sw-patch-worker-hosts',
                 'total_steps': 8,
                 'steps': [
                     {'name': 'query-alarms'},
                     {'name': 'stop-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'lock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'sw-patch-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'system-stabilize',
                      'timeout': 15},
                     {'name': 'unlock-hosts',
                      'entity_names': ['compute-0']},
                     {'name': 'start-instances',
                      'entity_names': ['test_instance_0']},
                     {'name': 'system-stabilize',
                      'timeout': 60}
                 ]
                 }
            ]
        }
        sw_update_testcase.validate_strategy_persists(strategy)
        sw_update_testcase.validate_phase(apply_phase, expected_results)
| 40.778906
| 110
| 0.453046
| 13,349
| 156,591
| 5.053412
| 0.018728
| 0.06066
| 0.070444
| 0.066827
| 0.968825
| 0.965252
| 0.964274
| 0.956684
| 0.952726
| 0.943268
| 0
| 0.020079
| 0.425299
| 156,591
| 3,839
| 111
| 40.789529
| 0.729512
| 0.052589
| 0
| 0.850237
| 0
| 0
| 0.237843
| 0.019371
| 0
| 0
| 0
| 0
| 0.001896
| 1
| 0.011374
| false
| 0
| 0.003476
| 0
| 0.015482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
21d25b695e7f7943317e7f5f51cc8824aefa2e14
| 39,132
|
py
|
Python
|
ndcube/tests/test_sequence_plotting.py
|
kc611/ndcube
|
947799ea213728d6215cb791508ef186ca53b24f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ndcube/tests/test_sequence_plotting.py
|
kc611/ndcube
|
947799ea213728d6215cb791508ef186ca53b24f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ndcube/tests/test_sequence_plotting.py
|
kc611/ndcube
|
947799ea213728d6215cb791508ef186ca53b24f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# # -*- coding: utf-8 -*-
# import pytest
# import datetime
# import copy
# import numpy as np
# import astropy.units as u
# import matplotlib
# from ndcube import NDCube, NDCubeSequence
# from ndcube.utils.wcs import WCS
# import ndcube.mixins.sequence_plotting
# # sample data for tests
# # TODO: use a fixture reading from a test file. file TBD.
# data = np.array([[[1, 2, 3, 4], [2, 4, 5, 3], [0, -1, 2, 3]],
# [[2, 4, 5, 1], [10, 5, 2, 2], [10, 3, 3, 0]]])
# data2 = np.array([[[11, 22, 33, 44], [22, 44, 55, 33], [0, -1, 22, 33]],
# [[22, 44, 55, 11], [10, 55, 22, 22], [10, 33, 33, 0]]])
# ht = {'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
# 'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
# 'NAXIS2': 3,
# 'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
# wt = WCS(header=ht, naxis=3)
# hm = {
# 'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10,
# 'NAXIS1': 4,
# 'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5, 'NAXIS2': 3,
# 'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
# wm = WCS(header=hm, naxis=3)
# cube1 = NDCube(
# data, wt, missing_axes=[False, False, False, True],
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# cube1_with_unit = NDCube(
# data, wt, missing_axes=[False, False, False, True],
# unit=u.km,
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# cube1_with_mask = NDCube(
# data, wt, missing_axes=[False, False, False, True],
# mask=np.zeros_like(data, dtype=bool),
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# cube1_with_uncertainty = NDCube(
# data, wt, missing_axes=[False, False, False, True],
# uncertainty=np.sqrt(data),
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# cube1_with_unit_and_uncertainty = NDCube(
# data, wt, missing_axes=[False, False, False, True],
# unit=u.km, uncertainty=np.sqrt(data),
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# cube3 = NDCube(
# data2, wt, missing_axes=[False, False, False, True],
# extra_coords=[
# ('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
# cube1.extra_coords['pix']['value'][-1]),
# ('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(2, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
# cube3_with_unit = NDCube(
# data2, wt, missing_axes=[False, False, False, True],
# unit=u.m,
# extra_coords=[
# ('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
# cube1.extra_coords['pix']['value'][-1]),
# ('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(2, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
# cube3_with_mask = NDCube(
# data2, wt, missing_axes=[False, False, False, True],
# mask=np.zeros_like(data2, dtype=bool),
# extra_coords=[
# ('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
# cube1.extra_coords['pix']['value'][-1]),
# ('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(2, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
# cube3_with_uncertainty = NDCube(
# data2, wt, missing_axes=[False, False, False, True],
# uncertainty=np.sqrt(data2),
# extra_coords=[
# ('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
# cube1.extra_coords['pix']['value'][-1]),
# ('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(2, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
# cube3_with_unit_and_uncertainty = NDCube(
# data2, wt, missing_axes=[False, False, False, True],
# unit=u.m, uncertainty=np.sqrt(data2),
# extra_coords=[
# ('pix', 0, u.Quantity(np.arange(1, data2.shape[0]+1), unit=u.pix) +
# cube1.extra_coords['pix']['value'][-1]),
# ('hi', 1, u.Quantity(range(data2.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(2, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 2))])
# cubem1 = NDCube(
# data, wm,
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# cubem3 = NDCube(
# data2, wm,
# extra_coords=[
# ('pix', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
# ('hi', 1, u.Quantity(range(data.shape[1]), unit=u.s)),
# ('distance', None, u.Quantity(0, unit=u.cm)),
# ('time', None, datetime.datetime(2000, 1, 1, 0, 0))])
# # Define some test NDCubeSequences.
# common_axis = 0
# seq = NDCubeSequence(data_list=[cube1, cube3, cube1, cube3], common_axis=common_axis)
# seq_no_common_axis = NDCubeSequence(data_list=[cube1, cube3, cube1, cube3])
# seq_with_units = NDCubeSequence(
# data_list=[cube1_with_unit, cube3_with_unit, cube1_with_unit, cube3_with_unit],
# common_axis=common_axis)
# seq_with_masks = NDCubeSequence(
# data_list=[cube1_with_mask, cube3_with_mask, cube1_with_mask, cube3_with_mask],
# common_axis=common_axis)
# seq_with_unit0 = NDCubeSequence(data_list=[cube1_with_unit, cube3,
# cube1_with_unit, cube3], common_axis=common_axis)
# seq_with_mask0 = NDCubeSequence(data_list=[cube1_with_mask, cube3,
# cube1_with_mask, cube3], common_axis=common_axis)
# seq_with_uncertainty = NDCubeSequence(data_list=[cube1_with_uncertainty, cube3_with_uncertainty,
# cube1_with_uncertainty, cube3_with_uncertainty],
# common_axis=common_axis)
# seq_with_some_uncertainty = NDCubeSequence(
# data_list=[cube1_with_uncertainty, cube3, cube1, cube3_with_uncertainty],
# common_axis=common_axis)
# seq_with_units_and_uncertainty = NDCubeSequence(
# data_list=[cube1_with_unit_and_uncertainty, cube3_with_unit_and_uncertainty,
# cube1_with_unit_and_uncertainty, cube3_with_unit_and_uncertainty],
# common_axis=common_axis)
# seq_with_units_and_some_uncertainty = NDCubeSequence(
# data_list=[cube1_with_unit_and_uncertainty, cube3_with_unit,
# cube1_with_unit, cube3_with_unit_and_uncertainty],
# common_axis=common_axis)
# seq_with_some_masks = NDCubeSequence(data_list=[cube1_with_mask, cube3, cube1, cube3_with_mask],
# common_axis=common_axis)
# seqm = NDCubeSequence(data_list=[cubem1, cubem3, cubem1, cubem3], common_axis=common_axis)
# # Derive some expected data arrays in plot objects.
# seq_data_stack = np.stack([cube.data for cube in seq_with_masks.data])
# seq_mask_stack = np.stack([cube.mask for cube in seq_with_masks.data])
# seq_stack = np.ma.masked_array(seq_data_stack, seq_mask_stack)
# seq_stack_km = np.ma.masked_array(
# np.stack([(cube.data * cube.unit).to(u.km).value for cube in seq_with_units.data]),
# seq_mask_stack)
# seq_data_concat = np.concatenate([cube.data for cube in seq_with_masks.data], axis=common_axis)
# seq_mask_concat = np.concatenate([cube.mask for cube in seq_with_masks.data], axis=common_axis)
# seq_concat = np.ma.masked_array(seq_data_concat, seq_mask_concat)
# seq_concat_km = np.ma.masked_array(
# np.concatenate([(cube.data * cube.unit).to(u.km).value
# for cube in seq_with_units.data], axis=common_axis),
# seq_mask_concat)
# # Derive expected axis_ranges for non-cube-like cases.
# x_axis_coords3 = np.array([0.4, 0.8, 1.2, 1.6]).reshape((1, 1, 4))
# new_x_axis_coords3_shape = u.Quantity(seq.dimensions, unit=u.pix).value.astype(int)
# new_x_axis_coords3_shape[-1] = 1
# none_axis_ranges_axis3 = [np.arange(0, len(seq.data)+1),
# np.array([0., 1., 2.]), np.arange(0, 4),
# np.tile(np.array(x_axis_coords3), new_x_axis_coords3_shape)]
# none_axis_ranges_axis0 = [np.arange(len(seq.data)),
# np.array([0., 1., 2.]), np.arange(0, 4),
# np.arange(0, int(seq.dimensions[-1].value)+1)]
# distance0_none_axis_ranges_axis0 = [seq.sequence_axis_extra_coords["distance"].value,
# np.array([0., 1., 2.]), np.arange(0, 4),
# np.arange(0, int(seq.dimensions[-1].value)+1)]
# distance0_none_axis_ranges_axis0_mm = [seq.sequence_axis_extra_coords["distance"].to("mm").value,
# np.array([0., 1., 2.]), np.arange(0, 4),
# np.arange(0, int(seq.dimensions[-1].value)+1)]
# userrangequantity_none_axis_ranges_axis0 = [
# np.arange(int(seq.dimensions[0].value)), np.array([0., 1., 2.]), np.arange(0, 4),
# np.arange(0, int(seq.dimensions[-1].value)+1)]
# userrangequantity_none_axis_ranges_axis0_1e7 = [
# (np.arange(int(seq.dimensions[0].value)) * u.J).to(u.erg).value, np.array([0., 1., 2.]),
# np.arange(0, 4), np.arange(0, int(seq.dimensions[-1].value)+1)]
# hi2_none_axis_ranges_axis2 = [
# np.arange(0, len(seq.data)+1), np.array([0., 1., 2.]),
# np.arange(int(seq.dimensions[2].value)), np.arange(0, int(seq.dimensions[-1].value)+1)]
# x_axis_coords1 = np.zeros(tuple([int(s.value) for s in seq.dimensions]))
# x_axis_coords1[0, 1] = 1.
# x_axis_coords1[1, 0] = 2.
# x_axis_coords1[1, 1] = 3.
# x_axis_coords1[2, 1] = 1.
# x_axis_coords1[3, 0] = 2.
# x_axis_coords1[3, 1] = 3.
# pix1_none_axis_ranges_axis1 = [
# np.arange(0, len(seq.data)+1), x_axis_coords1, np.arange(0, 4),
# np.arange(0, int(seq.dimensions[-1].value)+1)]
# # Derive expected extents
# seq_axis1_lim_deg = [0.49998731, 0.99989848]
# seq_axis1_lim_arcsec = [(axis1_xlim*u.deg).to(u.arcsec).value for axis1_xlim in seq_axis1_lim_deg]
# seq_axis2_lim_m = [seq[:, :, :, 0].data[0].axis_world_coords()[-1][0].value,
# seq[:, :, :, 0].data[0].axis_world_coords()[-1][-1].value]
# # Derive expected axis_ranges for cube-like cases.
# cube_like_new_x_axis_coords2_shape = u.Quantity(
# seq.cube_like_dimensions, unit=u.pix).value.astype(int)
# cube_like_new_x_axis_coords2_shape[-1] = 1
# cubelike_none_axis_ranges_axis2 = [
# np.arange(0, int(seq.cube_like_dimensions[0].value)+1), np.arange(0, 4),
# np.tile(x_axis_coords3, cube_like_new_x_axis_coords2_shape)]
# cubelike_none_axis_ranges_axis2_s = copy.deepcopy(cubelike_none_axis_ranges_axis2)
# cubelike_none_axis_ranges_axis2_s[2] = cubelike_none_axis_ranges_axis2_s[2] * 60.
# cubelike_none_axis_ranges_axis0 = [[0, 8], np.arange(0, 4),
# np.arange(0, int(seq.cube_like_dimensions[-1].value)+1)]
# @pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
# (seq[:, 0, 0, 0], {},
# (np.arange(len(seq.data)), np.array([1, 11, 1, 11]),
# "meta.obs.sequence [None]", "Data [None]", (0, len(seq[:, 0, 0, 0].data)-1),
# (min([cube.data.min() for cube in seq[:, 0, 0, 0].data]),
# max([cube.data.max() for cube in seq[:, 0, 0, 0].data])))),
# (seq_with_units[:, 0, 0, 0], {},
# (np.arange(len(seq_with_units.data)), np.array([1, 0.011, 1, 0.011]),
# "meta.obs.sequence [None]", "Data [km]", (0, len(seq_with_units[:, 0, 0, 0].data)-1),
# (min([(cube.data * cube.unit).to(seq_with_units[:, 0, 0, 0].data[0].unit).value
# for cube in seq_with_units[:, 0, 0, 0].data]),
# max([(cube.data * cube.unit).to(seq_with_units[:, 0, 0, 0].data[0].unit).value
# for cube in seq_with_units[:, 0, 0, 0].data])))),
# (seq_with_uncertainty[:, 0, 0, 0], {},
# (np.arange(len(seq_with_uncertainty.data)), np.array([1, 11, 1, 11]),
# "meta.obs.sequence [None]", "Data [None]", (0, len(seq_with_uncertainty[:, 0, 0, 0].data)-1),
# (min([cube.data for cube in seq_with_uncertainty[:, 0, 0, 0].data]),
# max([cube.data for cube in seq_with_uncertainty[:, 0, 0, 0].data])))),
# (seq_with_units_and_uncertainty[:, 0, 0, 0], {},
# (np.arange(len(seq_with_units_and_uncertainty.data)), np.array([1, 0.011, 1, 0.011]),
# "meta.obs.sequence [None]", "Data [km]",
# (0, len(seq_with_units_and_uncertainty[:, 0, 0, 0].data)-1),
# (min([(cube.data*cube.unit).to(seq_with_units_and_uncertainty[:, 0, 0, 0].data[0].unit).value
# for cube in seq_with_units_and_uncertainty[:, 0, 0, 0].data]),
# max([(cube.data*cube.unit).to(seq_with_units_and_uncertainty[:, 0, 0, 0].data[0].unit).value
# for cube in seq_with_units_and_uncertainty[:, 0, 0, 0].data])))),
# (seq_with_units_and_some_uncertainty[:, 0, 0, 0], {},
# (np.arange(len(seq_with_units_and_some_uncertainty.data)), np.array([1, 0.011, 1, 0.011]),
# "meta.obs.sequence [None]", "Data [km]",
# (0, len(seq_with_units_and_some_uncertainty[:, 0, 0, 0].data)-1),
# (min([(cube.data*cube.unit).to(
# seq_with_units_and_some_uncertainty[:, 0, 0, 0].data[0].unit).value
# for cube in seq_with_units_and_some_uncertainty[:, 0, 0, 0].data]),
# max([(cube.data*cube.unit).to(
# seq_with_units_and_some_uncertainty[:, 0, 0, 0].data[0].unit).value
# for cube in seq_with_units_and_some_uncertainty[:, 0, 0, 0].data])))),
# (seq[:, 0, 0, 0], {"axes_coordinates": "distance"},
# ((seq.sequence_axis_extra_coords["distance"]), np.array([1, 11, 1, 11]),
# "distance [{0}]".format(seq.sequence_axis_extra_coords["distance"].unit), "Data [None]",
# (min(seq.sequence_axis_extra_coords["distance"].value),
# max(seq.sequence_axis_extra_coords["distance"].value)),
# (min([cube.data.min() for cube in seq[:, 0, 0, 0].data]),
# max([cube.data.max() for cube in seq[:, 0, 0, 0].data])))),
# (seq[:, 0, 0, 0], {"axes_coordinates": u.Quantity(np.arange(len(seq.data)), unit=u.cm),
# "axes_units": u.km},
# (u.Quantity(np.arange(len(seq.data)), unit=u.cm).to(u.km), np.array([1, 11, 1, 11]),
# "meta.obs.sequence [km]", "Data [None]",
# (min((u.Quantity(np.arange(len(seq.data)), unit=u.cm).to(u.km).value)),
# max((u.Quantity(np.arange(len(seq.data)), unit=u.cm).to(u.km).value))),
# (min([cube.data.min() for cube in seq[:, 0, 0, 0].data]),
# max([cube.data.max() for cube in seq[:, 0, 0, 0].data]))))
# ])
# def test_sequence_plot_1D_plot(test_input, test_kwargs, expected_values):
# # Unpack expected values
# expected_x_data, expected_y_data, expected_xlabel, expected_ylabel, \
# expected_xlim, expected_ylim = expected_values
# # Run plot method
# output = test_input.plot(**test_kwargs)
# # Check values are correct
# assert isinstance(output, matplotlib.axes.Axes)
# np.testing.assert_array_equal(output.lines[0].get_xdata(), expected_x_data)
# np.testing.assert_array_equal(output.lines[0].get_ydata(), expected_y_data)
# assert output.axes.get_xlabel() == expected_xlabel
# assert output.axes.get_ylabel() == expected_ylabel
# output_xlim = output.axes.get_xlim()
# assert output_xlim[0] <= expected_xlim[0]
# assert output_xlim[1] >= expected_xlim[1]
# output_ylim = output.axes.get_ylim()
# assert output_ylim[0] <= expected_ylim[0]
# assert output_ylim[1] >= expected_ylim[1]
# @pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
# (seq[:, :, 0, 0], {},
# (np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
# 0.49998731, 0.99989848, 0.49998731, 0.99989848]),
# np.array([1, 2, 11, 22, 1, 2, 11, 22]),
# "{0} [{1}]".format(seq[:, :, 0, 0].cube_like_world_axis_physical_types[common_axis], "deg"),
# "Data [None]", tuple(seq_axis1_lim_deg),
# (min([cube.data.min() for cube in seq[:, :, 0, 0].data]),
# max([cube.data.max() for cube in seq[:, :, 0, 0].data])))),
# (seq_with_units[:, :, 0, 0], {},
# (np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
# 0.49998731, 0.99989848, 0.49998731, 0.99989848]),
# np.array([1, 2, 0.011, 0.022, 1, 2, 0.011, 0.022]),
# "{0} [{1}]".format(seq[:, :, 0, 0].cube_like_world_axis_physical_types[common_axis], "deg"),
# "Data [km]", tuple(seq_axis1_lim_deg),
# (min([min((cube.data * cube.unit).to(u.km).value)
# for cube in seq_with_units[:, :, 0, 0].data]),
# max([max((cube.data * cube.unit).to(u.km).value)
# for cube in seq_with_units[:, :, 0, 0].data])))),
# (seq_with_uncertainty[:, :, 0, 0], {},
# (np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
# 0.49998731, 0.99989848, 0.49998731, 0.99989848]),
# np.array([1, 2, 11, 22, 1, 2, 11, 22]),
# "{0} [{1}]".format(
# seq_with_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
# common_axis], "deg"),
# "Data [None]", tuple(seq_axis1_lim_deg),
# (min([cube.data.min() for cube in seq_with_uncertainty[:, :, 0, 0].data]),
# max([cube.data.max() for cube in seq_with_uncertainty[:, :, 0, 0].data])))),
# (seq_with_some_uncertainty[:, :, 0, 0], {},
# (np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
# 0.49998731, 0.99989848, 0.49998731, 0.99989848]),
# np.array([1, 2, 11, 22, 1, 2, 11, 22]),
# "{0} [{1}]".format(
# seq_with_some_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
# common_axis], "deg"),
# "Data [None]", tuple(seq_axis1_lim_deg),
# (min([cube.data.min() for cube in seq_with_some_uncertainty[:, :, 0, 0].data]),
# max([cube.data.max() for cube in seq_with_some_uncertainty[:, :, 0, 0].data])))),
# (seq_with_units_and_uncertainty[:, :, 0, 0], {},
# (np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
# 0.49998731, 0.99989848, 0.49998731, 0.99989848]),
# np.array([1, 2, 0.011, 0.022, 1, 2, 0.011, 0.022]),
# "{0} [{1}]".format(
# seq_with_units_and_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
# common_axis], "deg"),
# "Data [km]", tuple(seq_axis1_lim_deg),
# (min([min((cube.data * cube.unit).to(u.km).value)
# for cube in seq_with_units[:, :, 0, 0].data]),
# max([max((cube.data * cube.unit).to(u.km).value)
# for cube in seq_with_units[:, :, 0, 0].data])))),
# (seq_with_units_and_some_uncertainty[:, :, 0, 0], {},
# (np.array([0.49998731, 0.99989848, 0.49998731, 0.99989848,
# 0.49998731, 0.99989848, 0.49998731, 0.99989848]),
# np.array([1, 2, 0.011, 0.022, 1, 2, 0.011, 0.022]),
# "{0} [{1}]".format(
# seq_with_units_and_some_uncertainty[:, :, 0, 0].cube_like_world_axis_physical_types[
# common_axis], "deg"),
# "Data [km]", tuple(seq_axis1_lim_deg),
# (min([min((cube.data * cube.unit).to(u.km).value)
# for cube in seq_with_units[:, :, 0, 0].data]),
# max([max((cube.data * cube.unit).to(u.km).value)
# for cube in seq_with_units[:, :, 0, 0].data])))),
# (seq[:, :, 0, 0], {"axes_coordinates": "pix"},
# (seq[:, :, 0, 0].common_axis_extra_coords["pix"].value,
# np.array([1, 2, 11, 22, 1, 2, 11, 22]), "pix [pix]", "Data [None]",
# (min(seq[:, :, 0, 0].common_axis_extra_coords["pix"].value),
# max(seq[:, :, 0, 0].common_axis_extra_coords["pix"].value)),
# (min([cube.data.min() for cube in seq[:, :, 0, 0].data]),
# max([cube.data.max() for cube in seq[:, :, 0, 0].data])))),
# (seq[:, :, 0, 0],
# {"axes_coordinates": np.arange(10, 10+seq[:, :, 0, 0].cube_like_dimensions[0].value)},
# (np.arange(10, 10 + seq[:, :, 0, 0].cube_like_dimensions[0].value),
# np.array([1, 2, 11, 22, 1, 2, 11, 22]),
# "{0} [{1}]".format("", None), "Data [None]",
# (10, 10 + seq[:, :, 0, 0].cube_like_dimensions[0].value - 1),
# (min([cube.data.min() for cube in seq[:, :, 0, 0].data]),
# max([cube.data.max() for cube in seq[:, :, 0, 0].data]))))
# ])
# def test_sequence_plot_as_cube_1D_plot(test_input, test_kwargs, expected_values):
# # Unpack expected values
# expected_x_data, expected_y_data, expected_xlabel, expected_ylabel, \
# expected_xlim, expected_ylim = expected_values
# # Run plot method
# output = test_input.plot_as_cube(**test_kwargs)
# # Check values are correct
# # Check type of output plot object
# assert isinstance(output, matplotlib.axes.Axes)
# # Check x and y data are correct.
# assert np.allclose(output.lines[0].get_xdata(), expected_x_data)
# assert np.allclose(output.lines[0].get_ydata(), expected_y_data)
# # Check x and y axis labels are correct.
# assert output.axes.get_xlabel() == expected_xlabel
# assert output.axes.get_ylabel() == expected_ylabel
# # Check all data is contained within x and y axes limits.
# output_xlim = output.axes.get_xlim()
# assert output_xlim[0] <= expected_xlim[0]
# assert output_xlim[1] >= expected_xlim[1]
# output_ylim = output.axes.get_ylim()
# assert output_ylim[0] <= expected_ylim[0]
# assert output_ylim[1] >= expected_ylim[1]
# def test_sequence_plot_as_cube_error():
# with pytest.raises(TypeError):
# seq_no_common_axis.plot_as_cube()
# @pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
# (seq[:, :, 0, 0], {},
# (seq_stack[:, :, 0, 0],
# "custom:pos.helioprojective.lat [deg]", "meta.obs.sequence [None]",
# tuple(seq_axis1_lim_deg + [0, len(seq.data)-1]))),
# (seq_with_units[:, :, 0, 0], {},
# (seq_stack_km[:, :, 0, 0],
# "custom:pos.helioprojective.lat [deg]", "meta.obs.sequence [None]",
# tuple(seq_axis1_lim_deg + [0, len(seq.data)-1]))),
# (seq[:, :, 0, 0], {"plot_axis_indices": [0, 1]},
# (seq_stack[:, :, 0, 0].transpose(),
# "meta.obs.sequence [None]", "custom:pos.helioprojective.lat [deg]",
# tuple([0, len(seq.data)-1] + seq_axis1_lim_deg))),
# (seq[:, :, 0, 0], {"axes_coordinates": ["pix", "distance"]},
# (seq_stack[:, :, 0, 0],
# "pix [pix]", "distance [cm]",
# (min(seq[0, :, 0, 0].extra_coords["pix"]["value"].value),
# max(seq[0, :, 0, 0].extra_coords["pix"]["value"].value),
# min(seq[:, :, 0, 0].sequence_axis_extra_coords["distance"].value),
# max(seq[:, :, 0, 0].sequence_axis_extra_coords["distance"].value)))),
# # This example shows weakness of current extra coord axis values on 2D plotting!
# # Only the coordinates from the first cube are shown.
# (seq[:, :, 0, 0], {"axes_coordinates": [np.arange(
# 10, 10+seq[:, :, 0, 0].dimensions[-1].value), "distance"], "axes_units": [None, u.m]},
# (seq_stack[:, :, 0, 0],
# " [None]", "distance [m]",
# (10, 10+seq[:, :, 0, 0].dimensions[-1].value-1,
# min(seq[:, :, 0, 0].sequence_axis_extra_coords["distance"].to(u.m).value),
# max(seq[:, :, 0, 0].sequence_axis_extra_coords["distance"].to(u.m).value)))),
# (seq[:, :, 0, 0], {"axes_coordinates": [np.arange(
# 10, 10+seq[:, :, 0, 0].dimensions[-1].value)*u.deg, None], "axes_units": [u.arcsec, None]},
# (seq_stack[:, :, 0, 0],
# " [arcsec]", "meta.obs.sequence [None]",
# tuple(list(
# (np.arange(10, 10+seq[:, :, 0, 0].dimensions[-1].value)*u.deg).to(u.arcsec).value) \
# + [0, len(seq.data)-1])))
# ])
# def test_sequence_plot_2D_image(test_input, test_kwargs, expected_values):
# # Unpack expected values
# expected_data, expected_xlabel, expected_ylabel, expected_extent = expected_values
# # Run plot method
# output = test_input.plot(**test_kwargs)
# # Check values are correct
# assert isinstance(output, matplotlib.axes.Axes)
# np.testing.assert_array_equal(output.images[0].get_array(), expected_data)
# assert output.xaxis.get_label_text() == expected_xlabel
# assert output.yaxis.get_label_text() == expected_ylabel
# assert np.allclose(output.images[0].get_extent(), expected_extent, rtol=1e-3)
# # Also check x and y values?????
# @pytest.mark.parametrize("test_input, test_kwargs, expected_error", [
# (seq[:, :, 0, 0], {"axes_coordinates": [
# np.arange(10, 10+seq[:, :, 0, 0].dimensions[-1].value), None],
# "axes_units": [u.m, None]}, ValueError),
# (seq[:, :, 0, 0], {"axes_coordinates": [
# None, np.arange(10, 10+seq[:, :, 0, 0].dimensions[0].value)],
# "axes_units": [None, u.m]}, ValueError)
# ])
# def test_sequence_plot_2D_image_errors(test_input, test_kwargs, expected_error):
# with pytest.raises(expected_error):
# output = test_input.plot(**test_kwargs)
# @pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
# (seq[:, :, :, 0], {},
# (seq_concat[:, :, 0],
# "em.wl [m]", "custom:pos.helioprojective.lat [deg]",
# tuple(seq_axis2_lim_m + seq_axis1_lim_deg))),
# (seq_with_units[:, :, :, 0], {},
# (seq_concat_km[:, :, 0],
# "em.wl [m]", "custom:pos.helioprojective.lat [deg]",
# tuple(seq_axis2_lim_m + seq_axis1_lim_deg))),
# (seq[:, :, :, 0], {"plot_axis_indices": [0, 1],
# "axes_coordinates": ["pix", "hi"]},
# (seq_concat[:, :, 0].transpose(), "pix [pix]", "hi [s]",
# ((seq[:, :, :, 0].common_axis_extra_coords["pix"][0].value,
# seq[:, :, :, 0].common_axis_extra_coords["pix"][-1].value,
# seq[:, :, :, 0].data[0].extra_coords["hi"]["value"][0].value,
# seq[:, :, :, 0].data[0].extra_coords["hi"]["value"][-1].value)))),
# (seq[:, :, :, 0], {"axes_coordinates": [
# np.arange(10, 10+seq[:, :, :, 0].cube_like_dimensions[-1].value) * u.m,
# np.arange(10, 10+seq[:, :, :, 0].cube_like_dimensions[0].value) * u.m]},
# (seq_concat[:, :, 0], " [m]", " [m]",
# (10, 10+seq[:, :, :, 0].cube_like_dimensions[-1].value-1,
# 10, 10+seq[:, :, :, 0].cube_like_dimensions[0].value-1))),
# (seq[:, :, :, 0], {"axes_coordinates": [
# np.arange(10, 10+seq[:, :, :, 0].cube_like_dimensions[-1].value) * u.m,
# np.arange(10, 10+seq[:, :, :, 0].cube_like_dimensions[0].value) * u.m],
# "axes_units": ["cm", u.cm]},
# (seq_concat[:, :, 0], " [cm]", " [cm]",
# (10*100, (10+seq[:, :, :, 0].cube_like_dimensions[-1].value-1)*100,
# 10*100, (10+seq[:, :, :, 0].cube_like_dimensions[0].value-1)*100)))
# ])
# def test_sequence_plot_as_cube_2D_image(test_input, test_kwargs, expected_values):
# # Unpack expected values
# expected_data, expected_xlabel, expected_ylabel, expected_extent = expected_values
# # Run plot method
# output = test_input.plot_as_cube(**test_kwargs)
# # Check values are correct
# assert isinstance(output, matplotlib.axes.Axes)
# np.testing.assert_array_equal(output.images[0].get_array(), expected_data)
# assert output.xaxis.get_label_text() == expected_xlabel
# assert output.yaxis.get_label_text() == expected_ylabel
# assert np.allclose(output.images[0].get_extent(), expected_extent, rtol=1e-3)
# # Also check x and y values?????
# @pytest.mark.parametrize("test_input, test_kwargs, expected_error", [
# (seq[:, :, :, 0], {"axes_coordinates": [
# np.arange(10, 10+seq[:, :, :, 0].cube_like_dimensions[-1].value), None],
# "axes_units": [u.m, None]}, ValueError),
# (seq[:, :, :, 0], {"axes_coordinates": [
# None, np.arange(10, 10+seq[:, :, :, 0].cube_like_dimensions[0].value)],
# "axes_units": [None, u.m]}, ValueError)
# ])
# def test_sequence_plot_as_cube_2D_image_errors(test_input, test_kwargs, expected_error):
# with pytest.raises(expected_error):
# output = test_input.plot_as_cube(**test_kwargs)
# @pytest.mark.parametrize("test_input, test_kwargs, expected_data", [
# (seq, {}, seq_stack.reshape(4, 1, 2, 3, 4)),
# (seq_with_units, {}, seq_stack_km.reshape(4, 1, 2, 3, 4))
# ])
# def test_sequence_plot_ImageAnimator(test_input, test_kwargs, expected_data):
# # Run plot method
# output = test_input.plot(**test_kwargs)
# # Check plot object properties are correct.
# assert isinstance(output, ndcube.mixins.sequence_plotting.ImageAnimatorNDCubeSequence)
# np.testing.assert_array_equal(output.data, expected_data)
# @pytest.mark.parametrize("test_input, test_kwargs, expected_data", [
# (seq, {}, seq_concat.reshape(1, 8, 3, 4)),
# (seq_with_units, {}, seq_concat_km.reshape(1, 8, 3, 4))
# ])
# def test_sequence_plot_as_cube_ImageAnimator(test_input, test_kwargs, expected_data):
# # Run plot method
# output = test_input.plot_as_cube(**test_kwargs)
# # Check plot object properties are correct.
# assert isinstance(output, ndcube.mixins.sequence_plotting.ImageAnimatorCubeLikeNDCubeSequence)
# np.testing.assert_array_equal(output.data, expected_data)
# @pytest.mark.parametrize("test_input, expected", [
# ((seq_with_unit0.data, None), (None, None)),
# ((seq_with_unit0.data, u.km), (None, None)),
# ((seq_with_units.data, None), ([u.km, u.m, u.km, u.m], u.km)),
# ((seq_with_units.data, u.cm), ([u.km, u.m, u.km, u.m], u.cm))])
# def test_determine_sequence_units(test_input, expected):
# output_seq_unit, output_unit = ndcube.mixins.sequence_plotting._determine_sequence_units(
# test_input[0], unit=test_input[1])
# assert output_seq_unit == expected[0]
# assert output_unit == expected[1]
# def test_determine_sequence_units_errors():
# with pytest.raises(ValueError):
# output_seq_unit, output_unit = ndcube.mixins.sequence_plotting._determine_sequence_units(
# seq.data, u.m)
# @pytest.mark.parametrize("test_input, expected", [
# ((3, 1, "time", u.s), ([1], [None, 'time', None], [None, u.s, None])),
# ((3, None, None, None), ([-1, -2], None, None))])
# def test_prep_axes_kwargs(test_input, expected):
# output = ndcube.mixins.sequence_plotting._prep_axes_kwargs(*test_input)
# for i in range(3):
# assert output[i] == expected[i]
# @pytest.mark.parametrize("test_input, expected_error", [
# ((3, [0, 1, 2], ["time", "pix"], u.s), ValueError),
# ((3, 0, ["time", "pix"], u.s), ValueError),
# ((3, 0, "time", [u.s, u.pix]), ValueError),
# ((3, 0, 0, u.s), TypeError),
# ((3, 0, "time", 0), TypeError)])
# def test_prep_axes_kwargs_errors(test_input, expected_error):
# with pytest.raises(expected_error):
# output = ndcube.mixins.sequence_plotting._prep_axes_kwargs(*test_input)
# @pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
# (seq, {"plot_axis_indices": 3},
# (seq_stack.data, none_axis_ranges_axis3, "time [min]", "Data [None]",
# (none_axis_ranges_axis3[-1].min(), none_axis_ranges_axis3[-1].max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq_with_units, {"plot_axis_indices": -1, "data_unit": u.km},
# (seq_stack_km.data, none_axis_ranges_axis3, "time [min]", "Data [km]",
# (none_axis_ranges_axis3[-1].min(), none_axis_ranges_axis3[-1].max()),
# (seq_stack_km.data.min(), seq_stack_km.data.max()))),
# (seq_with_masks, {"plot_axis_indices": 0},
# (seq_stack, none_axis_ranges_axis0, "meta.obs.sequence [None]", "Data [None]",
# (none_axis_ranges_axis0[0].min(), none_axis_ranges_axis0[0].max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq_with_some_masks, {"plot_axis_indices": 0},
# (seq_stack, none_axis_ranges_axis0, "meta.obs.sequence [None]", "Data [None]",
# (none_axis_ranges_axis0[0].min(), none_axis_ranges_axis0[0].max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq, {"plot_axis_indices": 0, "axes_coordinates": "distance"},
# (seq_stack.data, distance0_none_axis_ranges_axis0, "distance [cm]", "Data [None]",
# (seq.sequence_axis_extra_coords["distance"].value.min(),
# seq.sequence_axis_extra_coords["distance"].value.max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq, {"plot_axis_indices": 0, "axes_coordinates": "distance", "axes_units": "mm"},
# (seq_stack.data, distance0_none_axis_ranges_axis0_mm, "distance [mm]", "Data [None]",
# (seq.sequence_axis_extra_coords["distance"].to("mm").value.min(),
# seq.sequence_axis_extra_coords["distance"].to("mm").value.max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq, {"plot_axis_indices": 0,
# "axes_coordinates": userrangequantity_none_axis_ranges_axis0[0]*u.J},
# (seq_stack.data, userrangequantity_none_axis_ranges_axis0, " [J]", "Data [None]",
# (userrangequantity_none_axis_ranges_axis0[0].min(),
# userrangequantity_none_axis_ranges_axis0[0].max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq, {"plot_axis_indices": 0, "axes_units": u.erg,
# "axes_coordinates": userrangequantity_none_axis_ranges_axis0[0]*u.J},
# (seq_stack.data, userrangequantity_none_axis_ranges_axis0_1e7, " [erg]", "Data [None]",
# (userrangequantity_none_axis_ranges_axis0_1e7[0].min(),
# userrangequantity_none_axis_ranges_axis0_1e7[0].max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq, {"plot_axis_indices": 2, "axes_coordinates": "hi"},
# (seq_stack.data, hi2_none_axis_ranges_axis2, "hi [s]", "Data [None]",
# (hi2_none_axis_ranges_axis2[2].min(), hi2_none_axis_ranges_axis2[2].max()),
# (seq_stack.data.min(), seq_stack.data.max()))),
# (seq, {"plot_axis_indices": 1, "axes_coordinates": "pix"},
# (seq_stack.data, pix1_none_axis_ranges_axis1, "pix [pix]", "Data [None]",
# (pix1_none_axis_ranges_axis1[1].min(), pix1_none_axis_ranges_axis1[1].max()),
# (seq_stack.data.min(), seq_stack.data.max())))
# ])
# def test_sequence_plot_LineAnimator(test_input, test_kwargs, expected_values):
# # Unpack expected values
# expected_data, expected_axis_ranges, expected_xlabel, \
# expected_ylabel, expected_xlim, expected_ylim = expected_values
# # Run plot method.
# output = test_input.plot(**test_kwargs)
# # Check right type of plot object is produced.
# assert type(output) is ndcube.mixins.sequence_plotting.LineAnimatorNDCubeSequence
# # Check data being plotted is correct
# np.testing.assert_array_equal(output.data, expected_data)
# if type(expected_data) is np.ma.core.MaskedArray:
# np.testing.assert_array_equal(output.data.mask, expected_data.mask)
# # Check values of axes and sliders are correct.
# for i in range(len(output.axis_ranges)):
# print(i)
# assert np.allclose(output.axis_ranges[i], expected_axis_ranges[i])
# # Check plot axis labels and limits are correct
# assert output.xlabel == expected_xlabel
# assert output.ylabel == expected_ylabel
# assert output.xlim == expected_xlim
# assert output.ylim == expected_ylim
# @pytest.mark.parametrize("test_input, test_kwargs, expected_values", [
# (seq, {"plot_axis_indices": 2, "axes_units": u.s},
# (seq_concat.data, cubelike_none_axis_ranges_axis2_s, "time [s]", "Data [None]",
# (cubelike_none_axis_ranges_axis2_s[2].min(), cubelike_none_axis_ranges_axis2_s[2].max()),
# (seq_concat.data.min(), seq_concat.data.max()))),
# (seq, {"plot_axis_indices": 0},
# (seq_concat.data, cubelike_none_axis_ranges_axis0,
# "custom:pos.helioprojective.lat [deg]", "Data [None]",
# (0, 7), (seq_concat.data.min(), seq_concat.data.max()))),
# (seq_with_masks, {"plot_axis_indices": 0},
# (seq_concat.data, cubelike_none_axis_ranges_axis0,
# "custom:pos.helioprojective.lat [deg]", "Data [None]",
# (0, 7), (seq_concat.data.min(), seq_concat.data.max()))),
# (seq_with_some_masks, {"plot_axis_indices": -3},
# (seq_concat.data, cubelike_none_axis_ranges_axis0,
# "custom:pos.helioprojective.lat [deg]", "Data [None]",
# (0, 7), (seq_concat.data.min(), seq_concat.data.max()))),
# (seqm, {"plot_axis_indices": 0},
# (seq_concat.data, cubelike_none_axis_ranges_axis0,
# "custom:pos.helioprojective.lon [deg]", "Data [None]",
# (0, 7), (seq_concat.data.min(), seq_concat.data.max())))
# ])
# def test_sequence_plot_as_cube_LineAnimator(test_input, test_kwargs, expected_values):
# # Unpack expected values
# expected_data, expected_axis_ranges, expected_xlabel, \
# expected_ylabel, expected_xlim, expected_ylim = expected_values
# # Run plot method.
# output = test_input.plot_as_cube(**test_kwargs)
# # Check right type of plot object is produced.
# assert type(output) is ndcube.mixins.sequence_plotting.LineAnimatorCubeLikeNDCubeSequence
# # Check data being plotted is correct
# np.testing.assert_array_equal(output.data, expected_data)
# if type(expected_data) is np.ma.core.MaskedArray:
# np.testing.assert_array_equal(output.data.mask, expected_data.mask)
# # Check values of axes and sliders are correct.
# for i in range(len(output.axis_ranges)):
# assert np.allclose(output.axis_ranges[i], expected_axis_ranges[i])
# # Check plot axis labels and limits are correct
# assert output.xlabel == expected_xlabel
# assert output.ylabel == expected_ylabel
# assert output.xlim == expected_xlim
# assert output.ylim == expected_ylim
| 50.233633
| 101
| 0.614535
| 5,616
| 39,132
| 4.047543
| 0.049858
| 0.011966
| 0.030179
| 0.019005
| 0.873609
| 0.843958
| 0.801461
| 0.753156
| 0.711759
| 0.662839
| 0
| 0.056357
| 0.190611
| 39,132
| 778
| 102
| 50.298201
| 0.66132
| 0.961873
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.001285
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0dfb63d464722693803f0f6495c989387fb3222a
| 159,553
|
py
|
Python
|
test/commands/extended/prepare_transfer_test.py
|
Cornode/cornode.lib.py
|
866230123a62acc235ca8f46e7b59fe08655049b
|
[
"MIT"
] | null | null | null |
test/commands/extended/prepare_transfer_test.py
|
Cornode/cornode.lib.py
|
866230123a62acc235ca8f46e7b59fe08655049b
|
[
"MIT"
] | null | null | null |
test/commands/extended/prepare_transfer_test.py
|
Cornode/cornode.lib.py
|
866230123a62acc235ca8f46e7b59fe08655049b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from typing import Iterable, List, Optional
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from mock import Mock, patch
from cornode import Address, BadApiResponse, cornode, ProposedTransaction, Tag, \
TryteString
from cornode.adapter import MockAdapter
from cornode.commands.extended.prepare_transfer import PrepareTransferCommand
from cornode.crypto.addresses import AddressGenerator
from cornode.crypto.types import Seed
from cornode.filters import GeneratedAddress, Trytes
from six import PY2, binary_type, text_type
class PrepareTransferRequestFilterTestCase(BaseFilterTestCase):
    """
    Unit tests for the request filter of ``PrepareTransferCommand``.

    The filter validates and normalizes four parameters:

    - ``seed`` (required, TrytesCompatible)
    - ``transfers`` (required, non-empty list of ProposedTransaction)
    - ``inputs`` (optional, list of Address with ``key_index`` set)
    - ``changeAddress`` (optional, TrytesCompatible)

    Each test submits a request dict and asserts that the filter either
    passes it (with normalized/cleaned values) or rejects it with the
    expected per-field error codes.
    """
    filter_type = PrepareTransferCommand(MockAdapter()).get_request_filter
    skip_value_check = True

    # noinspection SpellCheckingInspection
    def setUp(self):
        """Define reusable tryte sequences and sample transactions."""
        super(PrepareTransferRequestFilterTestCase, self).setUp()

        # Define some tryte sequences that we can reuse between tests.
        self.trytes1 = (
            b'TESTVALUEONE9DONTUSEINPRODUCTION99999JBW'
            b'GEC99GBXFFBCHAEJHLC9DX9EEPAI9ICVCKBX9FFII'
        )

        self.trytes2 = (
            b'TESTVALUETWO9DONTUSEINPRODUCTION99999THZ'
            b'BODYHZM99IR9KOXLZXVUOJM9LQKCQJBWMTY999999'
        )

        self.trytes3 = (
            b'TESTVALUETHREE9DONTUSEINPRODUCTIONG99999'
            b'GTQ9CSNUFPYW9MBQ9LFQJSORCF9LGTY9BWQFY9999'
        )

        self.trytes4 = (
            b'TESTVALUEFOUR9DONTUSEINPRODUCTION99999ZQ'
            b'HOGCBZCOTZVZRFBEHQKHENBIZWDTUQXTOVWEXRIK9'
        )

        # Sample transactions that tests can drop into ``transfers``.
        self.transfer1 = \
            ProposedTransaction(
                address =
                    Address(
                        b'TESTVALUEFIVE9DONTUSEINPRODUCTION99999MG'
                        b'AAAHJDZ9BBG9U9R9XEOHCBVCLCWCCCCBQCQGG9WHK'
                    ),

                value = 42,
            )

        self.transfer2 = \
            ProposedTransaction(
                address =
                    Address(
                        b'TESTVALUESIX9DONTUSEINPRODUCTION99999GGT'
                        b'FODSHHELBDERDCDRBCINDCGQEI9NAWDJBC9TGPFME'
                    ),

                value = 86,
            )

    def test_pass_happy_path(self):
        """
        Request is valid.
        """
        request = {
            'changeAddress': Address(self.trytes1),
            'seed': Seed(self.trytes2),
            'transfers': [self.transfer1, self.transfer2],

            'inputs': [
                Address(self.trytes3, key_index=3),
                Address(self.trytes4, key_index=4),
            ],
        }

        filter_ = self._filter(request)

        self.assertFilterPasses(filter_)
        # A fully-typed request passes through unchanged.
        self.assertDictEqual(filter_.cleaned_data, request)

    def test_pass_compatible_types(self):
        """
        Request contains values that can be converted to the expected
        types.
        """
        filter_ = self._filter({
            # Any TrytesCompatible value works here.
            'changeAddress': binary_type(self.trytes1),
            'seed': bytearray(self.trytes2),

            # These have to be :py:class:`Address` instances, so that we can
            # set ``key_index``.
            'inputs': [
                Address(self.trytes3, key_index=3),
                Address(self.trytes4, key_index=4),
            ],

            # These still have to have the correct type, however.
            'transfers': [self.transfer1, self.transfer2],
        })

        self.assertFilterPasses(filter_)
        self.assertDictEqual(
            filter_.cleaned_data,

            {
                'changeAddress': Address(self.trytes1),
                'seed': Seed(self.trytes2),
                'transfers': [self.transfer1, self.transfer2],

                # NOTE(review): these compare equal to the key_index-bearing
                # inputs in the request above, which suggests that Address
                # equality ignores ``key_index`` — confirm against the
                # Address implementation.
                'inputs': [
                    Address(self.trytes3),
                    Address(self.trytes4),
                ],
            },
        )

    def test_pass_optional_parameters_omitted(self):
        """
        Request omits optional parameters.
        """
        filter_ = self._filter({
            'seed': Seed(self.trytes1),
            'transfers': [self.transfer1],
        })

        self.assertFilterPasses(filter_)
        self.assertDictEqual(
            filter_.cleaned_data,

            {
                'seed': Seed(self.trytes1),
                'transfers': [self.transfer1],

                # These parameters are set to their default values.
                'changeAddress': None,
                'inputs': None,
            },
        )

    def test_fail_empty(self):
        """
        Request is empty.
        """
        # Only ``seed`` and ``transfers`` are required, so those are the
        # only keys flagged as missing.
        self.assertFilterErrors(
            {},

            {
                'seed': [f.FilterMapper.CODE_MISSING_KEY],
                'transfers': [f.FilterMapper.CODE_MISSING_KEY],
            },
        )

    def test_fail_unexpected_parameters(self):
        """
        Request contains unexpected parameters.
        """
        self.assertFilterErrors(
            {
                'seed': Seed(self.trytes1),

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],

                # You guys give up?  Or are you thirsty for more?
                'foo': 'bar',
            },

            {
                'foo': [f.FilterMapper.CODE_EXTRA_KEY],
            },
        )

    def test_fail_seed_null(self):
        """
        ``seed`` is null.
        """
        self.assertFilterErrors(
            {
                'seed': None,

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'seed': [f.Required.CODE_EMPTY],
            },
        )

    def test_fail_seed_wrong_type(self):
        """
        ``seed`` is not a TrytesCompatible value.
        """
        # Unicode strings are not TrytesCompatible; only bytes-like values
        # (and Trytes instances) are accepted.
        self.assertFilterErrors(
            {
                'seed': text_type(self.trytes1, 'ascii'),

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'seed': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_seed_not_trytes(self):
        """
        ``seed`` contains invalid characters.
        """
        self.assertFilterErrors(
            {
                'seed': b'not valid; must contain only uppercase and "9"',

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'seed': [Trytes.CODE_NOT_TRYTES],
            },
        )

    def test_fail_transfers_wrong_type(self):
        """
        ``transfers`` is not an array.
        """
        self.assertFilterErrors(
            {
                # It's gotta be an array, even if there's only one transaction.
                'transfers':
                    ProposedTransaction(address=Address(self.trytes2), value=42),

                'seed': Seed(self.trytes1),
            },

            {
                'transfers': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_transfers_empty(self):
        """
        ``transfers`` is an array, but it is empty.
        """
        self.assertFilterErrors(
            {
                'transfers': [],
                'seed': Seed(self.trytes1),
            },

            {
                'transfers': [f.Required.CODE_EMPTY],
            },
        )

    def test_fail_transfers_contents_invalid(self):
        """
        ``transfers`` is a non-empty array, but it contains invalid values.
        """
        # Errors are reported per-index (``transfers.0``, ``transfers.2``);
        # the valid item at index 1 produces no error.
        self.assertFilterErrors(
            {
                'transfers': [
                    None,

                    # This value is valid; just adding it to make sure the filter
                    # doesn't cheat!
                    ProposedTransaction(address=Address(self.trytes2), value=42),

                    # A plain dict is not accepted, even if it has the right keys.
                    {'address': Address(self.trytes2), 'value': 42},
                ],

                'seed': Seed(self.trytes1),
            },

            {
                'transfers.0': [f.Required.CODE_EMPTY],
                'transfers.2': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_change_address_wrong_type(self):
        """
        ``changeAddress`` is not a TrytesCompatible value.
        """
        self.assertFilterErrors(
            {
                'changeAddress': text_type(self.trytes3, 'ascii'),

                'seed': Seed(self.trytes1),

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'changeAddress': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_change_address_not_trytes(self):
        """
        ``changeAddress`` contains invalid characters.
        """
        self.assertFilterErrors(
            {
                'changeAddress': b'not valid; must contain only uppercase and "9"',

                'seed': Seed(self.trytes1),

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'changeAddress': [Trytes.CODE_NOT_TRYTES],
            },
        )

    def test_fail_inputs_wrong_type(self):
        """
        ``inputs`` is not an array.
        """
        self.assertFilterErrors(
            {
                # Must be an array, even if there's only one input.
                'inputs': Address(self.trytes3),

                'seed': Seed(self.trytes1),

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'inputs': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_inputs_contents_invalid(self):
        """
        ``inputs`` is a non-empty array, but it contains invalid values.
        """
        self.assertFilterErrors(
            {
                'inputs': [
                    None,

                    # Raw bytes are rejected here: inputs must already be
                    # Address instances (see comment below).
                    binary_type(self.trytes1),

                    # This is actually valid; I just added it to make sure the
                    # filter isn't cheating!
                    Address(self.trytes1, key_index=1),

                    # Inputs must have ``key_index`` set, so that we can generate
                    # the correct private key to sign them.
                    Address(b'', key_index=None),
                ],

                'seed': Seed(self.trytes1),

                'transfers': [
                    ProposedTransaction(address=Address(self.trytes2), value=42),
                ],
            },

            {
                'inputs.0': [f.Required.CODE_EMPTY],
                'inputs.1': [f.Type.CODE_WRONG_TYPE],
                'inputs.3': [GeneratedAddress.CODE_NO_KEY_INDEX],
            },
        )
# noinspection SpellCheckingInspection
class PrepareTransferCommandTestCase(TestCase):
def setUp(self):
    """Create a fresh mock adapter and command instance for each test."""
    super(PrepareTransferCommandTestCase, self).setUp()

    # The command talks to the node only through this mock adapter, so
    # each test can seed whatever responses it needs.
    adapter = MockAdapter()
    self.adapter = adapter
    self.command = PrepareTransferCommand(adapter)
def run(self, result=None):
    """
    Run the test case with ``get_current_timestamp`` patched.

    Every test in this case is wrapped so that bundle finalization sees a
    fixed timestamp; otherwise the expected trytes in the assertions would
    change from run to run.
    """
    # Ensure that all transactions use a predictable timestamp.
    self.timestamp = 1482938294

    def get_current_timestamp():
        # Read from the attribute (rather than closing over a constant) so
        # that an individual test could adjust ``self.timestamp`` if needed.
        return self.timestamp

    with patch(
        target = 'cornode.transaction.get_current_timestamp',
        new = get_current_timestamp,
    ):
        return super(PrepareTransferCommandTestCase, self).run(result)
def test_wireup(self):
    """
    Verify that the command is wired up correctly.
    """
    # The high-level API object must expose this command under the
    # ``prepareTransfer`` attribute.
    api = cornode(self.adapter)

    self.assertIsInstance(api.prepareTransfer, PrepareTransferCommand)
def test_pass_inputs_not_needed(self):
    """
    Preparing a bundle that does not transfer any cornodes.
    """
    # Both transfers are zero-value, so no inputs (and therefore no change
    # address) are required to fund the bundle, and no signatures are
    # generated.
    response = self.command(
        seed = Seed.random(),

        transfers = [
            ProposedTransaction(
                tag = Tag(b'PYOTA9UNIT9TESTS9'),
                value = 0,

                address = Address(
                    b'TESTVALUE9DONTUSEINPRODUCTION99999KJUPKN'
                    b'RMTHKVJYWNBKBGCKOQWBTKBOBJIZZYQITTFJZKLOI'
                ),
            ),

            ProposedTransaction(
                value = 0,

                address = Address(
                    b'TESTVALUE9DONTUSEINPRODUCTION99999YMSWGX'
                    b'VNDMLXPT9HMVAOWUUZMLSJZFWGKDVGXPSQAWAEBJN'
                ),
            ),
        ],
    )

    self.assertDictEqual(
        response,

        {
            # Transactions that don't require signatures aren't too
            # interesting.  Things will get more exciting in subsequent
            # tests.
            #
            # NOTE(review): the first trytes entry carries the address of
            # the SECOND transfer, so the bundle appears to be serialized
            # in reverse index order — confirm against ProposedBundle.
            'trytes': [
                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUE9DONTUSEINPRODUCTION99999YMSWGXVNDMLXPT9HMVA'
                    b'OWUUZMLSJZFWGKDVGXPSQAWAEBJN999999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99A99999999A999999999EBBXLEONGGJMRUPZAO'
                    b'HRAIOIEXDSZGQCXRWQMZNDUEQYYKDOSPHOI9KXZTCSBEUBW9WBHILISLYOZWIG99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUE9DONTUSEINPRODUCTION99999KJUPKNRMTHKVJYWNBKB'
                    b'GCKOQWBTKBOBJIZZYQITTFJZKLOI999999999999999999999999999PYOTA9UNI'
                    b'T9TESTS99999999999NYBKIVD99999999999A999999999EBBXLEONGGJMRUPZAO'
                    b'HRAIOIEXDSZGQCXRWQMZNDUEQYYKDOSPHOI9KXZTCSBEUBW9WBHILISLYOZWIG99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
            ],
        },
    )
def test_pass_inputs_explicit_no_change(self):
"""
Preparing a bundle with specified inputs, no change address needed.
"""
self.adapter.seed_response('getBalances', {
'balances': [13, 29],
'duration': '1',
'milestone':
'TESTVALUE9DONTUSEINPRODUCTION99999ZNIUXU'
'FIVFBBYQHFYZYIEEWZL9VPMMKIIYTEZRRHXJXKIKF',
})
mock_signature_fragment_generator = MockSignatureFragmentGenerator([
TryteString(
b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWGHM'
b'NUXBJTOBKZFDNJEMAOPPLR9OOQJCDVO9XSCYQJQVTXQDYWQEBIXKDZAFWINAHJELJT'
b'DPVMUEWSVCJA9ONDYBNANWCGLBQMEMTBFDMWLCMQHGJLGYDQGIMLSNQHBGSVTDZSGN'
b'QAL9OHRAPDKYSVBTNYRUUBNEEAINJMOVOHOWXUAIEDAIQDESQFCKJELHAVODSMXMKE'
b'HTDKCDIWWISXSAHQE9TJTLJZGXIABHU9CUACMLVSSYV9UJREPWFVYWWXPYYJRP9DOE'
b'KNDMBSBKKHIFMPXZXIJERXRZVBVDBYNZBBCCOSEDOLDGSNQK99HIYSWNYYEBLRT9MA'
b'DLXLLZJOSZCFWAVZY9XUPNZUVOSKBMKXXJNRKDBOSGUGME9QNBMHIWXWXPEEUVQAQV'
b'UXDJGMJOBXG9VJBWPRQRCCQSNBEHTLGOKJVYEPQOJO9QIZLYAVLCKVXKEKRGBSZJAC'
b'9KTSSNMDQGKCLPZDJAQ9PBQMLUONVVFAWTMREGFXJMRRGL9MKNPOZGOYRPDCYEJCYJ'
b'UN9HYNSNHXARMRJVXBUHOP9K9BIIEYGSHBUESKTAOQOEANEAIHYHVGSVNPXWRBTJAM'
b'KMWEQOSYEWXLSRYVOSTMPOGYNPDNFLOICXVHYBDHSXVRKVWNVZOZQDOITZWICSYEW9'
b'RGCPPUJYVIYVTSZILYENYUYUGDSGWVYWRMZJNCTTPVWDWXAPVZQQKI9CGEQPBFPCLG'
b'DDEGBUUTISNCMJXQCTUNKQTLCATNOIRPMEUQBQTHHQYRGDLZEUZBALNQDXJYZBVXDP'
b'LVOVVAUCQSCGRTUJRBBNRV9ORETTGFIXBBBVOPFHPKGPKVBYFTZMWUVZYVWWSDKQVO'
b'NMPLLQTV9IZUWLUWZNLCVJNPMG9CMXQG9D9WYCANBRMYV9DU9FMJT9JHT9RWCGLHFC'
b'ODXJVFQBLTKJWVNVGSUHNWLHNPLZDSWDMDVQTLVCSVFJJTIQZFAPCXJWDAXWJKJVOK'
b'HALCQQTIXABPFXPUFK9IKXYUGMPXNSQCJDVETOVEX9LXYLXWRW9PFEYJCUJHLUB9NX'
b'TUGLIQMDGPDPSJTWDYEWXQAICLN9BTGNBJWLVAXZGNCYXGHBMRUVVYTJGH9XDGSZHQ'
b'DYKFGMOWORSFDFBLJHBRTXRSEBALCJIJTQJYDZZKWZGVAPFVKVEOXGYRLMBSPFHUIJ'
b'ZZFMFVOTLPUWSYZCWFZMAALHRGSYSXSMOHWARYZZVIAKXAHGY9SROWPVFACXXLQEXX'
b'OJCKXRRZHBZXJIBWQMMZTRDFYQBSBBZQQXGCAAECMQINHJRBSGOYPCGWKPWCHBKOJT'
b'IGDASZFGONTUGDSOOLEMGOEBFCZZJZSCGXPHXHB9WGMMFVUTCHDBSAMYTECQZWGCXA'
b'WTCTIBZHQVUAIBPZHBBTZAERYU9XAMKBHCHGZISSPOWJIRZTAXDHMAYBPXOXWDIUDH'
b'NBTFJNVHHJO9AWAEC9UPRRFJLNGKTXJXFDGODDOPMGLALRIJBVIFLQTYQPKCKCRBYP'
b'BYGUUFJGJFVCOURNKCGNTQNNKHDDPIVZHCJSLDUYHVPAX9YJOFTTFSKFHTOOQQRCPY'
b'ZKTDVCUZGBOBZKLVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VZZGUNUCKOYLYXOV'
b'HMGULWGSRCGXZLJVNIMZBLFOJJKOTUREMBXYOZXDUP9ROUVYOSJBGGFZMIFTKHJHHJ'
b'GZJNOYQWFZAHLJWWDDFQQAMEGJUEUSIWOHKFJWRXRSJWYPGIGZGMFNAIDGDOUUQUVH'
b'JZQPJMLCGKGADXAXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVG'
b'XBBPBEBDVGZDBWMDMLPXYJBBRNOMKGR9TSVUXSRYXQTCTYLFQORMIGDKBJLNLCQXAC'
b'VCBJGVWRJNYPCKOAILPLMWBYKDLDXLIZMZFWDXUWDEGDUURQGMJNUGJXDXYJGKOTQB'
b'GCHATROPKEN9YTXDUOCMXPGHPDANTJFRRVEVBFVCNTWNMMOVAVKBNSJIWWBVHBMCSU'
b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHVIT'
b'GYROZZNQP'
),
TryteString(
b'SWHZKSNCOQXPCGRTYJPUGKLBNEJFXASKY9XAUROGDAO9QQLIVRZQDJDTPLNTBGUUFG'
b'ELJPSGUMGPIUNCCTQEFU9UZIJJYJXCYWRADRHHXKBEDG9HTCHJHXUJRKMIUFOSKDGM'
b'I9QPCYQSWDCUYKQQEONJEKYWELG9MSNBHRHILGSSKMRCQJBOGNYBKEMTOFEUBEOUBD'
b'9ULP9PHWYKXEQNDMUR9BGDGPEUFRFRGNGFJFPYQXABSDALTYKL9SM9VVQCOHY9AS99'
b'EYWSHUNEQVVNLS9CNPEVMPOKMWQYFPGTNJBZCDWYPFWSBKZOYXNNVMPODEHMHNIYZC'
b'HIEDEAB9TLFOWVHF99GVRWUZWSN9IQOKWIXERKRQETZS9ZJJSQRLLPQXEWNMFVVBWO'
b'IK9MBYCEGUJ9HJRIIMBVJNGXMGPGDLOLYWFVQNOKTFZRBJSSBJTETGAIUGZOYQOFTV'
b'BKAQY9SSJWJXXYAUVUQWPXVCISFSDSHDQCPVMG9GVDAO9GIMDHZWJOKSUEUFHBGSCZ'
b'KNBTZWJXSFYNJSBSEL9UMZBAZRGYCEHOSJBMKMPMNXKEVTMUDEFWBIKOXUSBNPTNEC'
b'GVLYSOGUDJDPHYFADXRAOLQXJSJDANINJEOMCFAWWITREGCDF9OZ9ZKHPJZJNMOVGX'
b'9OKQBSGVZYWKNOPVJEOZEI9BPE9GCUEQVAHSBBRBGQTEXVZCSL9ECOWPOWZCVSCBOU'
b'SNQMTJIEKHXL9NCPRMLRNKQEHYJCLRHGZKFNBJIPKSKPRFTSKFJULTBTXFDQHWUYOS'
b'DQBHPAINVEPKCCHJDTZOJIGJZOF9AEQDBKCZSZMIWUUVHVGAFKALGITVQQKBAHKCIF'
b'SVMVZ9UDQABVIANTBUQOFBIXQBWB9KKQOVJZNVBEDAZKUTRNKGJQWMTEKV9KGCIBRD'
b'CBAPKSTMCZGUV9HTAABQDKGQBCRFNXBMZRTHF9MO9GAGQDYDVLOFMDE9QQZYR9GDSB'
b'LUVVMKMCZIMDPNCVLGDKBACWQJRWOQNKBTSDJFKQMKTVKXVNAHRHZALJGVAMXWJYRA'
b'KTEJFXAHBQGSYWWQVECQYPXVFWILNFZKGGRIFCJBSIZRDJXRJHSURPCZKOWKLFRUMV'
b'ENEGMNKUAOGVUECDSGAZNQZKBJDJPVBXLOTID9QLMFNGIWKAAIQTJJROSZBXPQRXAU'
b'CV99OGCEOTQCJ9II9ASZL9XGNSVUXVKPXYOJMF9PX9GSLEROR9FXVQ9MLEMEW9IWNW'
b'BNVAYXZ9ZETTDSMLGZAKHE9IUJBFUHXW9KWCNZOZCCTFGBGWSDAQGGSPSQHOMUVJML'
b'WBDAKYQZMWPQLLYAGUMOVMVLFD9TO9OUBTVUHHUNSFSATSEGBFVGDZUBMTWWFDPSQV'
b'CUFRVKHYYPDWRPNSKXRFTVEIBVZNGUZRQCPXVKBPKQDDLEBWIEBIPTEJIYFHBXCUVC'
b'CKTKEJAYRZCKAXLMELINWUZHG9JFBSBAKHIXMWHUWUFHFNLXNO9GKINYKRTCNN99PH'
b'PHO9MJAGUYZAPNSPWUZ99E9BEADKETLOALWNANYMHSLLQSBS9YTYVJKTVWFUVS9MFO'
b'WCHLEUUFUWTYGLZXFDUXVABTVFXFPUEPIUEIAVSZSSZQJTHNGKBJXADRHVTIBERILM'
b'CCGWUITYQHGEEGWIZZOII9B9EVVVFJNEYEWH9ZVOJGHKVPYKDEZZSPBAOBQGGWPWXT'
b'CKSLSHJQYCDHAYIQ9QVSQFPXZDBYSJJKSNTRXORHLPVOYVMIGALRPXYWQWSJPPFTJC'
b'YXAATLBFNSGVXKFJXHYTILNOQZROUCDUTWOMGYBVTWPKJY9RVKKWQQMVCHJEUBELJD'
b'KJPGYLXUXATNOIJHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHV'
b'HE9EEBQYSNWECSPAJHGLTEUCXALBRVGXFENUCOONSUFZLHTLVQNPDZDIVDQHWVLDED'
b'PFQLJZWF9GFZMPZXFVEQECLUZBBFVSAPEXJLKKOMXEPHZAKP9WYTGQOML9FQSBMSFL'
b'OGRLFQKUCUWFX9DNAOZSSKBUV9IBVIRNUWYBKJVKLJ9PPNLGJATKDCAGVFIVPXRABH'
b'ZVZACJIG9WOKKLFCRDSMTWSCYHOZEEXRIMPQBXVXQAYKZIADSM9GUBICGKGQYNHKVY'
b'OZFRVCHNM'
),
TryteString(
b'KJVG9EKTMPWE9PKWGGJJPDISCX9CJXGWUUPOLKKBVUWUYNBACOOF9LEQGNM9YYGNXJ'
b'EMOBGSDCPLP9CQIFBCLENUCJGCCSWYU9WVFTRZZCPCZXEGMDDPSYDTYUIMVFSLGHTA'
b'ZWJRHY9ZQMFGROIIUIQVDSOTUIRMSROYDPTWRURMZAILGBWIVADPYHPTSFGKAPPMVT'
b'THGLYXZHPFUO9HBAJIOUJOOABAQSOQUKLSVQGSDIHEHGTQSPM9EDHLEQSFFAAQXR9M'
b'UREVQ9MEGXNXMNJVWXYEIRYAOFHDFZAKNVWKTVIHKXWVT9VOZRUPUKXASIFAZQVZSW'
b'HBQU9RGVLJMRVQCMCYSQEIMIAKOHNAKQLTIMEHMZMGAKCIPGHQTWORBLVKISGPKIIM'
b'AMQWMZUNTKJSQZAZNYEGORGNRTKCLNRSOQJRBUCPSDLKLGGRBACIULLZBFBUNQXACK'
b'L9WFEKKAHGLBBRNNEXZPPH9UZESFFKVBOPROFHQOKYAVTJDDVAUGUAURHLESIEIITD'
b'VVRCTRKOGUPERJHNJMXTLNVMWVDZITSPEHRYJKEZVTZSJEYTOQEGNJRMCJLYYKPGDF'
b'UFQHGWRDGEWBXYOGEZ9IXRWJAQLKHPROWIEVI9ILNOXTPOSRLETMNEQ9P9WLXCUZNM'
b'GFK9EYHABBCSEZSGMNJZOEEGRVNU9ASSOOLCXXZKZPFWU9EEUUQRACVGZPL9MQINGL'
b'YPUTUPTLPKWPHRFFBRHZQWIVOXPGAKCQQPRKPPZUHOJISYASMRYMCMJZNR9D9OQANU'
b'XGJXSUSZQFWDJUTNCDKAUAFYKJNVAMBLTPPRPIJRRKQMCIHHGKPQPUQHWJNIEPDLRA'
b'YSJXVSJVKAGBAJCMGQSCZFTEJSG9LUWZGFBGQUHFUHWDHND9WJBPOQQXDEATOBGXDG'
b'M9BKSDCOEZ9IENZPPDUPMKCUKYBIBTBMJPJLDNSOPEKHVGQKLGUISUFSYMHR9I9LRP'
b'LCXJTDHHEXKQEVIFOUGKJEILQIHFG9FWOUBXRHCRHLOYAXTFQUWKJBSX9GNPCWXUQJ'
b'RHDBOBRZPQAPMKCIZGULPZDYLLBMAFJZXGIRVAAVUUCSNGDGJQJTAPV9QXYIABIHBX'
b'ILKQLGDGXQUVADQGDFKKDKMU9WKBEEY9TAVRYQDQFKPDMLMUAEGBHVJPSIZOEQGCSY'
b'NJCICXROXHPZFUXASQJXZEHQTEUKFIYQIGJWORKAIQUFROYGMIDFAJOUFAYYWMXUGJ'
b'FPSRTGEUWWLOXEUTKZCZQHWFUNHTMZVIJ9VYOLBTAIFB9EN9NFVAABVFIBIWXLJSUO'
b'YELOQSIPK99AXSXCPECWOXFUVDIANVO9PKZUESMFWIEVWLEHLCVKDXEROLNEMYRRCJ'
b'DPAYVTYAYSL9AFZH9GXHXZORXZEQTUJEDJGCYCQAENYZRKDJSK9TOCKKCXOSSTOAIO'
b'9UVAKQJBVOS9RUQIESCIJYRWYRUPMIJEHR9EGZ9YMHQXALUUDMCFYFOMLIGORMMBCD'
b'JMFCNNELGPXHICRNRKONBKACHLLSABUNHQ9TU9OSSTQXGWBLRRTSKZORXILALQYRXD'
b'DMXPPUTEGTVCHSOVYZEEJMRRECGBMXBORUTIQUNMJDXBSZSYYA9UOTFWMQOHURUFSU'
b'ESLMILBBKGHTTFTZONNQIMJSLILKAQJRDTNVK9PHAMNKZXRHSOPGKKLJBRDYAC9BRU'
b'RJWUIJLUWXNQOSVVLFEBROMJCGVYZWIPOYFQRBUUNJLIGPVDLADFLZJGZBLEBEQEUD'
b'UZOIFFZLZRXCPQVMIARFLZRIOFFEHVFJZXFQFLCJSEXRPUKGMWBMGXEHIEZKOKGH9J'
b'XAUXACEBLXKLZT9P9NJGXRZWZJAZCNKR9CHRRNCFOLBCSZXKXOIGZHZSTDKTHOWZTE'
b'XWOIZLPEGPKTBRENSCOYBQJSZQ9XPNRORQOWMPGBXKSSLDIUVEAJODEUZJKZE9MBVT'
b'QXWFXXXOG9QGDWMNZEWVWVDZWIFKSWZIDNBYEJP9VBKQNOJEXVPZQTHVUCSK9QCMEP'
b'US9Y9FQPWEACAEBIQSVPJEL9ZBSETINIYMSPIXLADSHTDYRAYUTMXDCABIUUETMNLU'
b'RELTPAGEDNMQZALFWOPAI9WUFOSUTOFUUWFAFRFVYOPITBVSG9IBVNJCOITYMTCCIJ'
b'IZWVPYGQE'
),
TryteString(
b'GWLDXDNSEIQCQJKVVFEWPWR99OKSHTVIJCNFEGSUM9DUQRO9ZJUWOOGP9XLABZFDXN'
b'GOXZLWETWXTTBT9KIGB9VOMMTKNJTUUFGJIYZIMHEAEJTNTIIOLLO9VWCYX9JA9RML'
b'SB9COUYKMRZQWJXMIFXCETZWRDXHBBOYLYLURXBELK9YLIXXGHSP9TNNASKDGFVJQV'
b'99CMXRM9VHASOBYBTWIMAJLBRUPZQLDCKOFAPHG9DKVVEFHTZAGNC9KH9K9HIFNLUI'
b'NQFTQTSALBNV9HRWXDGDEBBKIMQCDWVTMPDIVCXHGKDFPAKTSYYJIROENCJOZXVBNL'
b'UIUJHHAXZ9PTMNFGRRCNHQUVEESVSYNSIQXDRKKBMWJOQSMIK9FPHTNAJUYTQ9BLOG'
b'9GZPXHACSPIFCDX9LIVQDISFAVZWQUXP9BROHMGBHFTVWEWCZRPTAMTXXLVLZBT9BM'
b'OSJXAIGYUXICBUGQDOJRMYFWYGLT9UBTKGZZPNDIPNVIHQIBXFQACGYPWTKJSRHVQL'
b'VJAJWFGNFLAJYOADR9XNOAYOLHKEUGWSOCXYJVHWLRRBE9XYLQDYJXYMURFPXTMNHE'
b'EXJGVY9ADSJICXGWOUKYWVMXMWSJQVPKTUQUSCHTREWZNTXBDUJWDVTMXPABBHGYOC'
b'UNFIFTUQTRAVTCFAQNNAAXBCRILNVZGGKEKIUOXBVMXLFNCSHFMH9PYR9DBXFKWIBT'
b'AZLQTMVEGCZLWESPAHZLVTCGUUJXEAPCEYBXGGARHGDODWULDHHMMKEIYRFFEMQTTG'
b'SGWTOGBZYEULWWFINFHGYEDHHXAJASMQCLBKWYXSBIWZLMEZVXUWP999OROQYLOFVA'
b'ZGJIGHMTGJSZNGXFWMMUCGGQXB9ASA9UCVZLVYZG9XBIF9HUAB9HBYERWFJ9IEDMAY'
b'ZSIFDHOX9HRQSDEGWUAODHRNVBQWTBK9JFZBNKBATUXBZOIEHPTFPQXSBGHGOEVMUT'
b'RPSTRFOKHWEUPUSEROZEBSMTSTZSHFTQ9UXYTMDVLAPXSHZGYLPVDGTCGHOQSWJJED'
b'ARRUPCYFHJOSPVSTNEERBJOERGU9TTOW9GSVZEODZOEQZATYADJ9NURBJNTPBYECGG'
b'WP9SVOZAWXT9RLKBKL9TBBWXSDOSXRJHPKMLIPWKXSM9MPNQWEYLDPRLTUUNEFHXUF'
b'CLLJLZIRGUMEJCTIHC9VPS9YPENZPBYKTSBPXIPZHNYZYDPOYRIFEZWOFDYMZTUOMZ'
b'ZHLSLZMTDIMTTXMHHTDLIVRSIDSWJBCDCKEYZPTLLZP9IMNJSRXICEGTPZXVXAVIBG'
b'JMMOUNPUKXHIANUPGJANUHTG9ZPZCBFRMLHYOPFAKGRZSZJARBEEPQZ9TKJRQLXEG9'
b'IOHETGXCMKT9XZUBPMIQWXRRRFF9POXJBXW9NPUIOYNET9CTUWJB9RQDHVIAFLKILV'
b'BDLOYZAKIRHAUXE9ORNAPVXRTUY9CNXAPFPNUADXHDQWGRCVBZMUASLOPAYHLNGNUV'
b'VTDQCSOSTOOILZFXBXUPILJVVDUIRBWQUYNOJX99BTZNYQZGTENKGEKKADMPDWQB9I'
b'CWBWFHKAPRNDGGWOUXDTJKMASYOPYNYPTOEN9EDLXVVUMELPGG9ZLAJXQFTIEA9HRJ'
b'QCJLRUSLBGIWRWRXMTSAYVNHNJCYDSYNBPH9XEI9NFEDANKTZ9RWSCMPV9XVBTBZVD'
b'O9HABGD9VDOIXFMWBCHERKTDPDQFQSVNZLZRPHVZTFTL9LRAIMXLMTEZFAKK9CMYVP'
b'RTGBXGIMHUUVWCHDUUEZMZFMDSUQRVVPHZDUTOTLPSKQEHWNLOXKGGJKHHUNQIJXUT'
b'NYMZIL9UOEKECBSTCRVTVKUWETWPECLAXJWUNXXNRDBR99KJSWCHJBTMK9TSLLKWUC'
b'MMWNABUZLKLCJXHPUWVLIEIHYTZRPTZJTUMDDVEFCDRQYHPBF9WVMATUIQXGWTGAHQ'
b'STNRVZZIPBRPIUOZLXRGEWSUVDXIQPAONF9QPFYIMUEMDXOMFPKKJNGRBNMKXNJUF9'
b'IQIHPEBHSLWQWXJZNEBKCQUSRWOEGMWFZYGHFUUHDBBOBKSTXT9HGOORUQMFBFBICA'
b'HBQNOBVDCZVGZGASCINUGVEMM9LLPWTNWWVKWYIYDIJEKAVBEFPAVMFWEOYMTOHLZV'
b'PRMIINUJT'
),
])
with patch(
'cornode.transaction.ProposedBundle._create_signature_fragment_generator',
Mock(return_value=mock_signature_fragment_generator),
):
response = self.command(
seed = Seed(
b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
),
transfers = [
ProposedTransaction(
value = 42,
address = Address(
b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
),
),
],
inputs = [
Address(
trytes =
b'TESTVALUETHREE9DONTUSEINPRODUCTION99999N'
b'UMQE9RGHNRRSKKAOSD9WEYBHIUM9LWUWKEFSQOCVW',
#
# Normally, you would use an AddressGenerator to create
# new addresses, so ``key_index`` would be populated
# automatically.
#
# But, AddressGenerator runs a bit slowly, so to speed up
# test execution, we will use hard-coded values.
#
key_index = 4,
),
Address(
trytes =
b'TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJ'
b'RBOSBIMNTGDYKUDYYFJFGZOHORYSQPCWJRKHIOVIY',
key_index = 5,
),
],
)
self.assertDictEqual(
response,
{
'trytes': [
# Input #2, Part 2 of 2
TryteString(
b'GWLDXDNSEIQCQJKVVFEWPWR99OKSHTVIJCNFEGSUM9DUQRO9ZJUWOOGP9XLABZFD'
b'XNGOXZLWETWXTTBT9KIGB9VOMMTKNJTUUFGJIYZIMHEAEJTNTIIOLLO9VWCYX9JA'
b'9RMLSB9COUYKMRZQWJXMIFXCETZWRDXHBBOYLYLURXBELK9YLIXXGHSP9TNNASKD'
b'GFVJQV99CMXRM9VHASOBYBTWIMAJLBRUPZQLDCKOFAPHG9DKVVEFHTZAGNC9KH9K'
b'9HIFNLUINQFTQTSALBNV9HRWXDGDEBBKIMQCDWVTMPDIVCXHGKDFPAKTSYYJIROE'
b'NCJOZXVBNLUIUJHHAXZ9PTMNFGRRCNHQUVEESVSYNSIQXDRKKBMWJOQSMIK9FPHT'
b'NAJUYTQ9BLOG9GZPXHACSPIFCDX9LIVQDISFAVZWQUXP9BROHMGBHFTVWEWCZRPT'
b'AMTXXLVLZBT9BMOSJXAIGYUXICBUGQDOJRMYFWYGLT9UBTKGZZPNDIPNVIHQIBXF'
b'QACGYPWTKJSRHVQLVJAJWFGNFLAJYOADR9XNOAYOLHKEUGWSOCXYJVHWLRRBE9XY'
b'LQDYJXYMURFPXTMNHEEXJGVY9ADSJICXGWOUKYWVMXMWSJQVPKTUQUSCHTREWZNT'
b'XBDUJWDVTMXPABBHGYOCUNFIFTUQTRAVTCFAQNNAAXBCRILNVZGGKEKIUOXBVMXL'
b'FNCSHFMH9PYR9DBXFKWIBTAZLQTMVEGCZLWESPAHZLVTCGUUJXEAPCEYBXGGARHG'
b'DODWULDHHMMKEIYRFFEMQTTGSGWTOGBZYEULWWFINFHGYEDHHXAJASMQCLBKWYXS'
b'BIWZLMEZVXUWP999OROQYLOFVAZGJIGHMTGJSZNGXFWMMUCGGQXB9ASA9UCVZLVY'
b'ZG9XBIF9HUAB9HBYERWFJ9IEDMAYZSIFDHOX9HRQSDEGWUAODHRNVBQWTBK9JFZB'
b'NKBATUXBZOIEHPTFPQXSBGHGOEVMUTRPSTRFOKHWEUPUSEROZEBSMTSTZSHFTQ9U'
b'XYTMDVLAPXSHZGYLPVDGTCGHOQSWJJEDARRUPCYFHJOSPVSTNEERBJOERGU9TTOW'
b'9GSVZEODZOEQZATYADJ9NURBJNTPBYECGGWP9SVOZAWXT9RLKBKL9TBBWXSDOSXR'
b'JHPKMLIPWKXSM9MPNQWEYLDPRLTUUNEFHXUFCLLJLZIRGUMEJCTIHC9VPS9YPENZ'
b'PBYKTSBPXIPZHNYZYDPOYRIFEZWOFDYMZTUOMZZHLSLZMTDIMTTXMHHTDLIVRSID'
b'SWJBCDCKEYZPTLLZP9IMNJSRXICEGTPZXVXAVIBGJMMOUNPUKXHIANUPGJANUHTG'
b'9ZPZCBFRMLHYOPFAKGRZSZJARBEEPQZ9TKJRQLXEG9IOHETGXCMKT9XZUBPMIQWX'
b'RRRFF9POXJBXW9NPUIOYNET9CTUWJB9RQDHVIAFLKILVBDLOYZAKIRHAUXE9ORNA'
b'PVXRTUY9CNXAPFPNUADXHDQWGRCVBZMUASLOPAYHLNGNUVVTDQCSOSTOOILZFXBX'
b'UPILJVVDUIRBWQUYNOJX99BTZNYQZGTENKGEKKADMPDWQB9ICWBWFHKAPRNDGGWO'
b'UXDTJKMASYOPYNYPTOEN9EDLXVVUMELPGG9ZLAJXQFTIEA9HRJQCJLRUSLBGIWRW'
b'RXMTSAYVNHNJCYDSYNBPH9XEI9NFEDANKTZ9RWSCMPV9XVBTBZVDO9HABGD9VDOI'
b'XFMWBCHERKTDPDQFQSVNZLZRPHVZTFTL9LRAIMXLMTEZFAKK9CMYVPRTGBXGIMHU'
b'UVWCHDUUEZMZFMDSUQRVVPHZDUTOTLPSKQEHWNLOXKGGJKHHUNQIJXUTNYMZIL9U'
b'OEKECBSTCRVTVKUWETWPECLAXJWUNXXNRDBR99KJSWCHJBTMK9TSLLKWUCMMWNAB'
b'UZLKLCJXHPUWVLIEIHYTZRPTZJTUMDDVEFCDRQYHPBF9WVMATUIQXGWTGAHQSTNR'
b'VZZIPBRPIUOZLXRGEWSUVDXIQPAONF9QPFYIMUEMDXOMFPKKJNGRBNMKXNJUF9IQ'
b'IHPEBHSLWQWXJZNEBKCQUSRWOEGMWFZYGHFUUHDBBOBKSTXT9HGOORUQMFBFBICA'
b'HBQNOBVDCZVGZGASCINUGVEMM9LLPWTNWWVKWYIYDIJEKAVBEFPAVMFWEOYMTOHL'
b'ZVPRMIINUJTTESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
b'UDYYFJFGZOHORYSQPCWJRKHIOVIY999999999999999999999999999999999999'
b'999999999999999999NYBKIVD99D99999999D99999999PNTRTNQJVPM9LE9XJLX'
b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Input #2, Part 1 of 2
TryteString(
b'KJVG9EKTMPWE9PKWGGJJPDISCX9CJXGWUUPOLKKBVUWUYNBACOOF9LEQGNM9YYGN'
b'XJEMOBGSDCPLP9CQIFBCLENUCJGCCSWYU9WVFTRZZCPCZXEGMDDPSYDTYUIMVFSL'
b'GHTAZWJRHY9ZQMFGROIIUIQVDSOTUIRMSROYDPTWRURMZAILGBWIVADPYHPTSFGK'
b'APPMVTTHGLYXZHPFUO9HBAJIOUJOOABAQSOQUKLSVQGSDIHEHGTQSPM9EDHLEQSF'
b'FAAQXR9MUREVQ9MEGXNXMNJVWXYEIRYAOFHDFZAKNVWKTVIHKXWVT9VOZRUPUKXA'
b'SIFAZQVZSWHBQU9RGVLJMRVQCMCYSQEIMIAKOHNAKQLTIMEHMZMGAKCIPGHQTWOR'
b'BLVKISGPKIIMAMQWMZUNTKJSQZAZNYEGORGNRTKCLNRSOQJRBUCPSDLKLGGRBACI'
b'ULLZBFBUNQXACKL9WFEKKAHGLBBRNNEXZPPH9UZESFFKVBOPROFHQOKYAVTJDDVA'
b'UGUAURHLESIEIITDVVRCTRKOGUPERJHNJMXTLNVMWVDZITSPEHRYJKEZVTZSJEYT'
b'OQEGNJRMCJLYYKPGDFUFQHGWRDGEWBXYOGEZ9IXRWJAQLKHPROWIEVI9ILNOXTPO'
b'SRLETMNEQ9P9WLXCUZNMGFK9EYHABBCSEZSGMNJZOEEGRVNU9ASSOOLCXXZKZPFW'
b'U9EEUUQRACVGZPL9MQINGLYPUTUPTLPKWPHRFFBRHZQWIVOXPGAKCQQPRKPPZUHO'
b'JISYASMRYMCMJZNR9D9OQANUXGJXSUSZQFWDJUTNCDKAUAFYKJNVAMBLTPPRPIJR'
b'RKQMCIHHGKPQPUQHWJNIEPDLRAYSJXVSJVKAGBAJCMGQSCZFTEJSG9LUWZGFBGQU'
b'HFUHWDHND9WJBPOQQXDEATOBGXDGM9BKSDCOEZ9IENZPPDUPMKCUKYBIBTBMJPJL'
b'DNSOPEKHVGQKLGUISUFSYMHR9I9LRPLCXJTDHHEXKQEVIFOUGKJEILQIHFG9FWOU'
b'BXRHCRHLOYAXTFQUWKJBSX9GNPCWXUQJRHDBOBRZPQAPMKCIZGULPZDYLLBMAFJZ'
b'XGIRVAAVUUCSNGDGJQJTAPV9QXYIABIHBXILKQLGDGXQUVADQGDFKKDKMU9WKBEE'
b'Y9TAVRYQDQFKPDMLMUAEGBHVJPSIZOEQGCSYNJCICXROXHPZFUXASQJXZEHQTEUK'
b'FIYQIGJWORKAIQUFROYGMIDFAJOUFAYYWMXUGJFPSRTGEUWWLOXEUTKZCZQHWFUN'
b'HTMZVIJ9VYOLBTAIFB9EN9NFVAABVFIBIWXLJSUOYELOQSIPK99AXSXCPECWOXFU'
b'VDIANVO9PKZUESMFWIEVWLEHLCVKDXEROLNEMYRRCJDPAYVTYAYSL9AFZH9GXHXZ'
b'ORXZEQTUJEDJGCYCQAENYZRKDJSK9TOCKKCXOSSTOAIO9UVAKQJBVOS9RUQIESCI'
b'JYRWYRUPMIJEHR9EGZ9YMHQXALUUDMCFYFOMLIGORMMBCDJMFCNNELGPXHICRNRK'
b'ONBKACHLLSABUNHQ9TU9OSSTQXGWBLRRTSKZORXILALQYRXDDMXPPUTEGTVCHSOV'
b'YZEEJMRRECGBMXBORUTIQUNMJDXBSZSYYA9UOTFWMQOHURUFSUESLMILBBKGHTTF'
b'TZONNQIMJSLILKAQJRDTNVK9PHAMNKZXRHSOPGKKLJBRDYAC9BRURJWUIJLUWXNQ'
b'OSVVLFEBROMJCGVYZWIPOYFQRBUUNJLIGPVDLADFLZJGZBLEBEQEUDUZOIFFZLZR'
b'XCPQVMIARFLZRIOFFEHVFJZXFQFLCJSEXRPUKGMWBMGXEHIEZKOKGH9JXAUXACEB'
b'LXKLZT9P9NJGXRZWZJAZCNKR9CHRRNCFOLBCSZXKXOIGZHZSTDKTHOWZTEXWOIZL'
b'PEGPKTBRENSCOYBQJSZQ9XPNRORQOWMPGBXKSSLDIUVEAJODEUZJKZE9MBVTQXWF'
b'XXXOG9QGDWMNZEWVWVDZWIFKSWZIDNBYEJP9VBKQNOJEXVPZQTHVUCSK9QCMEPUS'
b'9Y9FQPWEACAEBIQSVPJEL9ZBSETINIYMSPIXLADSHTDYRAYUTMXDCABIUUETMNLU'
b'RELTPAGEDNMQZALFWOPAI9WUFOSUTOFUUWFAFRFVYOPITBVSG9IBVNJCOITYMTCC'
b'IJIZWVPYGQETESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
b'UDYYFJFGZOHORYSQPCWJRKHIOVIYYZ9999999999999999999999999999999999'
b'999999999999999999NYBKIVD99C99999999D99999999PNTRTNQJVPM9LE9XJLX'
b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Input #1, Part 2 of 2
TryteString(
b'SWHZKSNCOQXPCGRTYJPUGKLBNEJFXASKY9XAUROGDAO9QQLIVRZQDJDTPLNTBGUU'
b'FGELJPSGUMGPIUNCCTQEFU9UZIJJYJXCYWRADRHHXKBEDG9HTCHJHXUJRKMIUFOS'
b'KDGMI9QPCYQSWDCUYKQQEONJEKYWELG9MSNBHRHILGSSKMRCQJBOGNYBKEMTOFEU'
b'BEOUBD9ULP9PHWYKXEQNDMUR9BGDGPEUFRFRGNGFJFPYQXABSDALTYKL9SM9VVQC'
b'OHY9AS99EYWSHUNEQVVNLS9CNPEVMPOKMWQYFPGTNJBZCDWYPFWSBKZOYXNNVMPO'
b'DEHMHNIYZCHIEDEAB9TLFOWVHF99GVRWUZWSN9IQOKWIXERKRQETZS9ZJJSQRLLP'
b'QXEWNMFVVBWOIK9MBYCEGUJ9HJRIIMBVJNGXMGPGDLOLYWFVQNOKTFZRBJSSBJTE'
b'TGAIUGZOYQOFTVBKAQY9SSJWJXXYAUVUQWPXVCISFSDSHDQCPVMG9GVDAO9GIMDH'
b'ZWJOKSUEUFHBGSCZKNBTZWJXSFYNJSBSEL9UMZBAZRGYCEHOSJBMKMPMNXKEVTMU'
b'DEFWBIKOXUSBNPTNECGVLYSOGUDJDPHYFADXRAOLQXJSJDANINJEOMCFAWWITREG'
b'CDF9OZ9ZKHPJZJNMOVGX9OKQBSGVZYWKNOPVJEOZEI9BPE9GCUEQVAHSBBRBGQTE'
b'XVZCSL9ECOWPOWZCVSCBOUSNQMTJIEKHXL9NCPRMLRNKQEHYJCLRHGZKFNBJIPKS'
b'KPRFTSKFJULTBTXFDQHWUYOSDQBHPAINVEPKCCHJDTZOJIGJZOF9AEQDBKCZSZMI'
b'WUUVHVGAFKALGITVQQKBAHKCIFSVMVZ9UDQABVIANTBUQOFBIXQBWB9KKQOVJZNV'
b'BEDAZKUTRNKGJQWMTEKV9KGCIBRDCBAPKSTMCZGUV9HTAABQDKGQBCRFNXBMZRTH'
b'F9MO9GAGQDYDVLOFMDE9QQZYR9GDSBLUVVMKMCZIMDPNCVLGDKBACWQJRWOQNKBT'
b'SDJFKQMKTVKXVNAHRHZALJGVAMXWJYRAKTEJFXAHBQGSYWWQVECQYPXVFWILNFZK'
b'GGRIFCJBSIZRDJXRJHSURPCZKOWKLFRUMVENEGMNKUAOGVUECDSGAZNQZKBJDJPV'
b'BXLOTID9QLMFNGIWKAAIQTJJROSZBXPQRXAUCV99OGCEOTQCJ9II9ASZL9XGNSVU'
b'XVKPXYOJMF9PX9GSLEROR9FXVQ9MLEMEW9IWNWBNVAYXZ9ZETTDSMLGZAKHE9IUJ'
b'BFUHXW9KWCNZOZCCTFGBGWSDAQGGSPSQHOMUVJMLWBDAKYQZMWPQLLYAGUMOVMVL'
b'FD9TO9OUBTVUHHUNSFSATSEGBFVGDZUBMTWWFDPSQVCUFRVKHYYPDWRPNSKXRFTV'
b'EIBVZNGUZRQCPXVKBPKQDDLEBWIEBIPTEJIYFHBXCUVCCKTKEJAYRZCKAXLMELIN'
b'WUZHG9JFBSBAKHIXMWHUWUFHFNLXNO9GKINYKRTCNN99PHPHO9MJAGUYZAPNSPWU'
b'Z99E9BEADKETLOALWNANYMHSLLQSBS9YTYVJKTVWFUVS9MFOWCHLEUUFUWTYGLZX'
b'FDUXVABTVFXFPUEPIUEIAVSZSSZQJTHNGKBJXADRHVTIBERILMCCGWUITYQHGEEG'
b'WIZZOII9B9EVVVFJNEYEWH9ZVOJGHKVPYKDEZZSPBAOBQGGWPWXTCKSLSHJQYCDH'
b'AYIQ9QVSQFPXZDBYSJJKSNTRXORHLPVOYVMIGALRPXYWQWSJPPFTJCYXAATLBFNS'
b'GVXKFJXHYTILNOQZROUCDUTWOMGYBVTWPKJY9RVKKWQQMVCHJEUBELJDKJPGYLXU'
b'XATNOIJHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHVHE9EEB'
b'QYSNWECSPAJHGLTEUCXALBRVGXFENUCOONSUFZLHTLVQNPDZDIVDQHWVLDEDPFQL'
b'JZWF9GFZMPZXFVEQECLUZBBFVSAPEXJLKKOMXEPHZAKP9WYTGQOML9FQSBMSFLOG'
b'RLFQKUCUWFX9DNAOZSSKBUV9IBVIRNUWYBKJVKLJ9PPNLGJATKDCAGVFIVPXRABH'
b'ZVZACJIG9WOKKLFCRDSMTWSCYHOZEEXRIMPQBXVXQAYKZIADSM9GUBICGKGQYNHK'
b'VYOZFRVCHNMTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
b'KAOSD9WEYBHIUM9LWUWKEFSQOCVW999999999999999999999999999999999999'
b'999999999999999999NYBKIVD99B99999999D99999999PNTRTNQJVPM9LE9XJLX'
b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Input #1, Part 1 of 2
TryteString(
b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWG'
b'HMNUXBJTOBKZFDNJEMAOPPLR9OOQJCDVO9XSCYQJQVTXQDYWQEBIXKDZAFWINAHJ'
b'ELJTDPVMUEWSVCJA9ONDYBNANWCGLBQMEMTBFDMWLCMQHGJLGYDQGIMLSNQHBGSV'
b'TDZSGNQAL9OHRAPDKYSVBTNYRUUBNEEAINJMOVOHOWXUAIEDAIQDESQFCKJELHAV'
b'ODSMXMKEHTDKCDIWWISXSAHQE9TJTLJZGXIABHU9CUACMLVSSYV9UJREPWFVYWWX'
b'PYYJRP9DOEKNDMBSBKKHIFMPXZXIJERXRZVBVDBYNZBBCCOSEDOLDGSNQK99HIYS'
b'WNYYEBLRT9MADLXLLZJOSZCFWAVZY9XUPNZUVOSKBMKXXJNRKDBOSGUGME9QNBMH'
b'IWXWXPEEUVQAQVUXDJGMJOBXG9VJBWPRQRCCQSNBEHTLGOKJVYEPQOJO9QIZLYAV'
b'LCKVXKEKRGBSZJAC9KTSSNMDQGKCLPZDJAQ9PBQMLUONVVFAWTMREGFXJMRRGL9M'
b'KNPOZGOYRPDCYEJCYJUN9HYNSNHXARMRJVXBUHOP9K9BIIEYGSHBUESKTAOQOEAN'
b'EAIHYHVGSVNPXWRBTJAMKMWEQOSYEWXLSRYVOSTMPOGYNPDNFLOICXVHYBDHSXVR'
b'KVWNVZOZQDOITZWICSYEW9RGCPPUJYVIYVTSZILYENYUYUGDSGWVYWRMZJNCTTPV'
b'WDWXAPVZQQKI9CGEQPBFPCLGDDEGBUUTISNCMJXQCTUNKQTLCATNOIRPMEUQBQTH'
b'HQYRGDLZEUZBALNQDXJYZBVXDPLVOVVAUCQSCGRTUJRBBNRV9ORETTGFIXBBBVOP'
b'FHPKGPKVBYFTZMWUVZYVWWSDKQVONMPLLQTV9IZUWLUWZNLCVJNPMG9CMXQG9D9W'
b'YCANBRMYV9DU9FMJT9JHT9RWCGLHFCODXJVFQBLTKJWVNVGSUHNWLHNPLZDSWDMD'
b'VQTLVCSVFJJTIQZFAPCXJWDAXWJKJVOKHALCQQTIXABPFXPUFK9IKXYUGMPXNSQC'
b'JDVETOVEX9LXYLXWRW9PFEYJCUJHLUB9NXTUGLIQMDGPDPSJTWDYEWXQAICLN9BT'
b'GNBJWLVAXZGNCYXGHBMRUVVYTJGH9XDGSZHQDYKFGMOWORSFDFBLJHBRTXRSEBAL'
b'CJIJTQJYDZZKWZGVAPFVKVEOXGYRLMBSPFHUIJZZFMFVOTLPUWSYZCWFZMAALHRG'
b'SYSXSMOHWARYZZVIAKXAHGY9SROWPVFACXXLQEXXOJCKXRRZHBZXJIBWQMMZTRDF'
b'YQBSBBZQQXGCAAECMQINHJRBSGOYPCGWKPWCHBKOJTIGDASZFGONTUGDSOOLEMGO'
b'EBFCZZJZSCGXPHXHB9WGMMFVUTCHDBSAMYTECQZWGCXAWTCTIBZHQVUAIBPZHBBT'
b'ZAERYU9XAMKBHCHGZISSPOWJIRZTAXDHMAYBPXOXWDIUDHNBTFJNVHHJO9AWAEC9'
b'UPRRFJLNGKTXJXFDGODDOPMGLALRIJBVIFLQTYQPKCKCRBYPBYGUUFJGJFVCOURN'
b'KCGNTQNNKHDDPIVZHCJSLDUYHVPAX9YJOFTTFSKFHTOOQQRCPYZKTDVCUZGBOBZK'
b'LVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VZZGUNUCKOYLYXOVHMGULWGSRCGX'
b'ZLJVNIMZBLFOJJKOTUREMBXYOZXDUP9ROUVYOSJBGGFZMIFTKHJHHJGZJNOYQWFZ'
b'AHLJWWDDFQQAMEGJUEUSIWOHKFJWRXRSJWYPGIGZGMFNAIDGDOUUQUVHJZQPJMLC'
b'GKGADXAXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVGXBBPBE'
b'BDVGZDBWMDMLPXYJBBRNOMKGR9TSVUXSRYXQTCTYLFQORMIGDKBJLNLCQXACVCBJ'
b'GVWRJNYPCKOAILPLMWBYKDLDXLIZMZFWDXUWDEGDUURQGMJNUGJXDXYJGKOTQBGC'
b'HATROPKEN9YTXDUOCMXPGHPDANTJFRRVEVBFVCNTWNMMOVAVKBNSJIWWBVHBMCSU'
b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHV'
b'ITGYROZZNQPTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
b'KAOSD9WEYBHIUM9LWUWKEFSQOCVWN99999999999999999999999999999999999'
b'999999999999999999NYBKIVD99A99999999D99999999PNTRTNQJVPM9LE9XJLX'
b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Spend transaction, Part 1 of 1
TryteString(
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'99999999999TESTVALUETWO9DONTUSEINPRODUCTION99999XYYNXZLKBYNFPXA9'
b'RUGZVEGVPLLFJEM9ZZOUINE9ONOWOB9999999999999999999999999999999999'
b'999999999999999999NYBKIVD99999999999D99999999PNTRTNQJVPM9LE9XJLX'
b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
],
},
)
def test_pass_inputs_explicit_with_change(self):
    """
    Preparing a bundle with specified inputs, change address needed.

    The explicitly-specified input reports a balance of 86, but the
    transfer only spends 42, so the prepared bundle must include an
    extra transaction sending the remainder to ``changeAddress``.
    """
    # Canned node response for the balance lookup on the explicit input.
    self.adapter.seed_response('getBalances', {
        'balances': [86],
        'duration': '1',

        'milestone':
            'TESTVALUE9DONTUSEINPRODUCTION99999ZNIUXU'
            'FIVFBBYQHFYZYIEEWZL9VPMMKIIYTEZRRHXJXKIKF',
    })

    # Pre-computed signature fragments, injected below so the test does
    # not depend on the real (slow) signing implementation.
    mock_signature_fragment_generator = MockSignatureFragmentGenerator([
        TryteString(
            b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWGHM'
            b'NUXBJTOBKZFDNJEZUKCKWGUHVSU9ZJYAVSQSOFDCOIEP9LCXYLTEFMCYUJAAHLYUHQ'
            b'P99S9XRWHXHRPZCWHDMIDYW9OQAWUPTFMBTJGDCWRVNVRDPIWISVYNUDWUGBPNNFZD'
            b'WRVZ9FGAVSEWFXRXGGLXJTPJTJLC9JYHMFBKYAUJRAMHQHKUUZHRWZIVC9KFEEXXVN'
            b'EXJRYUSFV9PEPFUDCNRRTSCZXSTUEGJKDV9UCYNZSBRDYGOKFGYKWVFCYSWBUJYVGE'
            b'UXWTDGPWTWURH9RKEZRFCUUBFBPKSFONMDXWGYKWAUWVUOQVBIGQMMKQVDYAZ9SVFI'
            b'UUNMHOJGRQVXZGIIPKVNNBKABGKZLRNFK9KSIHTCGYPVCWYGDS9OIZWLNINYRLGJQC'
            b'UBWYMAVDWFAURLALQPMRMFRAZCMCPOWM99SGBVEZPAFAXHXNENNWXLF9ZVHZIDWBLF'
            b'KVWKBUYNBXOXTVPDWAGZXIOMDAEKNMRFGZVIGIFOSHGMPIPWNOWQDMHPKOJTYYECKN'
            b'GCDDTJVALGPZSX9IH9LEGQSDACLBWKNXUW9BAZSHAISUJDTPJDOASLVRXFNJJHXQTK'
            b'MKZUZIMJFPOKHEQXSCJQH9JPRNZHDVVZKWTHWWFNFMHFXPUIEEA9HPHJTCJJWZPUHK'
            b'AAWJQQSAIF9HRETYYPXAZ9YCFJRCXTGCOLJQA9HDLFNTVDMYPRCYPQR9MNBBAMGOJX'
            b'PRFCUSIIZN9VROZDPMOKZBCILKGB9EPCXOYWLPHFXTYBCMLRVHWNQDSQUIHHTAUTZC'
            b'JFQ9CO9GTONKYKMDBSREZC9SUBHYK9JDOBYDBUBUIO9TRXQLAYHDDSXGJ9NB9FKMUU'
            b'US9GANWVMQLIHX9MPJGLTAOMCZTQYDYVOWXHGHYCV9VDCXHGTCOOUEXIITVKHXCSUS'
            b'OIRTMEAKMTYZCMAWURNX9JOVDICICKHXQYBXKWTXWXBZVZWRIDC9YCZVSKYIKJYYMF'
            b'YQRTWBNJHWXRL9JFSZAXJYYTGDYLTHLWRMBUEG9QTGNRPVTBGRYFPEJQSIWTLPGV9C'
            b'CMCO9TCKLKSJEAMFKQMXEYETISVEYDOSCRZ99RFDPUQPHMQ9NVRUBXITDGFZCYQNFC'
            b'SULGRHPONWJDVWT9UELEKEPQEAFKDLDNYPABC9GUASVFJBFZF9Z9CHIUNLJWHKGDYK'
            b'ADLUCRNEPAIWYSX9LT9QWQRKU9WEVDPKSTSA9PPEVNTBNLN9ZOPETINXGKA9DCOHPD'
            b'QMMOOOCKYVEZJ9ZJQRJHNCKRFDRPHUVPGVGQYKZBLOILZTPIX9MIBKTXOJKVAYRLSX'
            b'DTOEEKLF9WWZGLSGIOQZWCJJHSBTXYWRDYVEQTCNUENYWDRLZZIVTGCXEAJDRY9OVM'
            b'XJGCSQSGYFLGYDZUH9EHUDQTCXLSDPMNDYQRZYRXYXKY9GIYOSIDQPXXHKJKDQLSCU'
            b'Y9FFBTPSTJFEFROCEXFFYTFYHQROAVTYKQOCOQQWBN9RKJ9JJEURKTVOECYRITTYKN'
            b'OGCD9OPQ9WDMKRPIUNRAVUSLFMC9WZWHSESGLDUYHVPAX9YJOFTTFSKFHTOOQQRCPY'
            b'ZKTDVCUZGBOBZKLVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VDFKWVEWV9WGXPJH'
            b'UBWYXGECBPQOPOHG9YCVXDWOXTEAOFBCEEAV9JCHUVLIRIMHXMUSZPOMMRBF9PLVLR'
            b'JYTXTBANBZWFQWGNGFGXFOZ9YGMQSZFEJHLFZTTVHRLJPATA9TYCM9LSEWMNEUDNWQ'
            b'FLUXOFUNVDKSNIIXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVG'
            b'XBBPBEBDVGZDBWMDMLPXYJBBRNOMKGPMCG9FTSLMRADFVPUTTEIOUCBLPRYZHGOICN'
            b'C9BT9WHJJJPDOSOMLD9EKRGKYUHUMMCAVHGYWOVQXFLTCXAAUDYKGKGKOYHLDCCQSK'
            b'NHJHPSXTJVTW9QPFOQ9FDZIDDKIVF9CDYGU9ABRESMDLIBONAQWFVGCNOTEDHBMCSU'
            b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHVIT'
            b'GYROZZNQP'
        ),

        TryteString(
            b'ZOJNUMZOBEHLYDSDAVZKXHF9MAHAJICBMJTZZHTQTCACVQAUSSCFUMGCSJTONNKXFI'
            b'NPOAXQIKSJ9GUV9GXM9KYDCDWUHULIJMSKMOLDZBYE9FTGFMKLODKHFF9YUCPTYFFM'
            b'9EDCJDCKRFLZUHGGYNYFJLBFWXCIUF9HMGUQKPUCJ9OQ99FXHSUSRRBEUSSCKCYPIE'
            b'AFZJQNXEUYWLEXKZWLRINBEGAZTJMYTUEQTTORMIIQASISHSHZDQJXANFLKOIRUEJU'
            b'PZZHUJFWHEXFIZ9OU99SQLDDNLARDFPGYSCMXQCMGPRB9QLM99QUBLTLTKWYXHVAFU'
            b'VVAMHEYCCNVEITSPVQWMSEIZJSLPWNGWISKWQNXCNRNOIGRGUHGYWLOFNXBDCT9JLA'
            b'9CEKW9BFGOESKGOQLJBTLUMOICBEZDHCR9SZCJUZVXIEAVITFJFDGNJII9LSW9IQKV'
            b'99UJWWAACGIRPCZUENXGILUXCMJIGW9REUNA99MWSANWL9KVKKXCKXLRGDT9NXIGQV'
            b'ZWG9NBQPOQKEEET9ZUSENFPGFDNNHGBITCPASGHOPBNYKKEHKHVATNVWX9ZGTISUKP'
            b'KTMWMPCGVVJSGMRJWNFICSFUAVAHIZWA9PDOIXFJGWCPTZHUDDUFJVQPBYNJREQ99U'
            b'HOESTT9FELDMVK9VHZYPRVOWEW9NXTCYDCIMT9UIWGXUFYILOPOCJFVVEJEJN9ULGX'
            b'IABFJWWRKAD9NHZBULMWUKESZLCPRQVVKWOHEWSTLOFNA9KNERURWJPROBBXEWICDK'
            b'KCQXWYMJUCQLWEUPFXRSNMIJWQUEJUNIKDYJILXCGCLFETWOZYIUZVJVYVB9YGXSSD'
            b'XYXSJXTOQZ9CCCAKMCNNKQCYEDGSGTBICCOGEHRIVMICUQPUUFRFCBF9NUUWSQBTVI'
            b'YFVWAASTQJZFDDWWUUIHPKTIIVAGGIEQCZUEVOFDMQLDESMQDPQUSOOKZJ9QLXTAFP'
            b'XXILFHFUIFJTKSEHXXZBPTZUGLYUZNORFOEKQDEIWGXZPBXSOGGQFILUJTKDLWVKPV'
            b'ISU9QOATYVKJHLDLOKROZNFAGS9CICXXIUQQVLLRPPPDYJVSCW9OWIHKADCVSKPWTE'
            b'NYEWQWEHP9DDWOUJDWSTSOGYQPALFMKCTUGLSXHNYETTMYTS999SYQVQSPHQPKRJSU'
            b'Y9QTABAJOJAAMGVBCSLAAOBXZOJZLIFXUYOVXBKHPFVTKKGSIHUXMBDTMGNVL9NXYC'
            b'HOVTLGDICIWTCIGNRHLBZBVSXMPBFAWIXPCDJWNDUFHUVLBSPBWICZNYIUJPRRTOCS'
            b'SCVPNBXEDCMHKFVDMHJTSP9JI9BXTD9ZILEEOCBMHCQRRDNL9EUKJGJ9MPQGQU9ZFY'
            b'GVSNOYAEC9NWTCVEJBSXLYWTUPMXNAAWXSBIAJYSGYHGLYOMAHFTYMICZRDZTQXHAQ'
            b'GVXENKIGW9XZTPBAIMZLHWAJCGY9ZDNQOTGDRCTXSJCEJVTTMVRYYKWAFYSV9WVEVC'
            b'FAXJKJNUC9NQHPEXWIOHOJQEXJNLEW9GLO9AJCJXIEXDONOGKXFJ9OXXXETUEHLBXA'
            b'JGFPHKAQDCRTKQBXPZYQZBQODTVIBUTSAEXMBFBMTAXOQZCOHWEWRJEKNKHZXXSO9U'
            b'SZRWUPZAASWDBXOVAEGSAGYDIOZWSSEAIQVRWFDSOXSRRRQHRCWDJWZXXJOGPZRLKQ'
            b'OA9DOY9RXZNWBFJTKUOVRRQNSDUOFGCUQNHOBMJSFQZXVBPHHBRRIXZNLXAH9P9EFM'
            b'GRPGSCFRZINEPOQPXPKHTSRJWARXRGJGYMTPUKQISLV9GUC9VTJLOISKGUZCTZEYND'
            b'TURLBPXGNQLVXHAHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHV'
            b'HE9EEBQYSNWECSPAJHGLTEUCXALBRVTKMWSWCBPUMZFVSEEFIHBAGJVVQV9QLFEGGY'
            b'VPNSDOBZEQGLEFLCQVPDJA9MQDRHYNVZVNTYNJ9GJCXKED9NEWTD9RVMNA9HOHUBLL'
            b'ASNQSDLDZKOMFOEGBJZPYVYZCVHYFEGSVEHSWV9WAGMEQIUDZQZUACWYQLTD9LHBVK'
            b'KNXXXDWQUWRJKTCDP9CEJOHLLPTWCIKKHHIFAFFDVMFZR9A9LYVMTQAPAXAVPJOZKW'
            b'FQNAJTO99'
        ),
    ])

    # Swap the bundle's signature-fragment generator for the canned one.
    with patch(
        'cornode.transaction.ProposedBundle._create_signature_fragment_generator',
        Mock(return_value=mock_signature_fragment_generator),
    ):
        response = self.command(
            seed = Seed(
                b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
                b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
            ),

            transfers = [
                ProposedTransaction(
                    value = 42,

                    address = Address(
                        b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
                        b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
                    ),
                ),
            ],

            inputs = [
                Address(
                    trytes =
                        b'TESTVALUETHREE9DONTUSEINPRODUCTION99999N'
                        b'UMQE9RGHNRRSKKAOSD9WEYBHIUM9LWUWKEFSQOCVW',

                    key_index = 4,
                ),
            ],

            changeAddress =
                Address(
                    b'TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJ'
                    b'RBOSBIMNTGDYKUDYYFJFGZOHORYSQPCWJRKHIOVIY',
                ),
        )

    # Expected bundle trytes.  NOTE(review): the list appears to be in
    # reverse bundle order (change transaction first, spend transaction
    # last; Input Part 2 precedes Part 1) — confirm against the
    # prepareTransfer contract.
    self.assertDictEqual(
        response,

        {
            'trytes': [
                # Change transaction, Part 1 of 1
                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
                    b'UDYYFJFGZOHORYSQPCWJRKHIOVIYQB9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99C99999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Input #1, Part 2 of 2
                TryteString(
                    b'ZOJNUMZOBEHLYDSDAVZKXHF9MAHAJICBMJTZZHTQTCACVQAUSSCFUMGCSJTONNKX'
                    b'FINPOAXQIKSJ9GUV9GXM9KYDCDWUHULIJMSKMOLDZBYE9FTGFMKLODKHFF9YUCPT'
                    b'YFFM9EDCJDCKRFLZUHGGYNYFJLBFWXCIUF9HMGUQKPUCJ9OQ99FXHSUSRRBEUSSC'
                    b'KCYPIEAFZJQNXEUYWLEXKZWLRINBEGAZTJMYTUEQTTORMIIQASISHSHZDQJXANFL'
                    b'KOIRUEJUPZZHUJFWHEXFIZ9OU99SQLDDNLARDFPGYSCMXQCMGPRB9QLM99QUBLTL'
                    b'TKWYXHVAFUVVAMHEYCCNVEITSPVQWMSEIZJSLPWNGWISKWQNXCNRNOIGRGUHGYWL'
                    b'OFNXBDCT9JLA9CEKW9BFGOESKGOQLJBTLUMOICBEZDHCR9SZCJUZVXIEAVITFJFD'
                    b'GNJII9LSW9IQKV99UJWWAACGIRPCZUENXGILUXCMJIGW9REUNA99MWSANWL9KVKK'
                    b'XCKXLRGDT9NXIGQVZWG9NBQPOQKEEET9ZUSENFPGFDNNHGBITCPASGHOPBNYKKEH'
                    b'KHVATNVWX9ZGTISUKPKTMWMPCGVVJSGMRJWNFICSFUAVAHIZWA9PDOIXFJGWCPTZ'
                    b'HUDDUFJVQPBYNJREQ99UHOESTT9FELDMVK9VHZYPRVOWEW9NXTCYDCIMT9UIWGXU'
                    b'FYILOPOCJFVVEJEJN9ULGXIABFJWWRKAD9NHZBULMWUKESZLCPRQVVKWOHEWSTLO'
                    b'FNA9KNERURWJPROBBXEWICDKKCQXWYMJUCQLWEUPFXRSNMIJWQUEJUNIKDYJILXC'
                    b'GCLFETWOZYIUZVJVYVB9YGXSSDXYXSJXTOQZ9CCCAKMCNNKQCYEDGSGTBICCOGEH'
                    b'RIVMICUQPUUFRFCBF9NUUWSQBTVIYFVWAASTQJZFDDWWUUIHPKTIIVAGGIEQCZUE'
                    b'VOFDMQLDESMQDPQUSOOKZJ9QLXTAFPXXILFHFUIFJTKSEHXXZBPTZUGLYUZNORFO'
                    b'EKQDEIWGXZPBXSOGGQFILUJTKDLWVKPVISU9QOATYVKJHLDLOKROZNFAGS9CICXX'
                    b'IUQQVLLRPPPDYJVSCW9OWIHKADCVSKPWTENYEWQWEHP9DDWOUJDWSTSOGYQPALFM'
                    b'KCTUGLSXHNYETTMYTS999SYQVQSPHQPKRJSUY9QTABAJOJAAMGVBCSLAAOBXZOJZ'
                    b'LIFXUYOVXBKHPFVTKKGSIHUXMBDTMGNVL9NXYCHOVTLGDICIWTCIGNRHLBZBVSXM'
                    b'PBFAWIXPCDJWNDUFHUVLBSPBWICZNYIUJPRRTOCSSCVPNBXEDCMHKFVDMHJTSP9J'
                    b'I9BXTD9ZILEEOCBMHCQRRDNL9EUKJGJ9MPQGQU9ZFYGVSNOYAEC9NWTCVEJBSXLY'
                    b'WTUPMXNAAWXSBIAJYSGYHGLYOMAHFTYMICZRDZTQXHAQGVXENKIGW9XZTPBAIMZL'
                    b'HWAJCGY9ZDNQOTGDRCTXSJCEJVTTMVRYYKWAFYSV9WVEVCFAXJKJNUC9NQHPEXWI'
                    b'OHOJQEXJNLEW9GLO9AJCJXIEXDONOGKXFJ9OXXXETUEHLBXAJGFPHKAQDCRTKQBX'
                    b'PZYQZBQODTVIBUTSAEXMBFBMTAXOQZCOHWEWRJEKNKHZXXSO9USZRWUPZAASWDBX'
                    b'OVAEGSAGYDIOZWSSEAIQVRWFDSOXSRRRQHRCWDJWZXXJOGPZRLKQOA9DOY9RXZNW'
                    b'BFJTKUOVRRQNSDUOFGCUQNHOBMJSFQZXVBPHHBRRIXZNLXAH9P9EFMGRPGSCFRZI'
                    b'NEPOQPXPKHTSRJWARXRGJGYMTPUKQISLV9GUC9VTJLOISKGUZCTZEYNDTURLBPXG'
                    b'NQLVXHAHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHVHE9EEB'
                    b'QYSNWECSPAJHGLTEUCXALBRVTKMWSWCBPUMZFVSEEFIHBAGJVVQV9QLFEGGYVPNS'
                    b'DOBZEQGLEFLCQVPDJA9MQDRHYNVZVNTYNJ9GJCXKED9NEWTD9RVMNA9HOHUBLLAS'
                    b'NQSDLDZKOMFOEGBJZPYVYZCVHYFEGSVEHSWV9WAGMEQIUDZQZUACWYQLTD9LHBVK'
                    b'KNXXXDWQUWRJKTCDP9CEJOHLLPTWCIKKHHIFAFFDVMFZR9A9LYVMTQAPAXAVPJOZ'
                    b'KWFQNAJTO99TESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
                    b'KAOSD9WEYBHIUM9LWUWKEFSQOCVW999999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99B99999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Input #1, Part 1 of 2
                TryteString(
                    b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWG'
                    b'HMNUXBJTOBKZFDNJEZUKCKWGUHVSU9ZJYAVSQSOFDCOIEP9LCXYLTEFMCYUJAAHL'
                    b'YUHQP99S9XRWHXHRPZCWHDMIDYW9OQAWUPTFMBTJGDCWRVNVRDPIWISVYNUDWUGB'
                    b'PNNFZDWRVZ9FGAVSEWFXRXGGLXJTPJTJLC9JYHMFBKYAUJRAMHQHKUUZHRWZIVC9'
                    b'KFEEXXVNEXJRYUSFV9PEPFUDCNRRTSCZXSTUEGJKDV9UCYNZSBRDYGOKFGYKWVFC'
                    b'YSWBUJYVGEUXWTDGPWTWURH9RKEZRFCUUBFBPKSFONMDXWGYKWAUWVUOQVBIGQMM'
                    b'KQVDYAZ9SVFIUUNMHOJGRQVXZGIIPKVNNBKABGKZLRNFK9KSIHTCGYPVCWYGDS9O'
                    b'IZWLNINYRLGJQCUBWYMAVDWFAURLALQPMRMFRAZCMCPOWM99SGBVEZPAFAXHXNEN'
                    b'NWXLF9ZVHZIDWBLFKVWKBUYNBXOXTVPDWAGZXIOMDAEKNMRFGZVIGIFOSHGMPIPW'
                    b'NOWQDMHPKOJTYYECKNGCDDTJVALGPZSX9IH9LEGQSDACLBWKNXUW9BAZSHAISUJD'
                    b'TPJDOASLVRXFNJJHXQTKMKZUZIMJFPOKHEQXSCJQH9JPRNZHDVVZKWTHWWFNFMHF'
                    b'XPUIEEA9HPHJTCJJWZPUHKAAWJQQSAIF9HRETYYPXAZ9YCFJRCXTGCOLJQA9HDLF'
                    b'NTVDMYPRCYPQR9MNBBAMGOJXPRFCUSIIZN9VROZDPMOKZBCILKGB9EPCXOYWLPHF'
                    b'XTYBCMLRVHWNQDSQUIHHTAUTZCJFQ9CO9GTONKYKMDBSREZC9SUBHYK9JDOBYDBU'
                    b'BUIO9TRXQLAYHDDSXGJ9NB9FKMUUUS9GANWVMQLIHX9MPJGLTAOMCZTQYDYVOWXH'
                    b'GHYCV9VDCXHGTCOOUEXIITVKHXCSUSOIRTMEAKMTYZCMAWURNX9JOVDICICKHXQY'
                    b'BXKWTXWXBZVZWRIDC9YCZVSKYIKJYYMFYQRTWBNJHWXRL9JFSZAXJYYTGDYLTHLW'
                    b'RMBUEG9QTGNRPVTBGRYFPEJQSIWTLPGV9CCMCO9TCKLKSJEAMFKQMXEYETISVEYD'
                    b'OSCRZ99RFDPUQPHMQ9NVRUBXITDGFZCYQNFCSULGRHPONWJDVWT9UELEKEPQEAFK'
                    b'DLDNYPABC9GUASVFJBFZF9Z9CHIUNLJWHKGDYKADLUCRNEPAIWYSX9LT9QWQRKU9'
                    b'WEVDPKSTSA9PPEVNTBNLN9ZOPETINXGKA9DCOHPDQMMOOOCKYVEZJ9ZJQRJHNCKR'
                    b'FDRPHUVPGVGQYKZBLOILZTPIX9MIBKTXOJKVAYRLSXDTOEEKLF9WWZGLSGIOQZWC'
                    b'JJHSBTXYWRDYVEQTCNUENYWDRLZZIVTGCXEAJDRY9OVMXJGCSQSGYFLGYDZUH9EH'
                    b'UDQTCXLSDPMNDYQRZYRXYXKY9GIYOSIDQPXXHKJKDQLSCUY9FFBTPSTJFEFROCEX'
                    b'FFYTFYHQROAVTYKQOCOQQWBN9RKJ9JJEURKTVOECYRITTYKNOGCD9OPQ9WDMKRPI'
                    b'UNRAVUSLFMC9WZWHSESGLDUYHVPAX9YJOFTTFSKFHTOOQQRCPYZKTDVCUZGBOBZK'
                    b'LVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VDFKWVEWV9WGXPJHUBWYXGECBPQO'
                    b'POHG9YCVXDWOXTEAOFBCEEAV9JCHUVLIRIMHXMUSZPOMMRBF9PLVLRJYTXTBANBZ'
                    b'WFQWGNGFGXFOZ9YGMQSZFEJHLFZTTVHRLJPATA9TYCM9LSEWMNEUDNWQFLUXOFUN'
                    b'VDKSNIIXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVGXBBPBE'
                    b'BDVGZDBWMDMLPXYJBBRNOMKGPMCG9FTSLMRADFVPUTTEIOUCBLPRYZHGOICNC9BT'
                    b'9WHJJJPDOSOMLD9EKRGKYUHUMMCAVHGYWOVQXFLTCXAAUDYKGKGKOYHLDCCQSKNH'
                    b'JHPSXTJVTW9QPFOQ9FDZIDDKIVF9CDYGU9ABRESMDLIBONAQWFVGCNOTEDHBMCSU'
                    b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHV'
                    b'ITGYROZZNQPTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
                    b'KAOSD9WEYBHIUM9LWUWKEFSQOCVWVX9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99A99999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Spend transaction, Part 1 of 1
                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUETWO9DONTUSEINPRODUCTION99999XYYNXZLKBYNFPXA9'
                    b'RUGZVEGVPLLFJEM9ZZOUINE9ONOWOB9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99999999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
            ],
        },
    )
def test_fail_inputs_explicit_insufficient(self):
    """
    Specified inputs are not sufficient to cover spend amount.

    The node reports only 30 available on the explicit input, while
    the transfer needs 42; the command is expected to raise
    ``BadApiResponse``.
    """
    # Canned node response: balance lookup returns less than the amount
    # the transfer below tries to spend.
    self.adapter.seed_response('getBalances', {
        'balances': [30],
        'duration': '1',

        'milestone':
            'TESTVALUE9DONTUSEINPRODUCTION99999ZNIUXU'
            'FIVFBBYQHFYZYIEEWZL9VPMMKIIYTEZRRHXJXKIKF',
    })

    # Build the request pieces up front so the failing call reads clearly.
    sender_seed = Seed(
        b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
        b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
    )

    spend = ProposedTransaction(
        value = 42,

        address = Address(
            b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
            b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
        ),
    )

    underfunded_input = Address(
        trytes =
            b'TESTVALUETHREE9DONTUSEINPRODUCTION99999N'
            b'UMQE9RGHNRRSKKAOSD9WEYBHIUM9LWUWKEFSQOCVW',

        key_index = 4,
    )

    with self.assertRaises(BadApiResponse):
        self.command(
            seed = sender_seed,
            transfers = [spend],
            inputs = [underfunded_input],
        )
def test_pass_inputs_implicit_no_change(self):
    """
    Preparing a bundle that finds inputs to use automatically, no
    change address needed.

    The two discovered inputs (13i + 29i) exactly cover the 42i
    spend, so the prepared bundle contains no change transaction.
    """
    # To keep the unit test focused, we will mock the ``getInputs``
    # command that ``prepareTransfer`` calls internally.
    #
    # References:
    #   - :py:class:`cornode.commands.extended.prepare_transfer.PrepareTransferCommand`
    #   - :py:class:`cornode.commands.extended.get_inputs.GetInputsCommand`
    mock_get_inputs = Mock(return_value={
        'inputs': [
            Address(
                trytes =
                    b'TESTVALUETHREE9DONTUSEINPRODUCTION99999N'
                    b'UMQE9RGHNRRSKKAOSD9WEYBHIUM9LWUWKEFSQOCVW',

                balance   = 13,
                key_index = 4,
            ),

            Address(
                trytes =
                    b'TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJ'
                    b'RBOSBIMNTGDYKUDYYFJFGZOHORYSQPCWJRKHIOVIY',

                balance   = 29,
                key_index = 5,
            ),
        ],

        'totalBalance': 42,
    })

    # Pre-computed signature fragments (2187 trytes each) that the
    # mocked signature generator hands out in order, so the test does
    # not depend on the real (slow) signing implementation.  The
    # first two fragments sign Input #1, the last two sign Input #2.
    mock_signature_fragment_generator = MockSignatureFragmentGenerator([
        TryteString(
            b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWGHM'
            b'NUXBJTOBKZFDNJEMAOPPLR9OOQJCDVO9XSCYQJQVTXQDYWQEBIXKDZAFWINAHJELJT'
            b'DPVMUEWSVCJA9ONDYBNANWCGLBQMEMTBFDMWLCMQHGJLGYDQGIMLSNQHBGSVTDZSGN'
            b'QAL9OHRAPDKYSVBTNYRUUBNEEAINJMOVOHOWXUAIEDAIQDESQFCKJELHAVODSMXMKE'
            b'HTDKCDIWWISXSAHQE9TJTLJZGXIABHU9CUACMLVSSYV9UJREPWFVYWWXPYYJRP9DOE'
            b'KNDMBSBKKHIFMPXZXIJERXRZVBVDBYNZBBCCOSEDOLDGSNQK99HIYSWNYYEBLRT9MA'
            b'DLXLLZJOSZCFWAVZY9XUPNZUVOSKBMKXXJNRKDBOSGUGME9QNBMHIWXWXPEEUVQAQV'
            b'UXDJGMJOBXG9VJBWPRQRCCQSNBEHTLGOKJVYEPQOJO9QIZLYAVLCKVXKEKRGBSZJAC'
            b'9KTSSNMDQGKCLPZDJAQ9PBQMLUONVVFAWTMREGFXJMRRGL9MKNPOZGOYRPDCYEJCYJ'
            b'UN9HYNSNHXARMRJVXBUHOP9K9BIIEYGSHBUESKTAOQOEANEAIHYHVGSVNPXWRBTJAM'
            b'KMWEQOSYEWXLSRYVOSTMPOGYNPDNFLOICXVHYBDHSXVRKVWNVZOZQDOITZWICSYEW9'
            b'RGCPPUJYVIYVTSZILYENYUYUGDSGWVYWRMZJNCTTPVWDWXAPVZQQKI9CGEQPBFPCLG'
            b'DDEGBUUTISNCMJXQCTUNKQTLCATNOIRPMEUQBQTHHQYRGDLZEUZBALNQDXJYZBVXDP'
            b'LVOVVAUCQSCGRTUJRBBNRV9ORETTGFIXBBBVOPFHPKGPKVBYFTZMWUVZYVWWSDKQVO'
            b'NMPLLQTV9IZUWLUWZNLCVJNPMG9CMXQG9D9WYCANBRMYV9DU9FMJT9JHT9RWCGLHFC'
            b'ODXJVFQBLTKJWVNVGSUHNWLHNPLZDSWDMDVQTLVCSVFJJTIQZFAPCXJWDAXWJKJVOK'
            b'HALCQQTIXABPFXPUFK9IKXYUGMPXNSQCJDVETOVEX9LXYLXWRW9PFEYJCUJHLUB9NX'
            b'TUGLIQMDGPDPSJTWDYEWXQAICLN9BTGNBJWLVAXZGNCYXGHBMRUVVYTJGH9XDGSZHQ'
            b'DYKFGMOWORSFDFBLJHBRTXRSEBALCJIJTQJYDZZKWZGVAPFVKVEOXGYRLMBSPFHUIJ'
            b'ZZFMFVOTLPUWSYZCWFZMAALHRGSYSXSMOHWARYZZVIAKXAHGY9SROWPVFACXXLQEXX'
            b'OJCKXRRZHBZXJIBWQMMZTRDFYQBSBBZQQXGCAAECMQINHJRBSGOYPCGWKPWCHBKOJT'
            b'IGDASZFGONTUGDSOOLEMGOEBFCZZJZSCGXPHXHB9WGMMFVUTCHDBSAMYTECQZWGCXA'
            b'WTCTIBZHQVUAIBPZHBBTZAERYU9XAMKBHCHGZISSPOWJIRZTAXDHMAYBPXOXWDIUDH'
            b'NBTFJNVHHJO9AWAEC9UPRRFJLNGKTXJXFDGODDOPMGLALRIJBVIFLQTYQPKCKCRBYP'
            b'BYGUUFJGJFVCOURNKCGNTQNNKHDDPIVZHCJSLDUYHVPAX9YJOFTTFSKFHTOOQQRCPY'
            b'ZKTDVCUZGBOBZKLVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VZZGUNUCKOYLYXOV'
            b'HMGULWGSRCGXZLJVNIMZBLFOJJKOTUREMBXYOZXDUP9ROUVYOSJBGGFZMIFTKHJHHJ'
            b'GZJNOYQWFZAHLJWWDDFQQAMEGJUEUSIWOHKFJWRXRSJWYPGIGZGMFNAIDGDOUUQUVH'
            b'JZQPJMLCGKGADXAXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVG'
            b'XBBPBEBDVGZDBWMDMLPXYJBBRNOMKGR9TSVUXSRYXQTCTYLFQORMIGDKBJLNLCQXAC'
            b'VCBJGVWRJNYPCKOAILPLMWBYKDLDXLIZMZFWDXUWDEGDUURQGMJNUGJXDXYJGKOTQB'
            b'GCHATROPKEN9YTXDUOCMXPGHPDANTJFRRVEVBFVCNTWNMMOVAVKBNSJIWWBVHBMCSU'
            b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHVIT'
            b'GYROZZNQP'
        ),

        TryteString(
            b'SWHZKSNCOQXPCGRTYJPUGKLBNEJFXASKY9XAUROGDAO9QQLIVRZQDJDTPLNTBGUUFG'
            b'ELJPSGUMGPIUNCCTQEFU9UZIJJYJXCYWRADRHHXKBEDG9HTCHJHXUJRKMIUFOSKDGM'
            b'I9QPCYQSWDCUYKQQEONJEKYWELG9MSNBHRHILGSSKMRCQJBOGNYBKEMTOFEUBEOUBD'
            b'9ULP9PHWYKXEQNDMUR9BGDGPEUFRFRGNGFJFPYQXABSDALTYKL9SM9VVQCOHY9AS99'
            b'EYWSHUNEQVVNLS9CNPEVMPOKMWQYFPGTNJBZCDWYPFWSBKZOYXNNVMPODEHMHNIYZC'
            b'HIEDEAB9TLFOWVHF99GVRWUZWSN9IQOKWIXERKRQETZS9ZJJSQRLLPQXEWNMFVVBWO'
            b'IK9MBYCEGUJ9HJRIIMBVJNGXMGPGDLOLYWFVQNOKTFZRBJSSBJTETGAIUGZOYQOFTV'
            b'BKAQY9SSJWJXXYAUVUQWPXVCISFSDSHDQCPVMG9GVDAO9GIMDHZWJOKSUEUFHBGSCZ'
            b'KNBTZWJXSFYNJSBSEL9UMZBAZRGYCEHOSJBMKMPMNXKEVTMUDEFWBIKOXUSBNPTNEC'
            b'GVLYSOGUDJDPHYFADXRAOLQXJSJDANINJEOMCFAWWITREGCDF9OZ9ZKHPJZJNMOVGX'
            b'9OKQBSGVZYWKNOPVJEOZEI9BPE9GCUEQVAHSBBRBGQTEXVZCSL9ECOWPOWZCVSCBOU'
            b'SNQMTJIEKHXL9NCPRMLRNKQEHYJCLRHGZKFNBJIPKSKPRFTSKFJULTBTXFDQHWUYOS'
            b'DQBHPAINVEPKCCHJDTZOJIGJZOF9AEQDBKCZSZMIWUUVHVGAFKALGITVQQKBAHKCIF'
            b'SVMVZ9UDQABVIANTBUQOFBIXQBWB9KKQOVJZNVBEDAZKUTRNKGJQWMTEKV9KGCIBRD'
            b'CBAPKSTMCZGUV9HTAABQDKGQBCRFNXBMZRTHF9MO9GAGQDYDVLOFMDE9QQZYR9GDSB'
            b'LUVVMKMCZIMDPNCVLGDKBACWQJRWOQNKBTSDJFKQMKTVKXVNAHRHZALJGVAMXWJYRA'
            b'KTEJFXAHBQGSYWWQVECQYPXVFWILNFZKGGRIFCJBSIZRDJXRJHSURPCZKOWKLFRUMV'
            b'ENEGMNKUAOGVUECDSGAZNQZKBJDJPVBXLOTID9QLMFNGIWKAAIQTJJROSZBXPQRXAU'
            b'CV99OGCEOTQCJ9II9ASZL9XGNSVUXVKPXYOJMF9PX9GSLEROR9FXVQ9MLEMEW9IWNW'
            b'BNVAYXZ9ZETTDSMLGZAKHE9IUJBFUHXW9KWCNZOZCCTFGBGWSDAQGGSPSQHOMUVJML'
            b'WBDAKYQZMWPQLLYAGUMOVMVLFD9TO9OUBTVUHHUNSFSATSEGBFVGDZUBMTWWFDPSQV'
            b'CUFRVKHYYPDWRPNSKXRFTVEIBVZNGUZRQCPXVKBPKQDDLEBWIEBIPTEJIYFHBXCUVC'
            b'CKTKEJAYRZCKAXLMELINWUZHG9JFBSBAKHIXMWHUWUFHFNLXNO9GKINYKRTCNN99PH'
            b'PHO9MJAGUYZAPNSPWUZ99E9BEADKETLOALWNANYMHSLLQSBS9YTYVJKTVWFUVS9MFO'
            b'WCHLEUUFUWTYGLZXFDUXVABTVFXFPUEPIUEIAVSZSSZQJTHNGKBJXADRHVTIBERILM'
            b'CCGWUITYQHGEEGWIZZOII9B9EVVVFJNEYEWH9ZVOJGHKVPYKDEZZSPBAOBQGGWPWXT'
            b'CKSLSHJQYCDHAYIQ9QVSQFPXZDBYSJJKSNTRXORHLPVOYVMIGALRPXYWQWSJPPFTJC'
            b'YXAATLBFNSGVXKFJXHYTILNOQZROUCDUTWOMGYBVTWPKJY9RVKKWQQMVCHJEUBELJD'
            b'KJPGYLXUXATNOIJHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHV'
            b'HE9EEBQYSNWECSPAJHGLTEUCXALBRVGXFENUCOONSUFZLHTLVQNPDZDIVDQHWVLDED'
            b'PFQLJZWF9GFZMPZXFVEQECLUZBBFVSAPEXJLKKOMXEPHZAKP9WYTGQOML9FQSBMSFL'
            b'OGRLFQKUCUWFX9DNAOZSSKBUV9IBVIRNUWYBKJVKLJ9PPNLGJATKDCAGVFIVPXRABH'
            b'ZVZACJIG9WOKKLFCRDSMTWSCYHOZEEXRIMPQBXVXQAYKZIADSM9GUBICGKGQYNHKVY'
            b'OZFRVCHNM'
        ),

        TryteString(
            b'KJVG9EKTMPWE9PKWGGJJPDISCX9CJXGWUUPOLKKBVUWUYNBACOOF9LEQGNM9YYGNXJ'
            b'EMOBGSDCPLP9CQIFBCLENUCJGCCSWYU9WVFTRZZCPCZXEGMDDPSYDTYUIMVFSLGHTA'
            b'ZWJRHY9ZQMFGROIIUIQVDSOTUIRMSROYDPTWRURMZAILGBWIVADPYHPTSFGKAPPMVT'
            b'THGLYXZHPFUO9HBAJIOUJOOABAQSOQUKLSVQGSDIHEHGTQSPM9EDHLEQSFFAAQXR9M'
            b'UREVQ9MEGXNXMNJVWXYEIRYAOFHDFZAKNVWKTVIHKXWVT9VOZRUPUKXASIFAZQVZSW'
            b'HBQU9RGVLJMRVQCMCYSQEIMIAKOHNAKQLTIMEHMZMGAKCIPGHQTWORBLVKISGPKIIM'
            b'AMQWMZUNTKJSQZAZNYEGORGNRTKCLNRSOQJRBUCPSDLKLGGRBACIULLZBFBUNQXACK'
            b'L9WFEKKAHGLBBRNNEXZPPH9UZESFFKVBOPROFHQOKYAVTJDDVAUGUAURHLESIEIITD'
            b'VVRCTRKOGUPERJHNJMXTLNVMWVDZITSPEHRYJKEZVTZSJEYTOQEGNJRMCJLYYKPGDF'
            b'UFQHGWRDGEWBXYOGEZ9IXRWJAQLKHPROWIEVI9ILNOXTPOSRLETMNEQ9P9WLXCUZNM'
            b'GFK9EYHABBCSEZSGMNJZOEEGRVNU9ASSOOLCXXZKZPFWU9EEUUQRACVGZPL9MQINGL'
            b'YPUTUPTLPKWPHRFFBRHZQWIVOXPGAKCQQPRKPPZUHOJISYASMRYMCMJZNR9D9OQANU'
            b'XGJXSUSZQFWDJUTNCDKAUAFYKJNVAMBLTPPRPIJRRKQMCIHHGKPQPUQHWJNIEPDLRA'
            b'YSJXVSJVKAGBAJCMGQSCZFTEJSG9LUWZGFBGQUHFUHWDHND9WJBPOQQXDEATOBGXDG'
            b'M9BKSDCOEZ9IENZPPDUPMKCUKYBIBTBMJPJLDNSOPEKHVGQKLGUISUFSYMHR9I9LRP'
            b'LCXJTDHHEXKQEVIFOUGKJEILQIHFG9FWOUBXRHCRHLOYAXTFQUWKJBSX9GNPCWXUQJ'
            b'RHDBOBRZPQAPMKCIZGULPZDYLLBMAFJZXGIRVAAVUUCSNGDGJQJTAPV9QXYIABIHBX'
            b'ILKQLGDGXQUVADQGDFKKDKMU9WKBEEY9TAVRYQDQFKPDMLMUAEGBHVJPSIZOEQGCSY'
            b'NJCICXROXHPZFUXASQJXZEHQTEUKFIYQIGJWORKAIQUFROYGMIDFAJOUFAYYWMXUGJ'
            b'FPSRTGEUWWLOXEUTKZCZQHWFUNHTMZVIJ9VYOLBTAIFB9EN9NFVAABVFIBIWXLJSUO'
            b'YELOQSIPK99AXSXCPECWOXFUVDIANVO9PKZUESMFWIEVWLEHLCVKDXEROLNEMYRRCJ'
            b'DPAYVTYAYSL9AFZH9GXHXZORXZEQTUJEDJGCYCQAENYZRKDJSK9TOCKKCXOSSTOAIO'
            b'9UVAKQJBVOS9RUQIESCIJYRWYRUPMIJEHR9EGZ9YMHQXALUUDMCFYFOMLIGORMMBCD'
            b'JMFCNNELGPXHICRNRKONBKACHLLSABUNHQ9TU9OSSTQXGWBLRRTSKZORXILALQYRXD'
            b'DMXPPUTEGTVCHSOVYZEEJMRRECGBMXBORUTIQUNMJDXBSZSYYA9UOTFWMQOHURUFSU'
            b'ESLMILBBKGHTTFTZONNQIMJSLILKAQJRDTNVK9PHAMNKZXRHSOPGKKLJBRDYAC9BRU'
            b'RJWUIJLUWXNQOSVVLFEBROMJCGVYZWIPOYFQRBUUNJLIGPVDLADFLZJGZBLEBEQEUD'
            b'UZOIFFZLZRXCPQVMIARFLZRIOFFEHVFJZXFQFLCJSEXRPUKGMWBMGXEHIEZKOKGH9J'
            b'XAUXACEBLXKLZT9P9NJGXRZWZJAZCNKR9CHRRNCFOLBCSZXKXOIGZHZSTDKTHOWZTE'
            b'XWOIZLPEGPKTBRENSCOYBQJSZQ9XPNRORQOWMPGBXKSSLDIUVEAJODEUZJKZE9MBVT'
            b'QXWFXXXOG9QGDWMNZEWVWVDZWIFKSWZIDNBYEJP9VBKQNOJEXVPZQTHVUCSK9QCMEP'
            b'US9Y9FQPWEACAEBIQSVPJEL9ZBSETINIYMSPIXLADSHTDYRAYUTMXDCABIUUETMNLU'
            b'RELTPAGEDNMQZALFWOPAI9WUFOSUTOFUUWFAFRFVYOPITBVSG9IBVNJCOITYMTCCIJ'
            b'IZWVPYGQE'
        ),

        TryteString(
            b'GWLDXDNSEIQCQJKVVFEWPWR99OKSHTVIJCNFEGSUM9DUQRO9ZJUWOOGP9XLABZFDXN'
            b'GOXZLWETWXTTBT9KIGB9VOMMTKNJTUUFGJIYZIMHEAEJTNTIIOLLO9VWCYX9JA9RML'
            b'SB9COUYKMRZQWJXMIFXCETZWRDXHBBOYLYLURXBELK9YLIXXGHSP9TNNASKDGFVJQV'
            b'99CMXRM9VHASOBYBTWIMAJLBRUPZQLDCKOFAPHG9DKVVEFHTZAGNC9KH9K9HIFNLUI'
            b'NQFTQTSALBNV9HRWXDGDEBBKIMQCDWVTMPDIVCXHGKDFPAKTSYYJIROENCJOZXVBNL'
            b'UIUJHHAXZ9PTMNFGRRCNHQUVEESVSYNSIQXDRKKBMWJOQSMIK9FPHTNAJUYTQ9BLOG'
            b'9GZPXHACSPIFCDX9LIVQDISFAVZWQUXP9BROHMGBHFTVWEWCZRPTAMTXXLVLZBT9BM'
            b'OSJXAIGYUXICBUGQDOJRMYFWYGLT9UBTKGZZPNDIPNVIHQIBXFQACGYPWTKJSRHVQL'
            b'VJAJWFGNFLAJYOADR9XNOAYOLHKEUGWSOCXYJVHWLRRBE9XYLQDYJXYMURFPXTMNHE'
            b'EXJGVY9ADSJICXGWOUKYWVMXMWSJQVPKTUQUSCHTREWZNTXBDUJWDVTMXPABBHGYOC'
            b'UNFIFTUQTRAVTCFAQNNAAXBCRILNVZGGKEKIUOXBVMXLFNCSHFMH9PYR9DBXFKWIBT'
            b'AZLQTMVEGCZLWESPAHZLVTCGUUJXEAPCEYBXGGARHGDODWULDHHMMKEIYRFFEMQTTG'
            b'SGWTOGBZYEULWWFINFHGYEDHHXAJASMQCLBKWYXSBIWZLMEZVXUWP999OROQYLOFVA'
            b'ZGJIGHMTGJSZNGXFWMMUCGGQXB9ASA9UCVZLVYZG9XBIF9HUAB9HBYERWFJ9IEDMAY'
            b'ZSIFDHOX9HRQSDEGWUAODHRNVBQWTBK9JFZBNKBATUXBZOIEHPTFPQXSBGHGOEVMUT'
            b'RPSTRFOKHWEUPUSEROZEBSMTSTZSHFTQ9UXYTMDVLAPXSHZGYLPVDGTCGHOQSWJJED'
            b'ARRUPCYFHJOSPVSTNEERBJOERGU9TTOW9GSVZEODZOEQZATYADJ9NURBJNTPBYECGG'
            b'WP9SVOZAWXT9RLKBKL9TBBWXSDOSXRJHPKMLIPWKXSM9MPNQWEYLDPRLTUUNEFHXUF'
            b'CLLJLZIRGUMEJCTIHC9VPS9YPENZPBYKTSBPXIPZHNYZYDPOYRIFEZWOFDYMZTUOMZ'
            b'ZHLSLZMTDIMTTXMHHTDLIVRSIDSWJBCDCKEYZPTLLZP9IMNJSRXICEGTPZXVXAVIBG'
            b'JMMOUNPUKXHIANUPGJANUHTG9ZPZCBFRMLHYOPFAKGRZSZJARBEEPQZ9TKJRQLXEG9'
            b'IOHETGXCMKT9XZUBPMIQWXRRRFF9POXJBXW9NPUIOYNET9CTUWJB9RQDHVIAFLKILV'
            b'BDLOYZAKIRHAUXE9ORNAPVXRTUY9CNXAPFPNUADXHDQWGRCVBZMUASLOPAYHLNGNUV'
            b'VTDQCSOSTOOILZFXBXUPILJVVDUIRBWQUYNOJX99BTZNYQZGTENKGEKKADMPDWQB9I'
            b'CWBWFHKAPRNDGGWOUXDTJKMASYOPYNYPTOEN9EDLXVVUMELPGG9ZLAJXQFTIEA9HRJ'
            b'QCJLRUSLBGIWRWRXMTSAYVNHNJCYDSYNBPH9XEI9NFEDANKTZ9RWSCMPV9XVBTBZVD'
            b'O9HABGD9VDOIXFMWBCHERKTDPDQFQSVNZLZRPHVZTFTL9LRAIMXLMTEZFAKK9CMYVP'
            b'RTGBXGIMHUUVWCHDUUEZMZFMDSUQRVVPHZDUTOTLPSKQEHWNLOXKGGJKHHUNQIJXUT'
            b'NYMZIL9UOEKECBSTCRVTVKUWETWPECLAXJWUNXXNRDBR99KJSWCHJBTMK9TSLLKWUC'
            b'MMWNABUZLKLCJXHPUWVLIEIHYTZRPTZJTUMDDVEFCDRQYHPBF9WVMATUIQXGWTGAHQ'
            b'STNRVZZIPBRPIUOZLXRGEWSUVDXIQPAONF9QPFYIMUEMDXOMFPKKJNGRBNMKXNJUF9'
            b'IQIHPEBHSLWQWXJZNEBKCQUSRWOEGMWFZYGHFUUHDBBOBKSTXT9HGOORUQMFBFBICA'
            b'HBQNOBVDCZVGZGASCINUGVEMM9LLPWTNWWVKWYIYDIJEKAVBEFPAVMFWEOYMTOHLZV'
            b'PRMIINUJT'
        ),
    ])

    # Stub out input discovery and signing so that only
    # ``prepareTransfer``'s own bundle-assembly logic is exercised.
    with patch(
        'cornode.transaction.ProposedBundle._create_signature_fragment_generator',
        Mock(return_value=mock_signature_fragment_generator),
    ):
        with patch(
            'cornode.commands.extended.get_inputs.GetInputsCommand._execute',
            mock_get_inputs,
        ):
            response = self.command(
                seed = Seed(
                    b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
                    b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
                ),

                transfers = [
                    ProposedTransaction(
                        value = 42,

                        address = Address(
                            b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
                            b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
                        ),
                    ),
                ],
            )

    # NOTE: the raw transaction trytes appear in reverse bundle order
    # (last input fragment first, the spend transaction last).
    self.assertDictEqual(
        response,

        {
            'trytes': [
                # Input #2, Part 2 of 2
                TryteString(
                    b'GWLDXDNSEIQCQJKVVFEWPWR99OKSHTVIJCNFEGSUM9DUQRO9ZJUWOOGP9XLABZFD'
                    b'XNGOXZLWETWXTTBT9KIGB9VOMMTKNJTUUFGJIYZIMHEAEJTNTIIOLLO9VWCYX9JA'
                    b'9RMLSB9COUYKMRZQWJXMIFXCETZWRDXHBBOYLYLURXBELK9YLIXXGHSP9TNNASKD'
                    b'GFVJQV99CMXRM9VHASOBYBTWIMAJLBRUPZQLDCKOFAPHG9DKVVEFHTZAGNC9KH9K'
                    b'9HIFNLUINQFTQTSALBNV9HRWXDGDEBBKIMQCDWVTMPDIVCXHGKDFPAKTSYYJIROE'
                    b'NCJOZXVBNLUIUJHHAXZ9PTMNFGRRCNHQUVEESVSYNSIQXDRKKBMWJOQSMIK9FPHT'
                    b'NAJUYTQ9BLOG9GZPXHACSPIFCDX9LIVQDISFAVZWQUXP9BROHMGBHFTVWEWCZRPT'
                    b'AMTXXLVLZBT9BMOSJXAIGYUXICBUGQDOJRMYFWYGLT9UBTKGZZPNDIPNVIHQIBXF'
                    b'QACGYPWTKJSRHVQLVJAJWFGNFLAJYOADR9XNOAYOLHKEUGWSOCXYJVHWLRRBE9XY'
                    b'LQDYJXYMURFPXTMNHEEXJGVY9ADSJICXGWOUKYWVMXMWSJQVPKTUQUSCHTREWZNT'
                    b'XBDUJWDVTMXPABBHGYOCUNFIFTUQTRAVTCFAQNNAAXBCRILNVZGGKEKIUOXBVMXL'
                    b'FNCSHFMH9PYR9DBXFKWIBTAZLQTMVEGCZLWESPAHZLVTCGUUJXEAPCEYBXGGARHG'
                    b'DODWULDHHMMKEIYRFFEMQTTGSGWTOGBZYEULWWFINFHGYEDHHXAJASMQCLBKWYXS'
                    b'BIWZLMEZVXUWP999OROQYLOFVAZGJIGHMTGJSZNGXFWMMUCGGQXB9ASA9UCVZLVY'
                    b'ZG9XBIF9HUAB9HBYERWFJ9IEDMAYZSIFDHOX9HRQSDEGWUAODHRNVBQWTBK9JFZB'
                    b'NKBATUXBZOIEHPTFPQXSBGHGOEVMUTRPSTRFOKHWEUPUSEROZEBSMTSTZSHFTQ9U'
                    b'XYTMDVLAPXSHZGYLPVDGTCGHOQSWJJEDARRUPCYFHJOSPVSTNEERBJOERGU9TTOW'
                    b'9GSVZEODZOEQZATYADJ9NURBJNTPBYECGGWP9SVOZAWXT9RLKBKL9TBBWXSDOSXR'
                    b'JHPKMLIPWKXSM9MPNQWEYLDPRLTUUNEFHXUFCLLJLZIRGUMEJCTIHC9VPS9YPENZ'
                    b'PBYKTSBPXIPZHNYZYDPOYRIFEZWOFDYMZTUOMZZHLSLZMTDIMTTXMHHTDLIVRSID'
                    b'SWJBCDCKEYZPTLLZP9IMNJSRXICEGTPZXVXAVIBGJMMOUNPUKXHIANUPGJANUHTG'
                    b'9ZPZCBFRMLHYOPFAKGRZSZJARBEEPQZ9TKJRQLXEG9IOHETGXCMKT9XZUBPMIQWX'
                    b'RRRFF9POXJBXW9NPUIOYNET9CTUWJB9RQDHVIAFLKILVBDLOYZAKIRHAUXE9ORNA'
                    b'PVXRTUY9CNXAPFPNUADXHDQWGRCVBZMUASLOPAYHLNGNUVVTDQCSOSTOOILZFXBX'
                    b'UPILJVVDUIRBWQUYNOJX99BTZNYQZGTENKGEKKADMPDWQB9ICWBWFHKAPRNDGGWO'
                    b'UXDTJKMASYOPYNYPTOEN9EDLXVVUMELPGG9ZLAJXQFTIEA9HRJQCJLRUSLBGIWRW'
                    b'RXMTSAYVNHNJCYDSYNBPH9XEI9NFEDANKTZ9RWSCMPV9XVBTBZVDO9HABGD9VDOI'
                    b'XFMWBCHERKTDPDQFQSVNZLZRPHVZTFTL9LRAIMXLMTEZFAKK9CMYVPRTGBXGIMHU'
                    b'UVWCHDUUEZMZFMDSUQRVVPHZDUTOTLPSKQEHWNLOXKGGJKHHUNQIJXUTNYMZIL9U'
                    b'OEKECBSTCRVTVKUWETWPECLAXJWUNXXNRDBR99KJSWCHJBTMK9TSLLKWUCMMWNAB'
                    b'UZLKLCJXHPUWVLIEIHYTZRPTZJTUMDDVEFCDRQYHPBF9WVMATUIQXGWTGAHQSTNR'
                    b'VZZIPBRPIUOZLXRGEWSUVDXIQPAONF9QPFYIMUEMDXOMFPKKJNGRBNMKXNJUF9IQ'
                    b'IHPEBHSLWQWXJZNEBKCQUSRWOEGMWFZYGHFUUHDBBOBKSTXT9HGOORUQMFBFBICA'
                    b'HBQNOBVDCZVGZGASCINUGVEMM9LLPWTNWWVKWYIYDIJEKAVBEFPAVMFWEOYMTOHL'
                    b'ZVPRMIINUJTTESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
                    b'UDYYFJFGZOHORYSQPCWJRKHIOVIY999999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99D99999999D99999999PNTRTNQJVPM9LE9XJLX'
                    b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Input #2, Part 1 of 2
                TryteString(
                    b'KJVG9EKTMPWE9PKWGGJJPDISCX9CJXGWUUPOLKKBVUWUYNBACOOF9LEQGNM9YYGN'
                    b'XJEMOBGSDCPLP9CQIFBCLENUCJGCCSWYU9WVFTRZZCPCZXEGMDDPSYDTYUIMVFSL'
                    b'GHTAZWJRHY9ZQMFGROIIUIQVDSOTUIRMSROYDPTWRURMZAILGBWIVADPYHPTSFGK'
                    b'APPMVTTHGLYXZHPFUO9HBAJIOUJOOABAQSOQUKLSVQGSDIHEHGTQSPM9EDHLEQSF'
                    b'FAAQXR9MUREVQ9MEGXNXMNJVWXYEIRYAOFHDFZAKNVWKTVIHKXWVT9VOZRUPUKXA'
                    b'SIFAZQVZSWHBQU9RGVLJMRVQCMCYSQEIMIAKOHNAKQLTIMEHMZMGAKCIPGHQTWOR'
                    b'BLVKISGPKIIMAMQWMZUNTKJSQZAZNYEGORGNRTKCLNRSOQJRBUCPSDLKLGGRBACI'
                    b'ULLZBFBUNQXACKL9WFEKKAHGLBBRNNEXZPPH9UZESFFKVBOPROFHQOKYAVTJDDVA'
                    b'UGUAURHLESIEIITDVVRCTRKOGUPERJHNJMXTLNVMWVDZITSPEHRYJKEZVTZSJEYT'
                    b'OQEGNJRMCJLYYKPGDFUFQHGWRDGEWBXYOGEZ9IXRWJAQLKHPROWIEVI9ILNOXTPO'
                    b'SRLETMNEQ9P9WLXCUZNMGFK9EYHABBCSEZSGMNJZOEEGRVNU9ASSOOLCXXZKZPFW'
                    b'U9EEUUQRACVGZPL9MQINGLYPUTUPTLPKWPHRFFBRHZQWIVOXPGAKCQQPRKPPZUHO'
                    b'JISYASMRYMCMJZNR9D9OQANUXGJXSUSZQFWDJUTNCDKAUAFYKJNVAMBLTPPRPIJR'
                    b'RKQMCIHHGKPQPUQHWJNIEPDLRAYSJXVSJVKAGBAJCMGQSCZFTEJSG9LUWZGFBGQU'
                    b'HFUHWDHND9WJBPOQQXDEATOBGXDGM9BKSDCOEZ9IENZPPDUPMKCUKYBIBTBMJPJL'
                    b'DNSOPEKHVGQKLGUISUFSYMHR9I9LRPLCXJTDHHEXKQEVIFOUGKJEILQIHFG9FWOU'
                    b'BXRHCRHLOYAXTFQUWKJBSX9GNPCWXUQJRHDBOBRZPQAPMKCIZGULPZDYLLBMAFJZ'
                    b'XGIRVAAVUUCSNGDGJQJTAPV9QXYIABIHBXILKQLGDGXQUVADQGDFKKDKMU9WKBEE'
                    b'Y9TAVRYQDQFKPDMLMUAEGBHVJPSIZOEQGCSYNJCICXROXHPZFUXASQJXZEHQTEUK'
                    b'FIYQIGJWORKAIQUFROYGMIDFAJOUFAYYWMXUGJFPSRTGEUWWLOXEUTKZCZQHWFUN'
                    b'HTMZVIJ9VYOLBTAIFB9EN9NFVAABVFIBIWXLJSUOYELOQSIPK99AXSXCPECWOXFU'
                    b'VDIANVO9PKZUESMFWIEVWLEHLCVKDXEROLNEMYRRCJDPAYVTYAYSL9AFZH9GXHXZ'
                    b'ORXZEQTUJEDJGCYCQAENYZRKDJSK9TOCKKCXOSSTOAIO9UVAKQJBVOS9RUQIESCI'
                    b'JYRWYRUPMIJEHR9EGZ9YMHQXALUUDMCFYFOMLIGORMMBCDJMFCNNELGPXHICRNRK'
                    b'ONBKACHLLSABUNHQ9TU9OSSTQXGWBLRRTSKZORXILALQYRXDDMXPPUTEGTVCHSOV'
                    b'YZEEJMRRECGBMXBORUTIQUNMJDXBSZSYYA9UOTFWMQOHURUFSUESLMILBBKGHTTF'
                    b'TZONNQIMJSLILKAQJRDTNVK9PHAMNKZXRHSOPGKKLJBRDYAC9BRURJWUIJLUWXNQ'
                    b'OSVVLFEBROMJCGVYZWIPOYFQRBUUNJLIGPVDLADFLZJGZBLEBEQEUDUZOIFFZLZR'
                    b'XCPQVMIARFLZRIOFFEHVFJZXFQFLCJSEXRPUKGMWBMGXEHIEZKOKGH9JXAUXACEB'
                    b'LXKLZT9P9NJGXRZWZJAZCNKR9CHRRNCFOLBCSZXKXOIGZHZSTDKTHOWZTEXWOIZL'
                    b'PEGPKTBRENSCOYBQJSZQ9XPNRORQOWMPGBXKSSLDIUVEAJODEUZJKZE9MBVTQXWF'
                    b'XXXOG9QGDWMNZEWVWVDZWIFKSWZIDNBYEJP9VBKQNOJEXVPZQTHVUCSK9QCMEPUS'
                    b'9Y9FQPWEACAEBIQSVPJEL9ZBSETINIYMSPIXLADSHTDYRAYUTMXDCABIUUETMNLU'
                    b'RELTPAGEDNMQZALFWOPAI9WUFOSUTOFUUWFAFRFVYOPITBVSG9IBVNJCOITYMTCC'
                    b'IJIZWVPYGQETESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
                    b'UDYYFJFGZOHORYSQPCWJRKHIOVIYYZ9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99C99999999D99999999PNTRTNQJVPM9LE9XJLX'
                    b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Input #1, Part 2 of 2
                TryteString(
                    b'SWHZKSNCOQXPCGRTYJPUGKLBNEJFXASKY9XAUROGDAO9QQLIVRZQDJDTPLNTBGUU'
                    b'FGELJPSGUMGPIUNCCTQEFU9UZIJJYJXCYWRADRHHXKBEDG9HTCHJHXUJRKMIUFOS'
                    b'KDGMI9QPCYQSWDCUYKQQEONJEKYWELG9MSNBHRHILGSSKMRCQJBOGNYBKEMTOFEU'
                    b'BEOUBD9ULP9PHWYKXEQNDMUR9BGDGPEUFRFRGNGFJFPYQXABSDALTYKL9SM9VVQC'
                    b'OHY9AS99EYWSHUNEQVVNLS9CNPEVMPOKMWQYFPGTNJBZCDWYPFWSBKZOYXNNVMPO'
                    b'DEHMHNIYZCHIEDEAB9TLFOWVHF99GVRWUZWSN9IQOKWIXERKRQETZS9ZJJSQRLLP'
                    b'QXEWNMFVVBWOIK9MBYCEGUJ9HJRIIMBVJNGXMGPGDLOLYWFVQNOKTFZRBJSSBJTE'
                    b'TGAIUGZOYQOFTVBKAQY9SSJWJXXYAUVUQWPXVCISFSDSHDQCPVMG9GVDAO9GIMDH'
                    b'ZWJOKSUEUFHBGSCZKNBTZWJXSFYNJSBSEL9UMZBAZRGYCEHOSJBMKMPMNXKEVTMU'
                    b'DEFWBIKOXUSBNPTNECGVLYSOGUDJDPHYFADXRAOLQXJSJDANINJEOMCFAWWITREG'
                    b'CDF9OZ9ZKHPJZJNMOVGX9OKQBSGVZYWKNOPVJEOZEI9BPE9GCUEQVAHSBBRBGQTE'
                    b'XVZCSL9ECOWPOWZCVSCBOUSNQMTJIEKHXL9NCPRMLRNKQEHYJCLRHGZKFNBJIPKS'
                    b'KPRFTSKFJULTBTXFDQHWUYOSDQBHPAINVEPKCCHJDTZOJIGJZOF9AEQDBKCZSZMI'
                    b'WUUVHVGAFKALGITVQQKBAHKCIFSVMVZ9UDQABVIANTBUQOFBIXQBWB9KKQOVJZNV'
                    b'BEDAZKUTRNKGJQWMTEKV9KGCIBRDCBAPKSTMCZGUV9HTAABQDKGQBCRFNXBMZRTH'
                    b'F9MO9GAGQDYDVLOFMDE9QQZYR9GDSBLUVVMKMCZIMDPNCVLGDKBACWQJRWOQNKBT'
                    b'SDJFKQMKTVKXVNAHRHZALJGVAMXWJYRAKTEJFXAHBQGSYWWQVECQYPXVFWILNFZK'
                    b'GGRIFCJBSIZRDJXRJHSURPCZKOWKLFRUMVENEGMNKUAOGVUECDSGAZNQZKBJDJPV'
                    b'BXLOTID9QLMFNGIWKAAIQTJJROSZBXPQRXAUCV99OGCEOTQCJ9II9ASZL9XGNSVU'
                    b'XVKPXYOJMF9PX9GSLEROR9FXVQ9MLEMEW9IWNWBNVAYXZ9ZETTDSMLGZAKHE9IUJ'
                    b'BFUHXW9KWCNZOZCCTFGBGWSDAQGGSPSQHOMUVJMLWBDAKYQZMWPQLLYAGUMOVMVL'
                    b'FD9TO9OUBTVUHHUNSFSATSEGBFVGDZUBMTWWFDPSQVCUFRVKHYYPDWRPNSKXRFTV'
                    b'EIBVZNGUZRQCPXVKBPKQDDLEBWIEBIPTEJIYFHBXCUVCCKTKEJAYRZCKAXLMELIN'
                    b'WUZHG9JFBSBAKHIXMWHUWUFHFNLXNO9GKINYKRTCNN99PHPHO9MJAGUYZAPNSPWU'
                    b'Z99E9BEADKETLOALWNANYMHSLLQSBS9YTYVJKTVWFUVS9MFOWCHLEUUFUWTYGLZX'
                    b'FDUXVABTVFXFPUEPIUEIAVSZSSZQJTHNGKBJXADRHVTIBERILMCCGWUITYQHGEEG'
                    b'WIZZOII9B9EVVVFJNEYEWH9ZVOJGHKVPYKDEZZSPBAOBQGGWPWXTCKSLSHJQYCDH'
                    b'AYIQ9QVSQFPXZDBYSJJKSNTRXORHLPVOYVMIGALRPXYWQWSJPPFTJCYXAATLBFNS'
                    b'GVXKFJXHYTILNOQZROUCDUTWOMGYBVTWPKJY9RVKKWQQMVCHJEUBELJDKJPGYLXU'
                    b'XATNOIJHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHVHE9EEB'
                    b'QYSNWECSPAJHGLTEUCXALBRVGXFENUCOONSUFZLHTLVQNPDZDIVDQHWVLDEDPFQL'
                    b'JZWF9GFZMPZXFVEQECLUZBBFVSAPEXJLKKOMXEPHZAKP9WYTGQOML9FQSBMSFLOG'
                    b'RLFQKUCUWFX9DNAOZSSKBUV9IBVIRNUWYBKJVKLJ9PPNLGJATKDCAGVFIVPXRABH'
                    b'ZVZACJIG9WOKKLFCRDSMTWSCYHOZEEXRIMPQBXVXQAYKZIADSM9GUBICGKGQYNHK'
                    b'VYOZFRVCHNMTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
                    b'KAOSD9WEYBHIUM9LWUWKEFSQOCVW999999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99B99999999D99999999PNTRTNQJVPM9LE9XJLX'
                    b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Input #1, Part 1 of 2
                TryteString(
                    b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWG'
                    b'HMNUXBJTOBKZFDNJEMAOPPLR9OOQJCDVO9XSCYQJQVTXQDYWQEBIXKDZAFWINAHJ'
                    b'ELJTDPVMUEWSVCJA9ONDYBNANWCGLBQMEMTBFDMWLCMQHGJLGYDQGIMLSNQHBGSV'
                    b'TDZSGNQAL9OHRAPDKYSVBTNYRUUBNEEAINJMOVOHOWXUAIEDAIQDESQFCKJELHAV'
                    b'ODSMXMKEHTDKCDIWWISXSAHQE9TJTLJZGXIABHU9CUACMLVSSYV9UJREPWFVYWWX'
                    b'PYYJRP9DOEKNDMBSBKKHIFMPXZXIJERXRZVBVDBYNZBBCCOSEDOLDGSNQK99HIYS'
                    b'WNYYEBLRT9MADLXLLZJOSZCFWAVZY9XUPNZUVOSKBMKXXJNRKDBOSGUGME9QNBMH'
                    b'IWXWXPEEUVQAQVUXDJGMJOBXG9VJBWPRQRCCQSNBEHTLGOKJVYEPQOJO9QIZLYAV'
                    b'LCKVXKEKRGBSZJAC9KTSSNMDQGKCLPZDJAQ9PBQMLUONVVFAWTMREGFXJMRRGL9M'
                    b'KNPOZGOYRPDCYEJCYJUN9HYNSNHXARMRJVXBUHOP9K9BIIEYGSHBUESKTAOQOEAN'
                    b'EAIHYHVGSVNPXWRBTJAMKMWEQOSYEWXLSRYVOSTMPOGYNPDNFLOICXVHYBDHSXVR'
                    b'KVWNVZOZQDOITZWICSYEW9RGCPPUJYVIYVTSZILYENYUYUGDSGWVYWRMZJNCTTPV'
                    b'WDWXAPVZQQKI9CGEQPBFPCLGDDEGBUUTISNCMJXQCTUNKQTLCATNOIRPMEUQBQTH'
                    b'HQYRGDLZEUZBALNQDXJYZBVXDPLVOVVAUCQSCGRTUJRBBNRV9ORETTGFIXBBBVOP'
                    b'FHPKGPKVBYFTZMWUVZYVWWSDKQVONMPLLQTV9IZUWLUWZNLCVJNPMG9CMXQG9D9W'
                    b'YCANBRMYV9DU9FMJT9JHT9RWCGLHFCODXJVFQBLTKJWVNVGSUHNWLHNPLZDSWDMD'
                    b'VQTLVCSVFJJTIQZFAPCXJWDAXWJKJVOKHALCQQTIXABPFXPUFK9IKXYUGMPXNSQC'
                    b'JDVETOVEX9LXYLXWRW9PFEYJCUJHLUB9NXTUGLIQMDGPDPSJTWDYEWXQAICLN9BT'
                    b'GNBJWLVAXZGNCYXGHBMRUVVYTJGH9XDGSZHQDYKFGMOWORSFDFBLJHBRTXRSEBAL'
                    b'CJIJTQJYDZZKWZGVAPFVKVEOXGYRLMBSPFHUIJZZFMFVOTLPUWSYZCWFZMAALHRG'
                    b'SYSXSMOHWARYZZVIAKXAHGY9SROWPVFACXXLQEXXOJCKXRRZHBZXJIBWQMMZTRDF'
                    b'YQBSBBZQQXGCAAECMQINHJRBSGOYPCGWKPWCHBKOJTIGDASZFGONTUGDSOOLEMGO'
                    b'EBFCZZJZSCGXPHXHB9WGMMFVUTCHDBSAMYTECQZWGCXAWTCTIBZHQVUAIBPZHBBT'
                    b'ZAERYU9XAMKBHCHGZISSPOWJIRZTAXDHMAYBPXOXWDIUDHNBTFJNVHHJO9AWAEC9'
                    b'UPRRFJLNGKTXJXFDGODDOPMGLALRIJBVIFLQTYQPKCKCRBYPBYGUUFJGJFVCOURN'
                    b'KCGNTQNNKHDDPIVZHCJSLDUYHVPAX9YJOFTTFSKFHTOOQQRCPYZKTDVCUZGBOBZK'
                    b'LVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VZZGUNUCKOYLYXOVHMGULWGSRCGX'
                    b'ZLJVNIMZBLFOJJKOTUREMBXYOZXDUP9ROUVYOSJBGGFZMIFTKHJHHJGZJNOYQWFZ'
                    b'AHLJWWDDFQQAMEGJUEUSIWOHKFJWRXRSJWYPGIGZGMFNAIDGDOUUQUVHJZQPJMLC'
                    b'GKGADXAXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVGXBBPBE'
                    b'BDVGZDBWMDMLPXYJBBRNOMKGR9TSVUXSRYXQTCTYLFQORMIGDKBJLNLCQXACVCBJ'
                    b'GVWRJNYPCKOAILPLMWBYKDLDXLIZMZFWDXUWDEGDUURQGMJNUGJXDXYJGKOTQBGC'
                    b'HATROPKEN9YTXDUOCMXPGHPDANTJFRRVEVBFVCNTWNMMOVAVKBNSJIWWBVHBMCSU'
                    b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHV'
                    b'ITGYROZZNQPTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
                    b'KAOSD9WEYBHIUM9LWUWKEFSQOCVWN99999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99A99999999D99999999PNTRTNQJVPM9LE9XJLX'
                    b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),

                # Spend transaction, Part 1 of 1
                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUETWO9DONTUSEINPRODUCTION99999XYYNXZLKBYNFPXA9'
                    b'RUGZVEGVPLLFJEM9ZZOUINE9ONOWOB9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99999999999D99999999PNTRTNQJVPM9LE9XJLX'
                    b'YPUNOHQTOPTXDKJRPBLBCRIJPGPANCHVKGTPBRGHOVTLHVFPJKFRMZJWTUDNYC99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
            ],
        },
    )
def test_pass_inputs_implicit_with_change(self):
    """
    Preparing a bundle that finds inputs to use automatically, change
    address needed.
    """
    # To keep the unit test focused, we will mock the ``getInputs``
    # command that ``prepareTransfer`` calls internally.
    #
    # References:
    #   - :py:class:`cornode.commands.extended.prepare_transfer.PrepareTransferCommand`
    #   - :py:class:`cornode.commands.extended.get_inputs.GetInputsCommand`
    #
    # The mocked ``getInputs`` response supplies a single input address
    # whose balance (86) exceeds the spend amount (42), forcing the
    # command to add a change transaction to the bundle.
    mock_get_inputs = Mock(return_value={
        'inputs': [
            Address(
                trytes =
                    b'TESTVALUETHREE9DONTUSEINPRODUCTION99999N'
                    b'UMQE9RGHNRRSKKAOSD9WEYBHIUM9LWUWKEFSQOCVW',
                balance = 86,
                key_index = 4,
            ),
        ],
        'totalBalance': 86,
    })
    # Signature fragments are also mocked (two fragments, one per
    # signature transaction), so that the expected trytes asserted
    # below are fully deterministic.
    mock_signature_fragment_generator = MockSignatureFragmentGenerator([
        TryteString(
            b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWGHM'
            b'NUXBJTOBKZFDNJEZUKCKWGUHVSU9ZJYAVSQSOFDCOIEP9LCXYLTEFMCYUJAAHLYUHQ'
            b'P99S9XRWHXHRPZCWHDMIDYW9OQAWUPTFMBTJGDCWRVNVRDPIWISVYNUDWUGBPNNFZD'
            b'WRVZ9FGAVSEWFXRXGGLXJTPJTJLC9JYHMFBKYAUJRAMHQHKUUZHRWZIVC9KFEEXXVN'
            b'EXJRYUSFV9PEPFUDCNRRTSCZXSTUEGJKDV9UCYNZSBRDYGOKFGYKWVFCYSWBUJYVGE'
            b'UXWTDGPWTWURH9RKEZRFCUUBFBPKSFONMDXWGYKWAUWVUOQVBIGQMMKQVDYAZ9SVFI'
            b'UUNMHOJGRQVXZGIIPKVNNBKABGKZLRNFK9KSIHTCGYPVCWYGDS9OIZWLNINYRLGJQC'
            b'UBWYMAVDWFAURLALQPMRMFRAZCMCPOWM99SGBVEZPAFAXHXNENNWXLF9ZVHZIDWBLF'
            b'KVWKBUYNBXOXTVPDWAGZXIOMDAEKNMRFGZVIGIFOSHGMPIPWNOWQDMHPKOJTYYECKN'
            b'GCDDTJVALGPZSX9IH9LEGQSDACLBWKNXUW9BAZSHAISUJDTPJDOASLVRXFNJJHXQTK'
            b'MKZUZIMJFPOKHEQXSCJQH9JPRNZHDVVZKWTHWWFNFMHFXPUIEEA9HPHJTCJJWZPUHK'
            b'AAWJQQSAIF9HRETYYPXAZ9YCFJRCXTGCOLJQA9HDLFNTVDMYPRCYPQR9MNBBAMGOJX'
            b'PRFCUSIIZN9VROZDPMOKZBCILKGB9EPCXOYWLPHFXTYBCMLRVHWNQDSQUIHHTAUTZC'
            b'JFQ9CO9GTONKYKMDBSREZC9SUBHYK9JDOBYDBUBUIO9TRXQLAYHDDSXGJ9NB9FKMUU'
            b'US9GANWVMQLIHX9MPJGLTAOMCZTQYDYVOWXHGHYCV9VDCXHGTCOOUEXIITVKHXCSUS'
            b'OIRTMEAKMTYZCMAWURNX9JOVDICICKHXQYBXKWTXWXBZVZWRIDC9YCZVSKYIKJYYMF'
            b'YQRTWBNJHWXRL9JFSZAXJYYTGDYLTHLWRMBUEG9QTGNRPVTBGRYFPEJQSIWTLPGV9C'
            b'CMCO9TCKLKSJEAMFKQMXEYETISVEYDOSCRZ99RFDPUQPHMQ9NVRUBXITDGFZCYQNFC'
            b'SULGRHPONWJDVWT9UELEKEPQEAFKDLDNYPABC9GUASVFJBFZF9Z9CHIUNLJWHKGDYK'
            b'ADLUCRNEPAIWYSX9LT9QWQRKU9WEVDPKSTSA9PPEVNTBNLN9ZOPETINXGKA9DCOHPD'
            b'QMMOOOCKYVEZJ9ZJQRJHNCKRFDRPHUVPGVGQYKZBLOILZTPIX9MIBKTXOJKVAYRLSX'
            b'DTOEEKLF9WWZGLSGIOQZWCJJHSBTXYWRDYVEQTCNUENYWDRLZZIVTGCXEAJDRY9OVM'
            b'XJGCSQSGYFLGYDZUH9EHUDQTCXLSDPMNDYQRZYRXYXKY9GIYOSIDQPXXHKJKDQLSCU'
            b'Y9FFBTPSTJFEFROCEXFFYTFYHQROAVTYKQOCOQQWBN9RKJ9JJEURKTVOECYRITTYKN'
            b'OGCD9OPQ9WDMKRPIUNRAVUSLFMC9WZWHSESGLDUYHVPAX9YJOFTTFSKFHTOOQQRCPY'
            b'ZKTDVCUZGBOBZKLVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VDFKWVEWV9WGXPJH'
            b'UBWYXGECBPQOPOHG9YCVXDWOXTEAOFBCEEAV9JCHUVLIRIMHXMUSZPOMMRBF9PLVLR'
            b'JYTXTBANBZWFQWGNGFGXFOZ9YGMQSZFEJHLFZTTVHRLJPATA9TYCM9LSEWMNEUDNWQ'
            b'FLUXOFUNVDKSNIIXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVG'
            b'XBBPBEBDVGZDBWMDMLPXYJBBRNOMKGPMCG9FTSLMRADFVPUTTEIOUCBLPRYZHGOICN'
            b'C9BT9WHJJJPDOSOMLD9EKRGKYUHUMMCAVHGYWOVQXFLTCXAAUDYKGKGKOYHLDCCQSK'
            b'NHJHPSXTJVTW9QPFOQ9FDZIDDKIVF9CDYGU9ABRESMDLIBONAQWFVGCNOTEDHBMCSU'
            b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHVIT'
            b'GYROZZNQP'
        ),
        TryteString(
            b'ZOJNUMZOBEHLYDSDAVZKXHF9MAHAJICBMJTZZHTQTCACVQAUSSCFUMGCSJTONNKXFI'
            b'NPOAXQIKSJ9GUV9GXM9KYDCDWUHULIJMSKMOLDZBYE9FTGFMKLODKHFF9YUCPTYFFM'
            b'9EDCJDCKRFLZUHGGYNYFJLBFWXCIUF9HMGUQKPUCJ9OQ99FXHSUSRRBEUSSCKCYPIE'
            b'AFZJQNXEUYWLEXKZWLRINBEGAZTJMYTUEQTTORMIIQASISHSHZDQJXANFLKOIRUEJU'
            b'PZZHUJFWHEXFIZ9OU99SQLDDNLARDFPGYSCMXQCMGPRB9QLM99QUBLTLTKWYXHVAFU'
            b'VVAMHEYCCNVEITSPVQWMSEIZJSLPWNGWISKWQNXCNRNOIGRGUHGYWLOFNXBDCT9JLA'
            b'9CEKW9BFGOESKGOQLJBTLUMOICBEZDHCR9SZCJUZVXIEAVITFJFDGNJII9LSW9IQKV'
            b'99UJWWAACGIRPCZUENXGILUXCMJIGW9REUNA99MWSANWL9KVKKXCKXLRGDT9NXIGQV'
            b'ZWG9NBQPOQKEEET9ZUSENFPGFDNNHGBITCPASGHOPBNYKKEHKHVATNVWX9ZGTISUKP'
            b'KTMWMPCGVVJSGMRJWNFICSFUAVAHIZWA9PDOIXFJGWCPTZHUDDUFJVQPBYNJREQ99U'
            b'HOESTT9FELDMVK9VHZYPRVOWEW9NXTCYDCIMT9UIWGXUFYILOPOCJFVVEJEJN9ULGX'
            b'IABFJWWRKAD9NHZBULMWUKESZLCPRQVVKWOHEWSTLOFNA9KNERURWJPROBBXEWICDK'
            b'KCQXWYMJUCQLWEUPFXRSNMIJWQUEJUNIKDYJILXCGCLFETWOZYIUZVJVYVB9YGXSSD'
            b'XYXSJXTOQZ9CCCAKMCNNKQCYEDGSGTBICCOGEHRIVMICUQPUUFRFCBF9NUUWSQBTVI'
            b'YFVWAASTQJZFDDWWUUIHPKTIIVAGGIEQCZUEVOFDMQLDESMQDPQUSOOKZJ9QLXTAFP'
            b'XXILFHFUIFJTKSEHXXZBPTZUGLYUZNORFOEKQDEIWGXZPBXSOGGQFILUJTKDLWVKPV'
            b'ISU9QOATYVKJHLDLOKROZNFAGS9CICXXIUQQVLLRPPPDYJVSCW9OWIHKADCVSKPWTE'
            b'NYEWQWEHP9DDWOUJDWSTSOGYQPALFMKCTUGLSXHNYETTMYTS999SYQVQSPHQPKRJSU'
            b'Y9QTABAJOJAAMGVBCSLAAOBXZOJZLIFXUYOVXBKHPFVTKKGSIHUXMBDTMGNVL9NXYC'
            b'HOVTLGDICIWTCIGNRHLBZBVSXMPBFAWIXPCDJWNDUFHUVLBSPBWICZNYIUJPRRTOCS'
            b'SCVPNBXEDCMHKFVDMHJTSP9JI9BXTD9ZILEEOCBMHCQRRDNL9EUKJGJ9MPQGQU9ZFY'
            b'GVSNOYAEC9NWTCVEJBSXLYWTUPMXNAAWXSBIAJYSGYHGLYOMAHFTYMICZRDZTQXHAQ'
            b'GVXENKIGW9XZTPBAIMZLHWAJCGY9ZDNQOTGDRCTXSJCEJVTTMVRYYKWAFYSV9WVEVC'
            b'FAXJKJNUC9NQHPEXWIOHOJQEXJNLEW9GLO9AJCJXIEXDONOGKXFJ9OXXXETUEHLBXA'
            b'JGFPHKAQDCRTKQBXPZYQZBQODTVIBUTSAEXMBFBMTAXOQZCOHWEWRJEKNKHZXXSO9U'
            b'SZRWUPZAASWDBXOVAEGSAGYDIOZWSSEAIQVRWFDSOXSRRRQHRCWDJWZXXJOGPZRLKQ'
            b'OA9DOY9RXZNWBFJTKUOVRRQNSDUOFGCUQNHOBMJSFQZXVBPHHBRRIXZNLXAH9P9EFM'
            b'GRPGSCFRZINEPOQPXPKHTSRJWARXRGJGYMTPUKQISLV9GUC9VTJLOISKGUZCTZEYND'
            b'TURLBPXGNQLVXHAHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHV'
            b'HE9EEBQYSNWECSPAJHGLTEUCXALBRVTKMWSWCBPUMZFVSEEFIHBAGJVVQV9QLFEGGY'
            b'VPNSDOBZEQGLEFLCQVPDJA9MQDRHYNVZVNTYNJ9GJCXKED9NEWTD9RVMNA9HOHUBLL'
            b'ASNQSDLDZKOMFOEGBJZPYVYZCVHYFEGSVEHSWV9WAGMEQIUDZQZUACWYQLTD9LHBVK'
            b'KNXXXDWQUWRJKTCDP9CEJOHLLPTWCIKKHHIFAFFDVMFZR9A9LYVMTQAPAXAVPJOZKW'
            b'FQNAJTO99'
        ),
    ])
    # Patch both collaborators, then run the command with an explicit
    # change address (``changeAddress``) and a single 42i transfer.
    with patch(
        'cornode.transaction.ProposedBundle._create_signature_fragment_generator',
        Mock(return_value=mock_signature_fragment_generator),
    ):
        with patch(
            'cornode.commands.extended.get_inputs.GetInputsCommand._execute',
            mock_get_inputs,
        ):
            response = self.command(
                seed = Seed(
                    b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
                    b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
                ),
                transfers = [
                    ProposedTransaction(
                        value = 42,
                        address = Address(
                            b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
                            b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
                        ),
                    ),
                ],
                changeAddress =
                    Address(
                        b'TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJ'
                        b'RBOSBIMNTGDYKUDYYFJFGZOHORYSQPCWJRKHIOVIY',
                    ),
            )
    # NOTE(review): the expected trytes are listed change-first and
    # spend-last (see per-item comments below) — presumably the reverse
    # bundle order expected by ``attachToTangle``; confirm against the
    # command implementation.
    self.assertDictEqual(
        response,
        {
            'trytes': [
                # Change transaction, Part 1 of 1
                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
                    b'UDYYFJFGZOHORYSQPCWJRKHIOVIYQB9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99C99999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
                # Input #1, Part 2 of 2
                TryteString(
                    b'ZOJNUMZOBEHLYDSDAVZKXHF9MAHAJICBMJTZZHTQTCACVQAUSSCFUMGCSJTONNKX'
                    b'FINPOAXQIKSJ9GUV9GXM9KYDCDWUHULIJMSKMOLDZBYE9FTGFMKLODKHFF9YUCPT'
                    b'YFFM9EDCJDCKRFLZUHGGYNYFJLBFWXCIUF9HMGUQKPUCJ9OQ99FXHSUSRRBEUSSC'
                    b'KCYPIEAFZJQNXEUYWLEXKZWLRINBEGAZTJMYTUEQTTORMIIQASISHSHZDQJXANFL'
                    b'KOIRUEJUPZZHUJFWHEXFIZ9OU99SQLDDNLARDFPGYSCMXQCMGPRB9QLM99QUBLTL'
                    b'TKWYXHVAFUVVAMHEYCCNVEITSPVQWMSEIZJSLPWNGWISKWQNXCNRNOIGRGUHGYWL'
                    b'OFNXBDCT9JLA9CEKW9BFGOESKGOQLJBTLUMOICBEZDHCR9SZCJUZVXIEAVITFJFD'
                    b'GNJII9LSW9IQKV99UJWWAACGIRPCZUENXGILUXCMJIGW9REUNA99MWSANWL9KVKK'
                    b'XCKXLRGDT9NXIGQVZWG9NBQPOQKEEET9ZUSENFPGFDNNHGBITCPASGHOPBNYKKEH'
                    b'KHVATNVWX9ZGTISUKPKTMWMPCGVVJSGMRJWNFICSFUAVAHIZWA9PDOIXFJGWCPTZ'
                    b'HUDDUFJVQPBYNJREQ99UHOESTT9FELDMVK9VHZYPRVOWEW9NXTCYDCIMT9UIWGXU'
                    b'FYILOPOCJFVVEJEJN9ULGXIABFJWWRKAD9NHZBULMWUKESZLCPRQVVKWOHEWSTLO'
                    b'FNA9KNERURWJPROBBXEWICDKKCQXWYMJUCQLWEUPFXRSNMIJWQUEJUNIKDYJILXC'
                    b'GCLFETWOZYIUZVJVYVB9YGXSSDXYXSJXTOQZ9CCCAKMCNNKQCYEDGSGTBICCOGEH'
                    b'RIVMICUQPUUFRFCBF9NUUWSQBTVIYFVWAASTQJZFDDWWUUIHPKTIIVAGGIEQCZUE'
                    b'VOFDMQLDESMQDPQUSOOKZJ9QLXTAFPXXILFHFUIFJTKSEHXXZBPTZUGLYUZNORFO'
                    b'EKQDEIWGXZPBXSOGGQFILUJTKDLWVKPVISU9QOATYVKJHLDLOKROZNFAGS9CICXX'
                    b'IUQQVLLRPPPDYJVSCW9OWIHKADCVSKPWTENYEWQWEHP9DDWOUJDWSTSOGYQPALFM'
                    b'KCTUGLSXHNYETTMYTS999SYQVQSPHQPKRJSUY9QTABAJOJAAMGVBCSLAAOBXZOJZ'
                    b'LIFXUYOVXBKHPFVTKKGSIHUXMBDTMGNVL9NXYCHOVTLGDICIWTCIGNRHLBZBVSXM'
                    b'PBFAWIXPCDJWNDUFHUVLBSPBWICZNYIUJPRRTOCSSCVPNBXEDCMHKFVDMHJTSP9J'
                    b'I9BXTD9ZILEEOCBMHCQRRDNL9EUKJGJ9MPQGQU9ZFYGVSNOYAEC9NWTCVEJBSXLY'
                    b'WTUPMXNAAWXSBIAJYSGYHGLYOMAHFTYMICZRDZTQXHAQGVXENKIGW9XZTPBAIMZL'
                    b'HWAJCGY9ZDNQOTGDRCTXSJCEJVTTMVRYYKWAFYSV9WVEVCFAXJKJNUC9NQHPEXWI'
                    b'OHOJQEXJNLEW9GLO9AJCJXIEXDONOGKXFJ9OXXXETUEHLBXAJGFPHKAQDCRTKQBX'
                    b'PZYQZBQODTVIBUTSAEXMBFBMTAXOQZCOHWEWRJEKNKHZXXSO9USZRWUPZAASWDBX'
                    b'OVAEGSAGYDIOZWSSEAIQVRWFDSOXSRRRQHRCWDJWZXXJOGPZRLKQOA9DOY9RXZNW'
                    b'BFJTKUOVRRQNSDUOFGCUQNHOBMJSFQZXVBPHHBRRIXZNLXAH9P9EFMGRPGSCFRZI'
                    b'NEPOQPXPKHTSRJWARXRGJGYMTPUKQISLV9GUC9VTJLOISKGUZCTZEYNDTURLBPXG'
                    b'NQLVXHAHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHVHE9EEB'
                    b'QYSNWECSPAJHGLTEUCXALBRVTKMWSWCBPUMZFVSEEFIHBAGJVVQV9QLFEGGYVPNS'
                    b'DOBZEQGLEFLCQVPDJA9MQDRHYNVZVNTYNJ9GJCXKED9NEWTD9RVMNA9HOHUBLLAS'
                    b'NQSDLDZKOMFOEGBJZPYVYZCVHYFEGSVEHSWV9WAGMEQIUDZQZUACWYQLTD9LHBVK'
                    b'KNXXXDWQUWRJKTCDP9CEJOHLLPTWCIKKHHIFAFFDVMFZR9A9LYVMTQAPAXAVPJOZ'
                    b'KWFQNAJTO99TESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
                    b'KAOSD9WEYBHIUM9LWUWKEFSQOCVW999999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99B99999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
                # Input #1, Part 1 of 2
                TryteString(
                    b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWG'
                    b'HMNUXBJTOBKZFDNJEZUKCKWGUHVSU9ZJYAVSQSOFDCOIEP9LCXYLTEFMCYUJAAHL'
                    b'YUHQP99S9XRWHXHRPZCWHDMIDYW9OQAWUPTFMBTJGDCWRVNVRDPIWISVYNUDWUGB'
                    b'PNNFZDWRVZ9FGAVSEWFXRXGGLXJTPJTJLC9JYHMFBKYAUJRAMHQHKUUZHRWZIVC9'
                    b'KFEEXXVNEXJRYUSFV9PEPFUDCNRRTSCZXSTUEGJKDV9UCYNZSBRDYGOKFGYKWVFC'
                    b'YSWBUJYVGEUXWTDGPWTWURH9RKEZRFCUUBFBPKSFONMDXWGYKWAUWVUOQVBIGQMM'
                    b'KQVDYAZ9SVFIUUNMHOJGRQVXZGIIPKVNNBKABGKZLRNFK9KSIHTCGYPVCWYGDS9O'
                    b'IZWLNINYRLGJQCUBWYMAVDWFAURLALQPMRMFRAZCMCPOWM99SGBVEZPAFAXHXNEN'
                    b'NWXLF9ZVHZIDWBLFKVWKBUYNBXOXTVPDWAGZXIOMDAEKNMRFGZVIGIFOSHGMPIPW'
                    b'NOWQDMHPKOJTYYECKNGCDDTJVALGPZSX9IH9LEGQSDACLBWKNXUW9BAZSHAISUJD'
                    b'TPJDOASLVRXFNJJHXQTKMKZUZIMJFPOKHEQXSCJQH9JPRNZHDVVZKWTHWWFNFMHF'
                    b'XPUIEEA9HPHJTCJJWZPUHKAAWJQQSAIF9HRETYYPXAZ9YCFJRCXTGCOLJQA9HDLF'
                    b'NTVDMYPRCYPQR9MNBBAMGOJXPRFCUSIIZN9VROZDPMOKZBCILKGB9EPCXOYWLPHF'
                    b'XTYBCMLRVHWNQDSQUIHHTAUTZCJFQ9CO9GTONKYKMDBSREZC9SUBHYK9JDOBYDBU'
                    b'BUIO9TRXQLAYHDDSXGJ9NB9FKMUUUS9GANWVMQLIHX9MPJGLTAOMCZTQYDYVOWXH'
                    b'GHYCV9VDCXHGTCOOUEXIITVKHXCSUSOIRTMEAKMTYZCMAWURNX9JOVDICICKHXQY'
                    b'BXKWTXWXBZVZWRIDC9YCZVSKYIKJYYMFYQRTWBNJHWXRL9JFSZAXJYYTGDYLTHLW'
                    b'RMBUEG9QTGNRPVTBGRYFPEJQSIWTLPGV9CCMCO9TCKLKSJEAMFKQMXEYETISVEYD'
                    b'OSCRZ99RFDPUQPHMQ9NVRUBXITDGFZCYQNFCSULGRHPONWJDVWT9UELEKEPQEAFK'
                    b'DLDNYPABC9GUASVFJBFZF9Z9CHIUNLJWHKGDYKADLUCRNEPAIWYSX9LT9QWQRKU9'
                    b'WEVDPKSTSA9PPEVNTBNLN9ZOPETINXGKA9DCOHPDQMMOOOCKYVEZJ9ZJQRJHNCKR'
                    b'FDRPHUVPGVGQYKZBLOILZTPIX9MIBKTXOJKVAYRLSXDTOEEKLF9WWZGLSGIOQZWC'
                    b'JJHSBTXYWRDYVEQTCNUENYWDRLZZIVTGCXEAJDRY9OVMXJGCSQSGYFLGYDZUH9EH'
                    b'UDQTCXLSDPMNDYQRZYRXYXKY9GIYOSIDQPXXHKJKDQLSCUY9FFBTPSTJFEFROCEX'
                    b'FFYTFYHQROAVTYKQOCOQQWBN9RKJ9JJEURKTVOECYRITTYKNOGCD9OPQ9WDMKRPI'
                    b'UNRAVUSLFMC9WZWHSESGLDUYHVPAX9YJOFTTFSKFHTOOQQRCPYZKTDVCUZGBOBZK'
                    b'LVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VDFKWVEWV9WGXPJHUBWYXGECBPQO'
                    b'POHG9YCVXDWOXTEAOFBCEEAV9JCHUVLIRIMHXMUSZPOMMRBF9PLVLRJYTXTBANBZ'
                    b'WFQWGNGFGXFOZ9YGMQSZFEJHLFZTTVHRLJPATA9TYCM9LSEWMNEUDNWQFLUXOFUN'
                    b'VDKSNIIXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVGXBBPBE'
                    b'BDVGZDBWMDMLPXYJBBRNOMKGPMCG9FTSLMRADFVPUTTEIOUCBLPRYZHGOICNC9BT'
                    b'9WHJJJPDOSOMLD9EKRGKYUHUMMCAVHGYWOVQXFLTCXAAUDYKGKGKOYHLDCCQSKNH'
                    b'JHPSXTJVTW9QPFOQ9FDZIDDKIVF9CDYGU9ABRESMDLIBONAQWFVGCNOTEDHBMCSU'
                    b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHV'
                    b'ITGYROZZNQPTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
                    b'KAOSD9WEYBHIUM9LWUWKEFSQOCVWVX9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99A99999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
                # Spend transaction, Part 1 of 1
                TryteString(
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'99999999999TESTVALUETWO9DONTUSEINPRODUCTION99999XYYNXZLKBYNFPXA9'
                    b'RUGZVEGVPLLFJEM9ZZOUINE9ONOWOB9999999999999999999999999999999999'
                    b'999999999999999999NYBKIVD99999999999C99999999VEUNVMI9BSZTFZMGEZJ'
                    b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999999999999999999'
                    b'9999999999999999999999999999999999999999999999999'
                ),
            ],
        },
    )
def test_fail_inputs_implicit_insufficient(self):
    """
    Account's total balance is not enough to cover spend amount.
    """
    # ``prepareTransfer`` discovers inputs via the ``getInputs``
    # command; simulate that command failing and verify that the
    # error propagates to the caller instead of being swallowed.
    #
    # References:
    #   - :py:class:`cornode.commands.extended.prepare_transfer.PrepareTransferCommand`
    #   - :py:class:`cornode.commands.extended.get_inputs.GetInputsCommand`
    failing_get_inputs = Mock(side_effect=BadApiResponse)
    with patch(
        'cornode.commands.extended.get_inputs.GetInputsCommand._execute',
        failing_get_inputs,
    ), self.assertRaises(BadApiResponse):
        self.command(
            seed = Seed(
                b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
                b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
            ),
            transfers = [
                ProposedTransaction(
                    value = 42,
                    address = Address(
                        b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
                        b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
                    ),
                ),
            ],
        )
def test_pass_change_address_auto_generated(self):
"""
Preparing a bundle with an auto-generated change address.
"""
# To keep the unit test focused, we will mock the ``getNewAddresses``
# command that ``prepareTransfer`` calls internally.
#
# References:
# - :py:class:`cornode.commands.extended.prepare_transfer.PrepareTransferCommand`
# - :py:class:`cornode.commands.extended.get_new_addresses.GetNewAddressesCommand`
mock_get_new_addresses_command = Mock(return_value={
'addresses': [
Address(
trytes =
b'TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJ'
b'RBOSBIMNTGDYKUDYYFJFGZOHORYSQPCWJRKHIOVIY',
key_index = 5,
),
],
})
self.adapter.seed_response('getBalances', {
'balances': [86],
'duration': '1',
'milestone':
'TESTVALUE9DONTUSEINPRODUCTION99999ZNIUXU'
'FIVFBBYQHFYZYIEEWZL9VPMMKIIYTEZRRHXJXKIKF',
})
mock_signature_fragment_generator = MockSignatureFragmentGenerator([
TryteString(
b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWGHM'
b'NUXBJTOBKZFDNJEZUKCKWGUHVSU9ZJYAVSQSOFDCOIEP9LCXYLTEFMCYUJAAHLYUHQ'
b'P99S9XRWHXHRPZCWHDMIDYW9OQAWUPTFMBTJGDCWRVNVRDPIWISVYNUDWUGBPNNFZD'
b'WRVZ9FGAVSEWFXRXGGLXJTPJTJLC9JYHMFBKYAUJRAMHQHKUUZHRWZIVC9KFEEXXVN'
b'EXJRYUSFV9PEPFUDCNRRTSCZXSTUEGJKDV9UCYNZSBRDYGOKFGYKWVFCYSWBUJYVGE'
b'UXWTDGPWTWURH9RKEZRFCUUBFBPKSFONMDXWGYKWAUWVUOQVBIGQMMKQVDYAZ9SVFI'
b'UUNMHOJGRQVXZGIIPKVNNBKABGKZLRNFK9KSIHTCGYPVCWYGDS9OIZWLNINYRLGJQC'
b'UBWYMAVDWFAURLALQPMRMFRAZCMCPOWM99SGBVEZPAFAXHXNENNWXLF9ZVHZIDWBLF'
b'KVWKBUYNBXOXTVPDWAGZXIOMDAEKNMRFGZVIGIFOSHGMPIPWNOWQDMHPKOJTYYECKN'
b'GCDDTJVALGPZSX9IH9LEGQSDACLBWKNXUW9BAZSHAISUJDTPJDOASLVRXFNJJHXQTK'
b'MKZUZIMJFPOKHEQXSCJQH9JPRNZHDVVZKWTHWWFNFMHFXPUIEEA9HPHJTCJJWZPUHK'
b'AAWJQQSAIF9HRETYYPXAZ9YCFJRCXTGCOLJQA9HDLFNTVDMYPRCYPQR9MNBBAMGOJX'
b'PRFCUSIIZN9VROZDPMOKZBCILKGB9EPCXOYWLPHFXTYBCMLRVHWNQDSQUIHHTAUTZC'
b'JFQ9CO9GTONKYKMDBSREZC9SUBHYK9JDOBYDBUBUIO9TRXQLAYHDDSXGJ9NB9FKMUU'
b'US9GANWVMQLIHX9MPJGLTAOMCZTQYDYVOWXHGHYCV9VDCXHGTCOOUEXIITVKHXCSUS'
b'OIRTMEAKMTYZCMAWURNX9JOVDICICKHXQYBXKWTXWXBZVZWRIDC9YCZVSKYIKJYYMF'
b'YQRTWBNJHWXRL9JFSZAXJYYTGDYLTHLWRMBUEG9QTGNRPVTBGRYFPEJQSIWTLPGV9C'
b'CMCO9TCKLKSJEAMFKQMXEYETISVEYDOSCRZ99RFDPUQPHMQ9NVRUBXITDGFZCYQNFC'
b'SULGRHPONWJDVWT9UELEKEPQEAFKDLDNYPABC9GUASVFJBFZF9Z9CHIUNLJWHKGDYK'
b'ADLUCRNEPAIWYSX9LT9QWQRKU9WEVDPKSTSA9PPEVNTBNLN9ZOPETINXGKA9DCOHPD'
b'QMMOOOCKYVEZJ9ZJQRJHNCKRFDRPHUVPGVGQYKZBLOILZTPIX9MIBKTXOJKVAYRLSX'
b'DTOEEKLF9WWZGLSGIOQZWCJJHSBTXYWRDYVEQTCNUENYWDRLZZIVTGCXEAJDRY9OVM'
b'XJGCSQSGYFLGYDZUH9EHUDQTCXLSDPMNDYQRZYRXYXKY9GIYOSIDQPXXHKJKDQLSCU'
b'Y9FFBTPSTJFEFROCEXFFYTFYHQROAVTYKQOCOQQWBN9RKJ9JJEURKTVOECYRITTYKN'
b'OGCD9OPQ9WDMKRPIUNRAVUSLFMC9WZWHSESGLDUYHVPAX9YJOFTTFSKFHTOOQQRCPY'
b'ZKTDVCUZGBOBZKLVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VDFKWVEWV9WGXPJH'
b'UBWYXGECBPQOPOHG9YCVXDWOXTEAOFBCEEAV9JCHUVLIRIMHXMUSZPOMMRBF9PLVLR'
b'JYTXTBANBZWFQWGNGFGXFOZ9YGMQSZFEJHLFZTTVHRLJPATA9TYCM9LSEWMNEUDNWQ'
b'FLUXOFUNVDKSNIIXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVG'
b'XBBPBEBDVGZDBWMDMLPXYJBBRNOMKGPMCG9FTSLMRADFVPUTTEIOUCBLPRYZHGOICN'
b'C9BT9WHJJJPDOSOMLD9EKRGKYUHUMMCAVHGYWOVQXFLTCXAAUDYKGKGKOYHLDCCQSK'
b'NHJHPSXTJVTW9QPFOQ9FDZIDDKIVF9CDYGU9ABRESMDLIBONAQWFVGCNOTEDHBMCSU'
b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHVIT'
b'GYROZZNQP'
),
TryteString(
b'ZOJNUMZOBEHLYDSDAVZKXHF9MAHAJICBMJTZZHTQTCACVQAUSSCFUMGCSJTONNKXFI'
b'NPOAXQIKSJ9GUV9GXM9KYDCDWUHULIJMSKMOLDZBYE9FTGFMKLODKHFF9YUCPTYFFM'
b'9EDCJDCKRFLZUHGGYNYFJLBFWXCIUF9HMGUQKPUCJ9OQ99FXHSUSRRBEUSSCKCYPIE'
b'AFZJQNXEUYWLEXKZWLRINBEGAZTJMYTUEQTTORMIIQASISHSHZDQJXANFLKOIRUEJU'
b'PZZHUJFWHEXFIZ9OU99SQLDDNLARDFPGYSCMXQCMGPRB9QLM99QUBLTLTKWYXHVAFU'
b'VVAMHEYCCNVEITSPVQWMSEIZJSLPWNGWISKWQNXCNRNOIGRGUHGYWLOFNXBDCT9JLA'
b'9CEKW9BFGOESKGOQLJBTLUMOICBEZDHCR9SZCJUZVXIEAVITFJFDGNJII9LSW9IQKV'
b'99UJWWAACGIRPCZUENXGILUXCMJIGW9REUNA99MWSANWL9KVKKXCKXLRGDT9NXIGQV'
b'ZWG9NBQPOQKEEET9ZUSENFPGFDNNHGBITCPASGHOPBNYKKEHKHVATNVWX9ZGTISUKP'
b'KTMWMPCGVVJSGMRJWNFICSFUAVAHIZWA9PDOIXFJGWCPTZHUDDUFJVQPBYNJREQ99U'
b'HOESTT9FELDMVK9VHZYPRVOWEW9NXTCYDCIMT9UIWGXUFYILOPOCJFVVEJEJN9ULGX'
b'IABFJWWRKAD9NHZBULMWUKESZLCPRQVVKWOHEWSTLOFNA9KNERURWJPROBBXEWICDK'
b'KCQXWYMJUCQLWEUPFXRSNMIJWQUEJUNIKDYJILXCGCLFETWOZYIUZVJVYVB9YGXSSD'
b'XYXSJXTOQZ9CCCAKMCNNKQCYEDGSGTBICCOGEHRIVMICUQPUUFRFCBF9NUUWSQBTVI'
b'YFVWAASTQJZFDDWWUUIHPKTIIVAGGIEQCZUEVOFDMQLDESMQDPQUSOOKZJ9QLXTAFP'
b'XXILFHFUIFJTKSEHXXZBPTZUGLYUZNORFOEKQDEIWGXZPBXSOGGQFILUJTKDLWVKPV'
b'ISU9QOATYVKJHLDLOKROZNFAGS9CICXXIUQQVLLRPPPDYJVSCW9OWIHKADCVSKPWTE'
b'NYEWQWEHP9DDWOUJDWSTSOGYQPALFMKCTUGLSXHNYETTMYTS999SYQVQSPHQPKRJSU'
b'Y9QTABAJOJAAMGVBCSLAAOBXZOJZLIFXUYOVXBKHPFVTKKGSIHUXMBDTMGNVL9NXYC'
b'HOVTLGDICIWTCIGNRHLBZBVSXMPBFAWIXPCDJWNDUFHUVLBSPBWICZNYIUJPRRTOCS'
b'SCVPNBXEDCMHKFVDMHJTSP9JI9BXTD9ZILEEOCBMHCQRRDNL9EUKJGJ9MPQGQU9ZFY'
b'GVSNOYAEC9NWTCVEJBSXLYWTUPMXNAAWXSBIAJYSGYHGLYOMAHFTYMICZRDZTQXHAQ'
b'GVXENKIGW9XZTPBAIMZLHWAJCGY9ZDNQOTGDRCTXSJCEJVTTMVRYYKWAFYSV9WVEVC'
b'FAXJKJNUC9NQHPEXWIOHOJQEXJNLEW9GLO9AJCJXIEXDONOGKXFJ9OXXXETUEHLBXA'
b'JGFPHKAQDCRTKQBXPZYQZBQODTVIBUTSAEXMBFBMTAXOQZCOHWEWRJEKNKHZXXSO9U'
b'SZRWUPZAASWDBXOVAEGSAGYDIOZWSSEAIQVRWFDSOXSRRRQHRCWDJWZXXJOGPZRLKQ'
b'OA9DOY9RXZNWBFJTKUOVRRQNSDUOFGCUQNHOBMJSFQZXVBPHHBRRIXZNLXAH9P9EFM'
b'GRPGSCFRZINEPOQPXPKHTSRJWARXRGJGYMTPUKQISLV9GUC9VTJLOISKGUZCTZEYND'
b'TURLBPXGNQLVXHAHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHV'
b'HE9EEBQYSNWECSPAJHGLTEUCXALBRVTKMWSWCBPUMZFVSEEFIHBAGJVVQV9QLFEGGY'
b'VPNSDOBZEQGLEFLCQVPDJA9MQDRHYNVZVNTYNJ9GJCXKED9NEWTD9RVMNA9HOHUBLL'
b'ASNQSDLDZKOMFOEGBJZPYVYZCVHYFEGSVEHSWV9WAGMEQIUDZQZUACWYQLTD9LHBVK'
b'KNXXXDWQUWRJKTCDP9CEJOHLLPTWCIKKHHIFAFFDVMFZR9A9LYVMTQAPAXAVPJOZKW'
b'FQNAJTO99'
),
])
with patch(
'cornode.transaction.ProposedBundle._create_signature_fragment_generator',
Mock(return_value=mock_signature_fragment_generator),
):
with patch(
'cornode.commands.extended.get_new_addresses.GetNewAddressesCommand._execute',
mock_get_new_addresses_command,
):
response = self.command(
seed = Seed(
b'TESTVALUEONE9DONTUSEINPRODUCTION99999C9V'
b'C9RHFCQAIGSFICL9HIY9ZEUATFVHFGAEUHSECGQAK'
),
transfers = [
ProposedTransaction(
value = 42,
address = Address(
b'TESTVALUETWO9DONTUSEINPRODUCTION99999XYY'
b'NXZLKBYNFPXA9RUGZVEGVPLLFJEM9ZZOUINE9ONOW'
),
),
],
inputs = [
Address(
trytes =
b'TESTVALUETHREE9DONTUSEINPRODUCTION99999N'
b'UMQE9RGHNRRSKKAOSD9WEYBHIUM9LWUWKEFSQOCVW',
key_index = 4,
),
],
)
self.assertDictEqual(
response,
{
'trytes': [
# Change transaction, Part 1 of 1
TryteString(
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'99999999999TESTVALUEFOUR9DONTUSEINPRODUCTION99999WJRBOSBIMNTGDYK'
b'UDYYFJFGZOHORYSQPCWJRKHIOVIYQB9999999999999999999999999999999999'
b'999999999999999999NYBKIVD99C99999999C99999999VEUNVMI9BSZTFZMGEZJ'
b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Input #1, Part 2 of 2
TryteString(
b'ZOJNUMZOBEHLYDSDAVZKXHF9MAHAJICBMJTZZHTQTCACVQAUSSCFUMGCSJTONNKX'
b'FINPOAXQIKSJ9GUV9GXM9KYDCDWUHULIJMSKMOLDZBYE9FTGFMKLODKHFF9YUCPT'
b'YFFM9EDCJDCKRFLZUHGGYNYFJLBFWXCIUF9HMGUQKPUCJ9OQ99FXHSUSRRBEUSSC'
b'KCYPIEAFZJQNXEUYWLEXKZWLRINBEGAZTJMYTUEQTTORMIIQASISHSHZDQJXANFL'
b'KOIRUEJUPZZHUJFWHEXFIZ9OU99SQLDDNLARDFPGYSCMXQCMGPRB9QLM99QUBLTL'
b'TKWYXHVAFUVVAMHEYCCNVEITSPVQWMSEIZJSLPWNGWISKWQNXCNRNOIGRGUHGYWL'
b'OFNXBDCT9JLA9CEKW9BFGOESKGOQLJBTLUMOICBEZDHCR9SZCJUZVXIEAVITFJFD'
b'GNJII9LSW9IQKV99UJWWAACGIRPCZUENXGILUXCMJIGW9REUNA99MWSANWL9KVKK'
b'XCKXLRGDT9NXIGQVZWG9NBQPOQKEEET9ZUSENFPGFDNNHGBITCPASGHOPBNYKKEH'
b'KHVATNVWX9ZGTISUKPKTMWMPCGVVJSGMRJWNFICSFUAVAHIZWA9PDOIXFJGWCPTZ'
b'HUDDUFJVQPBYNJREQ99UHOESTT9FELDMVK9VHZYPRVOWEW9NXTCYDCIMT9UIWGXU'
b'FYILOPOCJFVVEJEJN9ULGXIABFJWWRKAD9NHZBULMWUKESZLCPRQVVKWOHEWSTLO'
b'FNA9KNERURWJPROBBXEWICDKKCQXWYMJUCQLWEUPFXRSNMIJWQUEJUNIKDYJILXC'
b'GCLFETWOZYIUZVJVYVB9YGXSSDXYXSJXTOQZ9CCCAKMCNNKQCYEDGSGTBICCOGEH'
b'RIVMICUQPUUFRFCBF9NUUWSQBTVIYFVWAASTQJZFDDWWUUIHPKTIIVAGGIEQCZUE'
b'VOFDMQLDESMQDPQUSOOKZJ9QLXTAFPXXILFHFUIFJTKSEHXXZBPTZUGLYUZNORFO'
b'EKQDEIWGXZPBXSOGGQFILUJTKDLWVKPVISU9QOATYVKJHLDLOKROZNFAGS9CICXX'
b'IUQQVLLRPPPDYJVSCW9OWIHKADCVSKPWTENYEWQWEHP9DDWOUJDWSTSOGYQPALFM'
b'KCTUGLSXHNYETTMYTS999SYQVQSPHQPKRJSUY9QTABAJOJAAMGVBCSLAAOBXZOJZ'
b'LIFXUYOVXBKHPFVTKKGSIHUXMBDTMGNVL9NXYCHOVTLGDICIWTCIGNRHLBZBVSXM'
b'PBFAWIXPCDJWNDUFHUVLBSPBWICZNYIUJPRRTOCSSCVPNBXEDCMHKFVDMHJTSP9J'
b'I9BXTD9ZILEEOCBMHCQRRDNL9EUKJGJ9MPQGQU9ZFYGVSNOYAEC9NWTCVEJBSXLY'
b'WTUPMXNAAWXSBIAJYSGYHGLYOMAHFTYMICZRDZTQXHAQGVXENKIGW9XZTPBAIMZL'
b'HWAJCGY9ZDNQOTGDRCTXSJCEJVTTMVRYYKWAFYSV9WVEVCFAXJKJNUC9NQHPEXWI'
b'OHOJQEXJNLEW9GLO9AJCJXIEXDONOGKXFJ9OXXXETUEHLBXAJGFPHKAQDCRTKQBX'
b'PZYQZBQODTVIBUTSAEXMBFBMTAXOQZCOHWEWRJEKNKHZXXSO9USZRWUPZAASWDBX'
b'OVAEGSAGYDIOZWSSEAIQVRWFDSOXSRRRQHRCWDJWZXXJOGPZRLKQOA9DOY9RXZNW'
b'BFJTKUOVRRQNSDUOFGCUQNHOBMJSFQZXVBPHHBRRIXZNLXAH9P9EFMGRPGSCFRZI'
b'NEPOQPXPKHTSRJWARXRGJGYMTPUKQISLV9GUC9VTJLOISKGUZCTZEYNDTURLBPXG'
b'NQLVXHAHUVNGIHVMZOHLEUBDTRFXFXXVRYBRUF9ULNMSZZOZBYDJUWTMHVHE9EEB'
b'QYSNWECSPAJHGLTEUCXALBRVTKMWSWCBPUMZFVSEEFIHBAGJVVQV9QLFEGGYVPNS'
b'DOBZEQGLEFLCQVPDJA9MQDRHYNVZVNTYNJ9GJCXKED9NEWTD9RVMNA9HOHUBLLAS'
b'NQSDLDZKOMFOEGBJZPYVYZCVHYFEGSVEHSWV9WAGMEQIUDZQZUACWYQLTD9LHBVK'
b'KNXXXDWQUWRJKTCDP9CEJOHLLPTWCIKKHHIFAFFDVMFZR9A9LYVMTQAPAXAVPJOZ'
b'KWFQNAJTO99TESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
b'KAOSD9WEYBHIUM9LWUWKEFSQOCVW999999999999999999999999999999999999'
b'999999999999999999NYBKIVD99B99999999C99999999VEUNVMI9BSZTFZMGEZJ'
b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Input #1, Part 1 of 2
TryteString(
b'OGTAZHXTC9FFCADHPLXKNQPKBWWOJGDCEKSHUPGLOFGXRNDRUWGKN9TYYKWVEWWG'
b'HMNUXBJTOBKZFDNJEZUKCKWGUHVSU9ZJYAVSQSOFDCOIEP9LCXYLTEFMCYUJAAHL'
b'YUHQP99S9XRWHXHRPZCWHDMIDYW9OQAWUPTFMBTJGDCWRVNVRDPIWISVYNUDWUGB'
b'PNNFZDWRVZ9FGAVSEWFXRXGGLXJTPJTJLC9JYHMFBKYAUJRAMHQHKUUZHRWZIVC9'
b'KFEEXXVNEXJRYUSFV9PEPFUDCNRRTSCZXSTUEGJKDV9UCYNZSBRDYGOKFGYKWVFC'
b'YSWBUJYVGEUXWTDGPWTWURH9RKEZRFCUUBFBPKSFONMDXWGYKWAUWVUOQVBIGQMM'
b'KQVDYAZ9SVFIUUNMHOJGRQVXZGIIPKVNNBKABGKZLRNFK9KSIHTCGYPVCWYGDS9O'
b'IZWLNINYRLGJQCUBWYMAVDWFAURLALQPMRMFRAZCMCPOWM99SGBVEZPAFAXHXNEN'
b'NWXLF9ZVHZIDWBLFKVWKBUYNBXOXTVPDWAGZXIOMDAEKNMRFGZVIGIFOSHGMPIPW'
b'NOWQDMHPKOJTYYECKNGCDDTJVALGPZSX9IH9LEGQSDACLBWKNXUW9BAZSHAISUJD'
b'TPJDOASLVRXFNJJHXQTKMKZUZIMJFPOKHEQXSCJQH9JPRNZHDVVZKWTHWWFNFMHF'
b'XPUIEEA9HPHJTCJJWZPUHKAAWJQQSAIF9HRETYYPXAZ9YCFJRCXTGCOLJQA9HDLF'
b'NTVDMYPRCYPQR9MNBBAMGOJXPRFCUSIIZN9VROZDPMOKZBCILKGB9EPCXOYWLPHF'
b'XTYBCMLRVHWNQDSQUIHHTAUTZCJFQ9CO9GTONKYKMDBSREZC9SUBHYK9JDOBYDBU'
b'BUIO9TRXQLAYHDDSXGJ9NB9FKMUUUS9GANWVMQLIHX9MPJGLTAOMCZTQYDYVOWXH'
b'GHYCV9VDCXHGTCOOUEXIITVKHXCSUSOIRTMEAKMTYZCMAWURNX9JOVDICICKHXQY'
b'BXKWTXWXBZVZWRIDC9YCZVSKYIKJYYMFYQRTWBNJHWXRL9JFSZAXJYYTGDYLTHLW'
b'RMBUEG9QTGNRPVTBGRYFPEJQSIWTLPGV9CCMCO9TCKLKSJEAMFKQMXEYETISVEYD'
b'OSCRZ99RFDPUQPHMQ9NVRUBXITDGFZCYQNFCSULGRHPONWJDVWT9UELEKEPQEAFK'
b'DLDNYPABC9GUASVFJBFZF9Z9CHIUNLJWHKGDYKADLUCRNEPAIWYSX9LT9QWQRKU9'
b'WEVDPKSTSA9PPEVNTBNLN9ZOPETINXGKA9DCOHPDQMMOOOCKYVEZJ9ZJQRJHNCKR'
b'FDRPHUVPGVGQYKZBLOILZTPIX9MIBKTXOJKVAYRLSXDTOEEKLF9WWZGLSGIOQZWC'
b'JJHSBTXYWRDYVEQTCNUENYWDRLZZIVTGCXEAJDRY9OVMXJGCSQSGYFLGYDZUH9EH'
b'UDQTCXLSDPMNDYQRZYRXYXKY9GIYOSIDQPXXHKJKDQLSCUY9FFBTPSTJFEFROCEX'
b'FFYTFYHQROAVTYKQOCOQQWBN9RKJ9JJEURKTVOECYRITTYKNOGCD9OPQ9WDMKRPI'
b'UNRAVUSLFMC9WZWHSESGLDUYHVPAX9YJOFTTFSKFHTOOQQRCPYZKTDVCUZGBOBZK'
b'LVBVBCWTUS9XOBJADZYN9TMLGCKXEXFEQFQ9VDFKWVEWV9WGXPJHUBWYXGECBPQO'
b'POHG9YCVXDWOXTEAOFBCEEAV9JCHUVLIRIMHXMUSZPOMMRBF9PLVLRJYTXTBANBZ'
b'WFQWGNGFGXFOZ9YGMQSZFEJHLFZTTVHRLJPATA9TYCM9LSEWMNEUDNWQFLUXOFUN'
b'VDKSNIIXCXVUYZZOKVYNNQDZVUQEQFWVF9EIQELSWDJXGMQRVUGGVBMRVGXBBPBE'
b'BDVGZDBWMDMLPXYJBBRNOMKGPMCG9FTSLMRADFVPUTTEIOUCBLPRYZHGOICNC9BT'
b'9WHJJJPDOSOMLD9EKRGKYUHUMMCAVHGYWOVQXFLTCXAAUDYKGKGKOYHLDCCQSKNH'
b'JHPSXTJVTW9QPFOQ9FDZIDDKIVF9CDYGU9ABRESMDLIBONAQWFVGCNOTEDHBMCSU'
b'H9GKYZPBX9QJELYYMSGDFU9EVTROODXVUAELBUKKXCDYNMHYBVAVUYGABCRIYOHV'
b'ITGYROZZNQPTESTVALUETHREE9DONTUSEINPRODUCTION99999NUMQE9RGHNRRSK'
b'KAOSD9WEYBHIUM9LWUWKEFSQOCVWVX9999999999999999999999999999999999'
b'999999999999999999NYBKIVD99A99999999C99999999VEUNVMI9BSZTFZMGEZJ'
b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
# Spend transaction, Part 1 of 1
TryteString(
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'99999999999TESTVALUETWO9DONTUSEINPRODUCTION99999XYYNXZLKBYNFPXA9'
b'RUGZVEGVPLLFJEM9ZZOUINE9ONOWOB9999999999999999999999999999999999'
b'999999999999999999NYBKIVD99999999999C99999999VEUNVMI9BSZTFZMGEZJ'
b'CPMPOTRTUR9PSISHCXAESJQU9CEYAGXVHBAXAFRWHQNAFHGNID9BAOMKSJJDEO99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
],
},
)
def test_pass_message_short(self):
"""
Adding a message to a transaction.
"""
response = self.command(
seed = Seed.random(),
transfers = [
ProposedTransaction(
tag = Tag(b'PYOTA9UNIT9TESTS9'),
value = 0,
address = Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999YMSWGX'
b'VNDMLXPT9HMVAOWUUZMLSJZFWGKDVGXPSQAWAEBJN'
),
message = TryteString.from_string('สวัสดีชาวโลก!'),
),
],
)
self.assertDictEqual(
response,
{
'trytes': [
TryteString(
# Note that the tryte sequence starts with the transaction
# message.
b'HHVFHFHHVFEFHHVFOFHHVFHFHHVFMEHHVFSFHHVFCEHHVFPFHHVFEFHHWFVDHHVF'
b'CFHHVFUDFA999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'99999999999TESTVALUE9DONTUSEINPRODUCTION99999YMSWGXVNDMLXPT9HMVA'
b'OWUUZMLSJZFWGKDVGXPSQAWAEBJN999999999999999999999999999PYOTA9UNI'
b'T9TESTS99999999999NYBKIVD99999999999999999999D9XYVJTKVWN9RUQAPIO'
b'JUXXTOQTWNMOKRKLUURUGERIIZLUURHPQWZMSYROAKYLZJEKSAMLRCVWEDINFK99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
],
},
)
def test_pass_message_long(self):
"""
The message is too long to fit into a single transaction.
"""
response = self.command(
seed = Seed.random(),
transfers = [
ProposedTransaction(
tag = Tag(b'PYOTA9UNIT9TESTS9'),
value = 0,
address = Address(
b'TESTVALUE9DONTUSEINPRODUCTION99999YMSWGX'
b'VNDMLXPT9HMVAOWUUZMLSJZFWGKDVGXPSQAWAEBJN'
),
message = TryteString.from_string(
'Вы не можете справиться правду! Сын, мы живем в мире, который '
'имеет стены. И эти стены должны быть охраняют люди с оружием. '
'Кто будет это делать? Вы? Вы, лейтенант Weinberg? У меня есть '
'большая ответственность, чем вы можете понять. Ты плачешь '
'Сантьяго и прокляни морских пехотинцев. У вас есть такой роскоши. '
'У вас есть роскошь, не зная, что я знаю: что смерть Сантьяго, в '
'то время как трагический, вероятно, спас жизнь. И мое '
'существование, в то время как гротеск и непонятными для вас, '
'спасает жизни ... Вы не хотите знать правду. Потому что в '
'глубине души, в тех местах, вы не говорите о на вечеринках, вы '
'хотите меня на этой стене. Вы должны меня на этой стене. Мы '
'используем такие слова, как честь, код, верность ... мы '
'используем эти слова в качестве основы к жизни провел, защищая '
'что-то. Вы можете использовать им, как пуанта. У меня нет ни '
'времени, ни желания, чтобы объясниться с человеком, который '
'поднимается и спит под одеялом самой свободы я обеспечиваю, то '
'ставит под сомнение то, каким образом я предоставить ему! Я бы '
'предпочел, чтобы вы просто сказал спасибо и пошел на своем пути. '
'В противном случае, я предлагаю вам подобрать оружие и встать '
'пост. В любом случае, я не наплевать, что вы думаете, что вы '
'имеете право!'
),
),
],
)
self.assertDictEqual(
response,
{
'trytes': [
# The message is so long that it has to be split across three
# separate transactions!
TryteString(
b'EASGBGTGTDSGNFSGPFSGAGFA9999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'99999999999TESTVALUE9DONTUSEINPRODUCTION99999YMSWGXVNDMLXPT9HMVA'
b'OWUUZMLSJZFWGKDVGXPSQAWAEBJN999999999999999999999999999PYOTA9UNI'
b'T9TESTS99999999999NYBKIVD99B99999999B99999999YJVDLFI9FFXKNVTUKHO'
b'PTZUWZPOTRTHNZ9YZDXFRVBAUGO9APIQQWFSCLGFQMLMVCEPCTBFAVMIIXHUPG99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
TryteString(
b'FSG9GTGHEEASG9GSGNFEATGFETGVDSGAGSGWFEATGUDTGVDSGSFSG9GSGSFSAEAS'
b'GKETGDEEASGRFSGAGSGYFSGTFSG9GTGDEEASGZFSGSFSG9GTGHEEASG9GSGNFEAT'
b'GFETGVDSGAGSGWFEATGUDTGVDSGSFSG9GSGSFSAEASGUETGDEEASGVFTGUDSGBGS'
b'GAGSGYFTGEESGUFTGWDSGSFSGZFEATGVDSGNFSGXFSGVFSGSFEATGUDSGYFSGAGS'
b'GPFSGNFQAEASGXFSGNFSGXFEATG9ESGSFTGUDTGVDTGEEQAEASGXFSGAGSGRFQAE'
b'ASGPFSGSFTGTDSG9GSGAGTGUDTGVDTGEEEASASASAEASGZFTGDEEASGVFTGUDSGB'
b'GSGAGSGYFTGEESGUFTGWDSGSFSGZFEATGFETGVDSGVFEATGUDSGYFSGAGSGPFSGN'
b'FEASGPFEASGXFSGNFTG9ESGSFTGUDTGVDSGPFSGSFEASGAGTGUDSG9GSGAGSGPFT'
b'GDEEASGXFEASGTFSGVFSGUFSG9GSGVFEASGBGTGTDSGAGSGPFSGSFSGYFQAEASGU'
b'FSGNFTGBESGVFTGBESGNFTGHEEATG9ETGVDSGAGRATGVDSGAGSAEASGKETGDEEAS'
b'GZFSGAGSGTFSGSFTGVDSGSFEASGVFTGUDSGBGSGAGSGYFTGEESGUFSGAGSGPFSGN'
b'FTGVDTGEEEASGVFSGZFQAEASGXFSGNFSGXFEASGBGTGWDSGNFSG9GTGVDSGNFSAE'
b'ASGAFEASGZFSGSFSG9GTGHEEASG9GSGSFTGVDEASG9GSGVFEASGPFTGTDSGSFSGZ'
b'FSGSFSG9GSGVFQAEASG9GSGVFEASGTFSGSFSGYFSGNFSG9GSGVFTGHEQAEATG9ET'
b'GVDSGAGSGOFTGDEEASGAGSGOFTGCETGHETGUDSG9GSGVFTGVDTGEETGUDTGHEEAT'
b'GUDEATG9ESGSFSGYFSGAGSGPFSGSFSGXFSGAGSGZFQAEASGXFSGAGTGVDSGAGTGT'
b'DTGDESGWFEASGBGSGAGSGRFSG9GSGVFSGZFSGNFSGSFTGVDTGUDTGHEEASGVFEAT'
b'GUDSGBGSGVFTGVDEASGBGSGAGSGRFEASGAGSGRFSGSFTGHESGYFSGAGSGZFEATGU'
b'DSGNFSGZFSGAGSGWFEATGUDSGPFSGAGSGOFSGAGSGRFTGDEEATGHEEASGAGSGOFS'
b'GSFTGUDSGBGSGSFTG9ESGVFSGPFSGNFTGGEQAEATGVDSGAGEATGUDTGVDSGNFSGP'
b'FSGVFTGVDEASGBGSGAGSGRFEATGUDSGAGSGZFSG9GSGSFSG9GSGVFSGSFEATGVDS'
b'GAGQAEASGXFSGNFSGXFSGVFSGZFEASGAGSGOFTGTDSGNFSGUFSGAGSGZFEATGHEE'
b'ASGBGTGTDSGSFSGRFSGAGTGUDTGVDSGNFSGPFSGVFTGVDTGEEEASGSFSGZFTGWDF'
b'AEASGMFEASGOFTGDEEASGBGTGTDSGSFSGRFSGBGSGAGTG9ESGSFSGYFQAEATG9ET'
b'GVDSGAGSGOFTGDEEASGPFTGDEEASGBGTGTDSGAGTGUDTGVDSGAGEATGUDSGXFSGN'
b'FSGUFSGNFSGYFEATGUDSGBGSGNFTGUDSGVFSGOFSGAGEASGVFEASGBGSGAGTGAES'
b'GSFSGYFEASG9GSGNFEATGUDSGPFSGAGSGSFSGZFEASGBGTGWDTGVDSGVFSAEASGK'
b'EEASGBGTGTDSGAGTGVDSGVFSGPFSG9GSGAGSGZFEATGUDSGYFTGWDTG9ESGNFSGS'
b'FQAEATGHEEASGBGTGTDSGSFSGRFSGYFSGNFSGQFSGNFTGGEEASGPFSGNFSGZFEAS'
b'GBGSGAGSGRFSGAGSGOFTGTDSGNFTGVDTGEEEASGAGTGTDTGWDSGTFSGVFSGSFEAS'
b'GVFEASGPFTGUDTGVDSGNFTGVDTGEEEASGBGSGAGTGUDTGVDSAEASGKEEASGYFTGG'
b'ESGOFSGAGSGZFEATGUDSGYFTGWDTG9ESGNFSGSFQAEATGHEEASG9GSGSFEASG9GS'
b'GNFSGBGSGYFSGSFSGPFSGNFTGVDTGEEQAEATG9ETGVDSGAGEASGPFTGDEEASGRFT'
b'GWDSGZFSGNFSGSFTGVDSGSFQAEATG9ETGVDSGAGEASGPFTGDEEASGVFSGZFSGSFS'
b'GSFTGVDSGSFTESTVALUE9DONTUSEINPRODUCTION99999YMSWGXVNDMLXPT9HMVA'
b'OWUUZMLSJZFWGKDVGXPSQAWAEBJN999999999999999999999999999PYOTA9UNI'
b'T9TESTS99999999999NYBKIVD99A99999999B99999999YJVDLFI9FFXKNVTUKHO'
b'PTZUWZPOTRTHNZ9YZDXFRVBAUGO9APIQQWFSCLGFQMLMVCEPCTBFAVMIIXHUPG99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
TryteString(
b'SGKETGDEEASG9GSGSFEASGZFSGAGSGTFSGSFTGVDSGSFEATGUDSGBGTGTDSGNFSG'
b'PFSGVFTGVDTGEETGUDTGHEEASGBGTGTDSGNFSGPFSGRFTGWDFAEASGZETGDESG9G'
b'QAEASGZFTGDEEASGTFSGVFSGPFSGSFSGZFEASGPFEASGZFSGVFTGTDSGSFQAEASG'
b'XFSGAGTGVDSGAGTGTDTGDESGWFEASGVFSGZFSGSFSGSFTGVDEATGUDTGVDSGSFSG'
b'9GTGDESAEASGQEEATGFETGVDSGVFEATGUDTGVDSGSFSG9GTGDEEASGRFSGAGSGYF'
b'SGTFSG9GTGDEEASGOFTGDETGVDTGEEEASGAGTGYDTGTDSGNFSG9GTGHETGGETGVD'
b'EASGYFTGGESGRFSGVFEATGUDEASGAGTGTDTGWDSGTFSGVFSGSFSGZFSAEASGSETG'
b'VDSGAGEASGOFTGWDSGRFSGSFTGVDEATGFETGVDSGAGEASGRFSGSFSGYFSGNFTGVD'
b'TGEEIBEASGKETGDEIBEASGKETGDEQAEASGYFSGSFSGWFTGVDSGSFSG9GSGNFSG9G'
b'TGVDEAFCTCXCBDQCTCFDVCIBEASGAFEASGZFSGSFSG9GTGHEEASGSFTGUDTGVDTG'
b'EEEASGOFSGAGSGYFTGEETGAESGNFTGHEEASGAGTGVDSGPFSGSFTGVDTGUDTGVDSG'
b'PFSGSFSG9GSG9GSGAGTGUDTGVDTGEEQAEATG9ESGSFSGZFEASGPFTGDEEASGZFSG'
b'AGSGTFSGSFTGVDSGSFEASGBGSGAGSG9GTGHETGVDTGEESAEASG9FTGDEEASGBGSG'
b'YFSGNFTG9ESGSFTGAETGEEEASGZESGNFSG9GTGVDTGEETGHESGQFSGAGEASGVFEA'
b'SGBGTGTDSGAGSGXFSGYFTGHESG9GSGVFEASGZFSGAGTGTDTGUDSGXFSGVFTGYDEA'
b'SGBGSGSFTGYDSGAGTGVDSGVFSG9GTGZDSGSFSGPFSAEASGAFEASGPFSGNFTGUDEA'
b'SGSFTGUDTGVDTGEEEATGVDSGNFSGXFSGAGSGWFEATGTDSGAGTGUDSGXFSGAGTGAE'
b'SGVFSAEASGAFEASGPFSGNFTGUDEASGSFTGUDTGVDTGEEEATGTDSGAGTGUDSGXFSG'
b'AGTGAETGEEQAEASG9GSGSFEASGUFSG9GSGNFTGHEQAEATG9ETGVDSGAGEATGHEEA'
b'SGUFSG9GSGNFTGGEDBEATG9ETGVDSGAGEATGUDSGZFSGSFTGTDTGVDTGEEEASGZE'
b'SGNFSG9GTGVDTGEETGHESGQFSGAGQAEASGPFEATGVDSGAGEASGPFTGTDSGSFSGZF'
b'TGHEEASGXFSGNFSGXFEATGVDTGTDSGNFSGQFSGVFTG9ESGSFTGUDSGXFSGVFSGWF'
b'QAEASGPFSGSFTGTDSGAGTGHETGVDSG9GSGAGQAEATGUDSGBGSGNFTGUDEASGTFSG'
b'VFSGUFSG9GTGEESAEASGQEEASGZFSGAGSGSFEATGUDTGWDTGBESGSFTGUDTGVDSG'
b'PFSGAGSGPFSGNFSG9GSGVFSGSFQAEASGPFEATGVDSGAGEASGPFTGTDSGSFSGZFTG'
b'HEEASGXFSGNFSGXFEASGQFTGTDSGAGTGVDSGSFTGUDSGXFEASGVFEASG9GSGSFSG'
b'BGSGAGSG9GTGHETGVDSG9GTGDESGZFSGVFEASGRFSGYFTGHEEASGPFSGNFTGUDQA'
b'EATGUDSGBGSGNFTGUDSGNFSGSFTGVDEASGTFSGVFSGUFSG9GSGVFEASASASAEASG'
b'KETGDEEASG9GSGSFEATGYDSGAGTGVDSGVFTGVDSGSFEASGUFSG9GSGNFTGVDTGEE'
b'EASGBGTGTDSGNFSGPFSGRFTGWDSAEASGXESGAGTGVDSGAGSGZFTGWDEATG9ETGVD'
b'SGAGEASGPFEASGQFSGYFTGWDSGOFSGVFSG9GSGSFEASGRFTGWDTGAESGVFQAEASG'
b'PFEATGVDSGSFTGYDEASGZFSGSFTGUDTGVDSGNFTGYDQAEASGPFTGDEEASG9GSGSF'
b'EASGQFSGAGSGPFSGAGTGTDSGVFTGVDSGSFEASGAGEASG9GSGNFEASGPFSGSFTG9E'
b'SGSFTGTDSGVFSG9GSGXFSGNFTGYDQAEASGPFTGDEEATGYDSGAGTGVDSGVFTGVDSG'
b'SFEASGZFSGSTESTVALUE9DONTUSEINPRODUCTION99999YMSWGXVNDMLXPT9HMVA'
b'OWUUZMLSJZFWGKDVGXPSQAWAEBJN999999999999999999999999999PYOTA9UNI'
b'T9TESTS99999999999NYBKIVD99999999999B99999999YJVDLFI9FFXKNVTUKHO'
b'PTZUWZPOTRTHNZ9YZDXFRVBAUGO9APIQQWFSCLGFQMLMVCEPCTBFAVMIIXHUPG99'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999999999999999999'
b'9999999999999999999999999999999999999999999999999'
),
],
},
)
class MockSignatureFragmentGenerator(object):
"""
Mocks the behavior of ``SignatureFragmentGenerator`` to speed up unit
tests.
Note that ``SignatureFragmentGenerator`` already has its own test
case, so this approach does not compromise test integrity.
References:
- :py:class:`cornode.crypto.signing.SignatureFragmentGenerator`
- :py:meth:`cornode.transaction.ProposedBundle.sign_inputs`
"""
def __init__(
self,
fragments = None,
length = AddressGenerator.DIGEST_ITERATIONS
):
# type: (Optional[Iterable[TryteString]], int) -> None
"""
:param fragments:
Provide fragments to seed (shortcut for calling :py:meth:`seed`).
:param length:
Length that the generator will report to the bundle, used to
ensure that it iterates the correct number of times.
"""
super(MockSignatureFragmentGenerator, self).__init__()
self.fragments = list(fragments or []) # type: List[TryteString]
self.length = length
def __iter__(self):
return self
def __len__(self):
return self.length
def seed(self, fragment):
# type: (TryteString) -> None
self.fragments.append(fragment)
# noinspection PyUnusedLocal
def __next__(self):
# type: (TryteString) -> TryteString
return self.fragments.pop(0)
if PY2:
next = __next__
| 56.619233
| 90
| 0.779283
| 5,789
| 159,553
| 21.42408
| 0.17706
| 0.256281
| 0.260224
| 0.470635
| 0.902495
| 0.898391
| 0.896843
| 0.893488
| 0.889691
| 0.886337
| 0
| 0.292419
| 0.182748
| 159,553
| 2,817
| 91
| 56.639333
| 0.658706
| 0.031676
| 0
| 0.851928
| 0
| 0
| 0.723549
| 0.709037
| 0
| 1
| 0
| 0
| 0.011895
| 1
| 0.014356
| false
| 0.005742
| 0.005332
| 0.001641
| 0.02379
| 0.00041
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
df8c48bd1ff2245dad0dc0595cba7623da3a3c58
| 11,749
|
py
|
Python
|
tests/pytests/unit/config/test__validate_opts.py
|
haodeon/salt
|
af2964f4ddbf9c5635d1528a495e473996cc7b71
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/unit/config/test__validate_opts.py
|
haodeon/salt
|
af2964f4ddbf9c5635d1528a495e473996cc7b71
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/unit/config/test__validate_opts.py
|
haodeon/salt
|
af2964f4ddbf9c5635d1528a495e473996cc7b71
|
[
"Apache-2.0"
] | null | null | null |
"""
Test config option type enforcement
"""
import pytest
import salt.config
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], True), # list
((1, 2, 3), True), # tuple
({"key": "value"}, False), # dict
("str", False), # str
(True, False), # bool
(1, False), # int
(0.123, False), # float
(None, False), # None
],
)
def test_list_types(option_value, expected):
"""
List and tuple type config options return True when the value is a list. All
other types return False
modules_dirs is a list type config option
"""
result = salt.config._validate_opts({"module_dirs": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_str_types(option_value, expected):
"""
Str, bool, int, float, and none type config options return True when the
value is a str. All other types return False
user is a str type config option
"""
result = salt.config._validate_opts({"user": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, True), # dict
("str", False), # str
(True, False), # bool
(1, False), # int
(0.123, False), # float
(None, False), # None
],
)
def test_dict_types(option_value, expected):
"""
Dict type config options return True when the value is a dict. All other
types return False
file_roots is a dict type config option
"""
result = salt.config._validate_opts({"file_roots": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", False), # str
(True, True), # bool
(1, False), # int
(0.123, False), # float
(None, False), # None
],
)
def test_bool_types(option_value, expected):
"""
Bool type config options return True when the value is a bool. All other
types return False
local is a bool type config option
"""
result = salt.config._validate_opts({"local": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", False), # str
(True, False), # bool
(1, True), # int
(0.123, False), # float
(None, False), # None
],
)
def test_int_types(option_value, expected):
"""
Int type config options return True when the value is an int. All other
types return False
publish_port is an int type config option
"""
result = salt.config._validate_opts({"publish_port": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", False), # str
(True, False), # bool
(1, True), # int
(0.123, True), # float
(None, False), # None
],
)
def test_float_types(option_value, expected):
"""
Float and int type config options return True when the value is a float. All
other types return False
ssh_timeout is a float type config option
"""
result = salt.config._validate_opts({"ssh_timeout": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_none_str_types(option_value, expected):
"""
Some config settings have two types, None and str. In that case str, bool,
int, float, and None type options should evaluate as True. All others should
return False.
saltenv is a None, str type config option
"""
result = salt.config._validate_opts({"saltenv": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", False), # str
(True, False), # bool
(1, True), # int
(0.123, False), # float
(None, True), # None
],
)
def test_none_int_types(option_value, expected):
"""
Some config settings have two types, None and int, which should evaluate as
True. All others should return False.
retry_dns_count is a None, int type config option
"""
result = salt.config._validate_opts({"retry_dns_count": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", False), # str
(True, True), # bool
(1, False), # int
(0.123, False), # float
(None, True), # None
],
)
def test_none_bool_types(option_value, expected):
"""
Some config settings have two types, None and bool which should evaluate as
True. All others should return False.
ipv6 is a None, bool type config option
"""
result = salt.config._validate_opts({"ipv6": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], True), # list
((1, 2, 3), True), # tuple
({"key": "value"}, False), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_str_list_types(option_value, expected):
"""
Some config settings have two types, str and list. In that case, list,
tuple, str, bool, int, float, and None should evaluate as True. All others
should return False.
master is a str, list type config option
"""
result = salt.config._validate_opts({"master": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_str_int_types(option_value, expected):
"""
Some config settings have two types, str and int. In that case, str, bool,
int, float, and None should evaluate as True. All others should return
False.
master_port is a str, int type config option
"""
result = salt.config._validate_opts({"master_port": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, True), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_str_dict_types(option_value, expected):
"""
Some config settings have two types, str and dict. In that case, dict, str,
bool, int, float, and None should evaluate as True. All others should return
False.
id_function is a str, dict type config option
"""
result = salt.config._validate_opts({"id_function": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], True), # list
((1, 2, 3), True), # tuple
({"key": "value"}, False), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_str_tuple_types(option_value, expected):
"""
Some config settings have two types, str and tuple. In that case, list,
tuple, str, bool, int, float, and None should evaluate as True. All others
should return False.
log_fmt_logfile is a str, tuple type config option
"""
result = salt.config._validate_opts({"log_fmt_logfile": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, False), # dict
("str", True), # str
(True, True), # bool
(1, True), # int
(0.123, True), # float
(None, True), # None
],
)
def test_str_bool_types(option_value, expected):
"""
Some config settings have two types, str and bool. In that case, str, bool,
int, float, and None should evaluate as True. All others should return
False.
update_url is a str, bool type config option
"""
result = salt.config._validate_opts({"update_url": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, True), # dict
("str", False), # str
(True, True), # bool
(1, False), # int
(0.123, False), # float
(None, False), # None
],
)
def test_dict_bool_types(option_value, expected):
"""
Some config settings have two types, dict and bool which should evaluate as
True. All others should return False.
token_expire_user_override is a dict, bool type config option
"""
result = salt.config._validate_opts({"token_expire_user_override": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], True), # list
((1, 2, 3), True), # tuple
({"key": "value"}, True), # dict
("str", False), # str
(True, False), # bool
(1, False), # int
(0.123, False), # float
(None, False), # None
],
)
def test_dict_list_types(option_value, expected):
"""
Some config settings have two types, dict and list. In that case, list,
tuple, and dict should evaluate as True. All others should return False.
nodegroups is a dict, list type config option
"""
result = salt.config._validate_opts({"nodegroups": option_value})
assert result is expected
@pytest.mark.parametrize(
"option_value,expected",
[
([1, 2, 3], False), # list
((1, 2, 3), False), # tuple
({"key": "value"}, True), # dict
("str", False), # str
(True, True), # bool
(1, False), # int
(0.123, False), # float
(None, True), # None
],
)
def test_dict_bool_none_types(option_value, expected):
"""
Some config settings have three types, dict, bool, and None which should
evaluate as True. All others should return False.
ssl is a dict, bool type config option
"""
result = salt.config._validate_opts({"ssl": option_value})
assert result is expected
| 29.153846
| 85
| 0.569495
| 1,497
| 11,749
| 4.363393
| 0.057448
| 0.085885
| 0.098898
| 0.031843
| 0.90891
| 0.864513
| 0.857012
| 0.848285
| 0.827771
| 0.710196
| 0
| 0.022692
| 0.291089
| 11,749
| 402
| 86
| 29.226368
| 0.761556
| 0.309133
| 0
| 0.744526
| 0
| 0
| 0.094589
| 0.050668
| 0
| 0
| 0
| 0
| 0.062044
| 1
| 0.062044
| false
| 0
| 0.007299
| 0
| 0.069343
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
10dd85de61a14354997831fa97c21ade769fa5d0
| 20,767
|
py
|
Python
|
QuestionnaireColorCueNew/migrations/0002_auto_20200709_1755.py
|
AdityaKapoor74/Supervised_Categorization_Study
|
1ce29de95c8ccc2b848e2d06463719858e57b942
|
[
"MIT"
] | null | null | null |
QuestionnaireColorCueNew/migrations/0002_auto_20200709_1755.py
|
AdityaKapoor74/Supervised_Categorization_Study
|
1ce29de95c8ccc2b848e2d06463719858e57b942
|
[
"MIT"
] | null | null | null |
QuestionnaireColorCueNew/migrations/0002_auto_20200709_1755.py
|
AdityaKapoor74/Supervised_Categorization_Study
|
1ce29de95c8ccc2b848e2d06463719858e57b942
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.2 on 2020-07-09 17:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second migration for the QuestionnaireColorCueNew app.

    Creates the per-set study tables — five parallel model variants for each
    study phase ("Classify and Learn", "Common Features Test", "Observe and
    Learn", and the final "Test" phase) plus the matching user-response
    tables — then replaces the old ``SetNumber`` model with a ``set_num``
    field on ``UserDetails`` and attaches every response model to
    ``UserDetails`` through a nullable foreign key.
    """

    dependencies = [
        ('QuestionnaireColorCueNew', '0001_initial'),
    ]

    # NOTE: migration operations are order-sensitive; do not reorder.
    operations = [
        # --- "Classify and Learn" phase: labelled sample images, sets 1-5 ---
        migrations.CreateModel(
            name='Classify_And_Learn_Samples_set1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Classify and Learn Samples Set 1',
            },
        ),
        migrations.CreateModel(
            name='Classify_And_Learn_Samples_set2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Classify and Learn Samples Set 2',
            },
        ),
        migrations.CreateModel(
            name='Classify_And_Learn_Samples_set3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Classify and Learn Samples Set 3',
            },
        ),
        migrations.CreateModel(
            name='Classify_And_Learn_Samples_set4',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Classify and Learn Samples Set 4',
            },
        ),
        migrations.CreateModel(
            name='Classify_And_Learn_Samples_set5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Classify and Learn Samples Set 5',
            },
        ),
        # --- "Common Features" test phase: image only, no label, sets 1-5 ---
        migrations.CreateModel(
            name='Common_Features_Test_set1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
            ],
            options={
                'verbose_name_plural': 'Common Features Test Samples Set 1',
            },
        ),
        migrations.CreateModel(
            name='Common_Features_Test_set2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
            ],
            options={
                'verbose_name_plural': 'Common Features Test Samples Set 2',
            },
        ),
        migrations.CreateModel(
            name='Common_Features_Test_set3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
            ],
            options={
                'verbose_name_plural': 'Common Features Test Samples Set 3',
            },
        ),
        migrations.CreateModel(
            name='Common_Features_Test_set4',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
            ],
            options={
                'verbose_name_plural': 'Common Features Test Samples Set 4',
            },
        ),
        migrations.CreateModel(
            name='Common_Features_Test_set5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
            ],
            options={
                'verbose_name_plural': 'Common Features Test Samples Set 5',
            },
        ),
        # --- "Observe and Learn" phase: labelled sample images, sets 1-5 ---
        migrations.CreateModel(
            name='Observe_And_Learn_Samples_set1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Observe and Learn Samples Set 1',
            },
        ),
        migrations.CreateModel(
            name='Observe_And_Learn_Samples_set2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Observe and Learn Samples Set 2',
            },
        ),
        migrations.CreateModel(
            name='Observe_And_Learn_Samples_set3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Observe and Learn Samples Set 3',
            },
        ),
        migrations.CreateModel(
            name='Observe_And_Learn_Samples_set4',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Observe and Learn Samples Set 4',
            },
        ),
        migrations.CreateModel(
            name='Observe_And_Learn_Samples_set5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Observe and Learn Samples Set 5',
            },
        ),
        # --- Final "Test" phase: labelled sample images, sets 1-5 ---
        migrations.CreateModel(
            name='Test_set1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Test Samples Set 1',
            },
        ),
        migrations.CreateModel(
            name='Test_set2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Test Samples Set 2',
            },
        ),
        migrations.CreateModel(
            name='Test_set3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Test Samples Set 3',
            },
        ),
        migrations.CreateModel(
            name='Test_set4',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Test Samples Set 4',
            },
        ),
        migrations.CreateModel(
            name='Test_set5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sample_img', models.ImageField(upload_to='images/')),
                ('sample_label', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'Test Samples Set 5',
            },
        ),
        # --- User responses for the "Common Features" test phase; `quid` is
        # the FK to the question (sample) row being answered ---
        migrations.CreateModel(
            name='UserResponse_Common_Features_Test_set1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Common_Features_Test_set1')),
            ],
            options={
                'verbose_name_plural': 'User Response for Common Features Test phase set 1',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Common_Features_Test_set2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Common_Features_Test_set2')),
            ],
            options={
                'verbose_name_plural': 'User Response for Common Features Test phase set 2',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Common_Features_Test_set3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Common_Features_Test_set3')),
            ],
            options={
                'verbose_name_plural': 'User Response for Common Features Test phase set 3',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Common_Features_Test_set4',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Common_Features_Test_set4')),
            ],
            options={
                'verbose_name_plural': 'User Response for Common Features Test phase set 4',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Common_Features_Test_set5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Common_Features_Test_set5')),
            ],
            options={
                'verbose_name_plural': 'User Response for Common Features Test phase set 5',
            },
        ),
        # --- User responses for the final "Test" phase ---
        migrations.CreateModel(
            name='UserResponse_Test_set1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Test_set1')),
            ],
            options={
                'verbose_name_plural': 'User Response for Test phase set 1',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Test_set2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Test_set2')),
            ],
            options={
                'verbose_name_plural': 'User Response for Test phase set 2',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Test_set3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Test_set3')),
            ],
            options={
                'verbose_name_plural': 'User Response for Test phase set 3',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Test_set4',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Test_set4')),
            ],
            options={
                'verbose_name_plural': 'User Response for Test phase set 4',
            },
        ),
        migrations.CreateModel(
            name='UserResponse_Test_set5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_option', models.CharField(default=None, max_length=10)),
                ('iteration', models.IntegerField(default=1)),
                ('quid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.Test_set5')),
            ],
            options={
                'verbose_name_plural': 'User Response for Test phase set 5',
            },
        ),
        # --- Free-text description responses (set recorded as a string) ---
        migrations.CreateModel(
            name='UserResponsesForDescription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True, default=None, null=True)),
                ('set_number', models.CharField(blank=True, default=None, max_length=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'User Responses for Description',
            },
        ),
        # --- Schema cleanup: retire SetNumber, fold the set number into
        # UserDetails as `set_num` (FK removed first so the model can drop) ---
        migrations.RemoveField(
            model_name='setnumber',
            name='user',
        ),
        migrations.AlterModelOptions(
            name='userdetails',
            options={'verbose_name_plural': 'User Details'},
        ),
        migrations.AddField(
            model_name='userdetails',
            name='set_num',
            field=models.CharField(blank=True, default=None, max_length=10, null=True),
        ),
        migrations.DeleteModel(
            name='SetNumber',
        ),
        # --- Attach each response model to a UserDetails row via a nullable
        # `user` foreign key (added after the models exist) ---
        migrations.AddField(
            model_name='userresponsesfordescription',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_test_set5',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_test_set4',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_test_set3',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_test_set2',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_test_set1',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_common_features_test_set5',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_common_features_test_set4',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_common_features_test_set3',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_common_features_test_set2',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
        migrations.AddField(
            model_name='userresponse_common_features_test_set1',
            name='user',
            field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='QuestionnaireColorCueNew.UserDetails'),
        ),
    ]
| 48.071759
| 150
| 0.57967
| 2,011
| 20,767
| 5.774242
| 0.049229
| 0.05968
| 0.049604
| 0.066138
| 0.953841
| 0.951085
| 0.950052
| 0.913538
| 0.864967
| 0.861523
| 0
| 0.011127
| 0.294602
| 20,767
| 431
| 151
| 48.183295
| 0.781555
| 0.002167
| 0
| 0.684706
| 1
| 0
| 0.223263
| 0.092857
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004706
| 0
| 0.011765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
80524b063860be3e4320064a4064963c055499ef
| 37,826
|
py
|
Python
|
v3.0/framework/parsetab.py
|
dikujepsen/OpenTran
|
af9654fcf55e394e7bece38e59bbdc3dd343f092
|
[
"MIT"
] | 1
|
2015-02-09T12:56:07.000Z
|
2015-02-09T12:56:07.000Z
|
v3.0/framework/parsetab.py
|
dikujepsen/OpenTran
|
af9654fcf55e394e7bece38e59bbdc3dd343f092
|
[
"MIT"
] | null | null | null |
v3.0/framework/parsetab.py
|
dikujepsen/OpenTran
|
af9654fcf55e394e7bece38e59bbdc3dd343f092
|
[
"MIT"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = '\xcc\xb5(e\x9f\xabO\r,\xe6J\x922\x85M\xb7'
_lr_action_items = {'LOGOR':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,67,-87,-28,-32,-60,-59,-63,-62,-60,-31,67,-66,-53,-51,67,67,67,67,-33,67,67,67,-34,67,67,-35,67,67,-36,67,67,-30,67,-32,-52,-17,-50,67,]),'SHORT':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,5,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,5,5,-69,5,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,5,5,-50,-54,]),'RSHIFT':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,65,-87,-28,-32,-60,-59,-63,-62,-60,-31,65,-66,-53,-51,65,65,65,65,-33,65,65,65,-34,65,65,-35,65,65,-36,65,65,-30,65,-32,-52,-17,-50,65,]),'UNKNOWN':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,8,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,8,8,-69,8,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,8,8,-50,-54,]),'VOID':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,9,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-
25,9,9,-69,9,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,9,9,-50,-54,]),'NE':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,77,-87,-28,-32,-60,-59,-63,-62,-60,-31,77,-66,-53,-51,77,77,77,77,-33,77,77,77,-34,77,77,-35,77,77,-36,77,77,-30,77,-32,-52,-17,-50,77,]),'CHAR':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,13,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,13,13,-69,13,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,13,13,-50,-54,]),'LOGNOT':([0,1,2,4,6,7,10,11,12,14,17,18,19,20,21,22,23,25,28,29,30,31,32,33,35,36,37,40,47,48,49,50,51,52,54,55,56,57,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,82,83,84,87,88,89,90,91,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,131,132,133,134,135,139,146,],[-89,14,-10,-2,-64,14,-5,-63,-31,-56,-61,-55,-11,-26,-27,-62,-9,-8,-84,-89,14,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-18,-19,14,-20,-21,-25,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,-69,14,14,-70,-66,-53,14,14,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,14,-17,14,14,-50,14,-54,]),'FLOAT_CONST':([0,1,2,4,6,7,10,11,12,14,17,18,19,20,21,22,23,25,28,29,30,31,32,33,35,36,37,40,47,48,49,50,51,52,54,55,56,57,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,82,83,84,87,88,89,90,91,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,131,132,133,134,135,139,146,],[-89,21,-10,-2,-64,21,-5,-63,-31,-56,-61,-
55,-11,-26,-27,-62,-9,-8,-84,-89,21,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-18,-19,21,-20,-21,-25,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,-69,21,21,-70,-66,-53,21,21,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,21,-17,21,21,-50,21,-54,]),'LSHIFT':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,74,-87,-28,-32,-60,-59,-63,-62,-60,-31,74,-66,-53,-51,74,74,74,74,-33,74,74,74,-34,74,74,-35,74,74,-36,74,74,-30,74,-32,-52,-17,-50,74,]),'MINUS':([0,1,2,4,6,7,10,11,12,14,17,18,19,20,21,22,23,25,28,29,30,31,32,33,35,36,37,40,47,48,49,50,51,52,54,55,56,57,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,87,88,89,90,91,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,122,124,125,126,129,131,132,133,134,135,136,139,146,],[-89,18,-10,-2,-64,18,-5,-63,-31,-56,-61,-55,-11,-26,-27,-62,-9,-8,76,-89,18,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-18,-19,18,-20,-21,-25,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,-31,76,-69,18,18,-70,-66,-53,18,18,-51,76,76,76,76,-33,76,76,76,-34,76,76,-35,76,76,-36,76,76,-65,-30,76,-32,-67,-88,-52,18,-17,18,18,-50,76,18,-54,]),'DIVIDE':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,66,-87,-28,-32,-60,-59,-63,-62,-60,-31,66,-66,-53,-51,66,66,66,66,-33,66,66,66,-34,66,66,66,66,66,66,66,66,-30,66,-32,-52,-17,-50,66,]),'COMMENT':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,79,82,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,135,146,],[-89,19,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,
-60,-68,-25,19,-69,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,-50,-54,]),'INT_CONST':([0,1,2,4,6,7,10,11,12,14,17,18,19,20,21,22,23,25,28,29,30,31,32,33,35,36,37,40,47,48,49,50,51,52,54,55,56,57,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,82,83,84,87,88,89,90,91,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,131,132,133,134,135,139,146,],[-89,20,-10,-2,-64,20,-5,-63,-31,-56,-61,-55,-11,-26,-27,-62,-9,-8,-84,-89,20,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-18,-19,20,-20,-21,-25,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,-69,20,20,-70,-66,-53,20,20,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,20,-17,20,20,-50,20,-54,]),'LE':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,72,-87,-28,-32,-60,-59,-63,-62,-60,-31,72,-66,-53,-51,72,72,72,72,-33,72,72,72,-34,72,72,-35,72,72,-36,72,72,-30,72,-32,-52,-17,-50,72,]),'RPAREN':([6,12,17,20,21,32,33,35,48,49,50,51,80,84,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,120,121,122,123,124,129,130,132,133,134,135,137,138,140,143,144,145,],[-64,-31,-61,-26,-27,-87,-28,-32,-59,-63,-62,-60,118,-89,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-30,132,-16,-14,-15,-32,-52,-71,-17,-89,-89,-50,-13,-12,142,-57,-58,-29,]),'SEMI':([6,11,12,17,20,21,26,28,32,33,34,35,44,48,49,50,51,59,88,89,92,93,94,97,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,119,128,129,132,135,136,],[-64,52,-31,-61,-26,-27,60,-84,-87,-28,82,-32,87,-59,-63,-62,-60,-71,-66,-53,-51,-24,-72,131,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-30,-22,-23,-52,-17,-50,139,]),'UNSIGNED':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,3
3,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,43,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,43,43,-69,43,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,43,43,-50,-54,]),'LONG':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,15,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,15,15,-69,15,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,15,15,-50,-54,]),'LT':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,71,-87,-28,-32,-60,-59,-63,-62,-60,-31,71,-66,-53,-51,71,71,71,71,-33,71,71,71,-34,71,71,-35,71,71,-36,71,71,-30,71,-32,-52,-17,-50,71,]),'PLUS':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,73,-87,-28,-32,-60,-59,-63,-62,-60,-31,73,-66,-53,-51,73,73,73,73,-33,73,73,73,-34,73,73,-35,73,73,-36,73,73,-30,73,-32,-52,-17,-50,73,]),'COMMA':([6,17,20,21,32,33,48,49,50,51,88,89,92,123,124,129,130,132,135,],[-64,-61,-26,-27,-87,-28,-59,-63,-62,-60,-66,-53,-51,133,134,-52,-71,-17,-50,]),'TIMESEQUALS':([22,32,34,47,59,89,92,95,98,99,129,130,135,],[58,-87,58,58,-71,-53,-51,58,58,58,-52,-71,-50,]),'$end':([0,1,2,3,4,6,10,11,12,17,19,20,21,22,23,25,28,31,32,33,35,36,37,40,47,48,49,50,51,52,60,82,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111
,112,113,114,115,116,117,118,125,126,129,132,135,146,],[-89,-1,-10,0,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,-69,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,-50,-54,]),'GT':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,64,-87,-28,-32,-60,-59,-63,-62,-60,-31,64,-66,-53,-51,64,64,64,64,-33,64,64,64,-34,64,64,-35,64,64,-36,64,64,-30,64,-32,-52,-17,-50,64,]),'RBRACE':([2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,79,82,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,135,146,],[-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,117,-69,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,-50,-54,]),'FOR':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,79,82,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,135,146,],[-89,27,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,27,-69,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,-50,-54,]),'PLUSPLUS':([6,17,20,21,32,33,48,49,50,51,88,89,92,129,132,135,141,],[-64,-61,-26,-27,-87,-28,-59,-63,-62,-60,-66,-53,-51,-52,-17,-50,143,]),'EQUALS':([22,32,34,47,59,89,92,95,98,99,129,130,135,],[54,-87,54,54,-71,-53,-51,54,54,54,-52,-71,-50,]),'TIMES':([5,6,8,9,11,12,13,15,16,17,20,21,22,28,32,33,35,39,41,42,43,45,46,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-77,-64,-75,-73
,-63,-31,-76,-79,53,-61,-26,-27,-62,70,-87,-28,-32,-78,-80,-82,-83,-81,-74,-60,-59,-63,-62,-60,-31,70,-66,-53,-51,70,70,70,70,-33,70,70,70,-34,70,70,70,70,70,70,70,70,-30,70,-32,-52,-17,-50,70,]),'PLUSEQUALS':([22,32,34,47,59,89,92,95,98,99,129,130,135,],[55,-87,55,55,-71,-53,-51,55,55,55,-52,-71,-50,]),'GE':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,69,-87,-28,-32,-60,-59,-63,-62,-60,-31,69,-66,-53,-51,69,69,69,69,-33,69,69,69,-34,69,69,-35,69,69,-36,69,69,-30,69,-32,-52,-17,-50,69,]),'LPAREN':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,27,28,29,30,31,32,33,34,35,36,37,40,47,48,49,50,51,52,54,55,56,57,58,59,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,82,83,84,87,88,89,90,91,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,131,132,133,134,135,146,],[-89,30,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,61,-84,-89,30,-4,-87,-28,84,-32,-6,-3,-7,84,-59,-63,-62,84,-68,-18,-19,30,-20,-21,-71,-25,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,-69,30,30,-70,-66,-53,30,30,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,30,-17,30,30,-50,-54,]),'MINUSMINUS':([6,17,20,21,32,33,48,49,50,51,88,89,92,129,132,135,141,],[-64,-61,-26,-27,-87,-28,-59,-63,-62,-60,-66,-53,-51,-52,-17,-50,144,]),'INCLUDE':([38,],[86,]),'EQ':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,75,-87,-28,-32,-60,-59,-63,-62,-60,-31,75,-66,-53,-51,75,75,75,75,-33,75,75,75,-34,75,75,-35,75,75,-36,75,75,-30,75,-32,-52,-17,-50,75,]),'ID':([0,1,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,28,29,30,31,32,33,35,36,37,39,40,41,42,43,45,46,47,48,49,50,51,52,53,54,55,56,57,58,60,61,62,63,64,65,66,67,68,69,70,71,72
,73,74,75,76,77,78,79,82,83,84,87,88,89,90,91,92,96,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,131,132,133,134,135,139,146,],[-89,32,-10,-2,-77,-64,32,-75,-73,-5,-63,-31,-76,-56,-79,-85,-61,-55,-11,-26,-27,-62,-9,32,-8,-84,-89,32,-4,-87,-28,-32,-6,-3,-78,-7,-80,-82,-83,-81,-74,-60,-59,-63,-62,-60,-68,-86,-18,-19,32,-20,-21,-25,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,-69,32,32,-70,-66,-53,32,32,-51,32,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,32,-17,32,32,-50,32,-54,]),'AND':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,62,-87,-28,-32,-60,-59,-63,-62,-60,-31,62,-66,-53,-51,62,62,62,62,-33,62,62,62,-34,62,62,-35,62,62,-36,62,62,-30,62,-32,-52,-17,-50,62,]),'LBRACKET':([32,47,51,59,92,99,135,],[-87,90,90,90,90,90,-50,]),'LBRACE':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,79,82,85,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,135,142,146,],[-89,29,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,29,-69,29,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,-50,29,-54,]),'STRING_LITERAL':([0,1,2,4,6,7,10,11,12,14,17,18,19,20,21,22,23,25,28,29,30,31,32,33,35,36,37,40,47,48,49,50,51,52,54,55,56,57,58,60,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,82,83,84,86,87,88,89,90,91,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,131,132,133,134,135,139,146,],[-89,33,-10,-2,-64,33,-5,-63,-31,-56,-61,-55,-11,-26,-27,-62,-9,-8,-84,-89,33,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-18,-19,33,-20,-21,-25,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,-69,33,3
3,126,-70,-66,-53,33,33,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,33,-17,33,33,-50,33,-54,]),'PPHASH':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,79,82,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,135,146,],[-89,38,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,38,-69,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,-50,-54,]),'LOGAND':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,68,-87,-28,-32,-60,-59,-63,-62,-60,-31,68,-66,-53,-51,68,68,68,68,-33,68,68,68,-34,68,68,-35,68,68,-36,68,68,-30,68,-32,-52,-17,-50,68,]),'INT':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,39,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,39,39,-69,39,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,39,39,-50,-54,]),'DOUBLE':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,45,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,45,45,-69,45,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,45,45,-50,-54,]),'FLOAT':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,5
0,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,41,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,41,41,-69,41,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,41,41,-50,-54,]),'SIGNED':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,42,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,42,42,-69,42,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,42,42,-50,-54,]),'MINUSEQUALS':([22,32,34,47,59,89,92,95,98,99,129,130,135,],[57,-87,57,57,-71,-53,-51,57,57,57,-52,-71,-50,]),'SIZE_T':([0,1,2,4,6,10,11,12,17,19,20,21,22,23,25,28,29,31,32,33,35,36,37,40,47,48,49,50,51,52,60,61,79,82,84,87,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,125,126,129,132,133,134,135,146,],[-89,46,-10,-2,-64,-5,-63,-31,-61,-11,-26,-27,-62,-9,-8,-84,-89,-4,-87,-28,-32,-6,-3,-7,-60,-59,-63,-62,-60,-68,-25,46,46,-69,46,-70,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-65,-30,-67,-88,-52,-17,46,46,-50,-54,]),'RBRACKET':([6,12,17,20,21,28,32,33,35,48,49,50,51,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,127,129,132,135,],[-64,-31,-61,-26,-27,-84,-87,-28,-32,-59,-63,-62,-60,-66,-53,-51,-39,-38,-45,-41,-33,-42,-43,-47,-34,-44,-46,-35,-40,-48,-36,-49,-37,-30,135,-52,-17,-50,]),'OR':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,63,-87,-28,-32,
-60,-59,-63,-62,-60,-31,63,-66,-53,-51,63,63,63,63,-33,63,63,63,-34,63,63,-35,63,63,-36,63,63,-30,63,-32,-52,-17,-50,63,]),'MOD':([6,11,12,17,20,21,22,28,32,33,35,47,48,49,50,51,80,81,88,89,92,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,118,122,124,129,132,135,136,],[-64,-63,-31,-61,-26,-27,-62,78,-87,-28,-32,-60,-59,-63,-62,-60,-31,78,-66,-53,-51,78,78,78,78,-33,78,78,78,-34,78,78,-35,78,78,-36,78,78,-30,78,-32,-52,-17,-50,78,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'comment':([1,79,],[4,4,]),'unary_token_before':([1,7,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,139,],[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,]),'constant':([1,7,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,139,],[17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,]),'unary_expression':([1,7,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,139,],[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,]),'declaration':([1,79,],[31,31,]),'unary_token_after':([141,],[145,]),'function_call':([1,7,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,139,],[11,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,11,49,49,49,49,49,49,49,49,]),'binop_expression':([1,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,],[12,80,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'increment':([139,],[140,]),'native_type':([1,61,79,84,133,134,],[16,16,16,16,16,16,]),'arglist':([34,47,51,],[85,88,88,]),'arg_params':([84,133,134,],[120,137,138,]),'array_reference':([1,7,30,56,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,139,],[22,50,50,50,95,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,22,50,50,50,50,50,50,50,50,]),'subscript':([47,51,59,92,99,],[92,92,92,92,92,]),'include':([1,79,],[23,23,]),'type':([1,61,79,84,133,134,],[24,96,24,96,96,96,]),'empty':([0,29,84,133,134,],[2,2,121,121,121,]),'assignment_operator':([22,34,47,95,98,99,],[56,83,91,56,83,91,]),'for_loop':([1,79,],[25,25,]),'assignment_expression':([1,61,79,],[26,97,26,]),'binop':([1,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,],[28,81,28,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,28,28,122,28,28,136,122,122,]),'compound'
:([1,79,85,142,],[10,10,125,146,]),'typeid':([1,61,79,84,133,134,],[34,98,34,123,123,123,]),'term':([1,7,30,56,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,131,133,134,139,],[35,48,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,124,35,35,35,124,124,141,]),'assignment_expression_semi':([1,79,],[36,36,]),'subscript_list':([47,51,59,92,99,],[89,89,94,129,89,]),'function_declaration':([1,79,],[37,37,]),'expr':([1,56,79,83,90,91,],[40,93,40,119,127,128,]),'top_level':([0,29,],[1,79,]),'array_typeid':([1,79,],[44,44,]),'identifier':([1,7,24,30,56,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,83,84,90,91,96,131,133,134,139,],[47,51,59,51,51,99,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,47,51,51,51,51,130,51,51,51,51,]),'first':([0,],[3,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> first","S'",1,None,None,None),
('first -> top_level','first',1,'p_first','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',10),
('top_level -> top_level comment','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',16),
('top_level -> top_level function_declaration','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',17),
('top_level -> top_level declaration','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',18),
('top_level -> top_level compound','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',19),
('top_level -> top_level assignment_expression_semi','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',20),
('top_level -> top_level expr','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',21),
('top_level -> top_level for_loop','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',22),
('top_level -> top_level include','top_level',2,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',23),
('top_level -> empty','top_level',1,'p_top_level','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',24),
('comment -> COMMENT','comment',1,'p_comment','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',33),
('arg_params -> term COMMA arg_params','arg_params',3,'p_arg_params','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',38),
('arg_params -> typeid COMMA arg_params','arg_params',3,'p_arg_params','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',39),
('arg_params -> binop','arg_params',1,'p_arg_params','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',40),
('arg_params -> typeid','arg_params',1,'p_arg_params','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',41),
('arg_params -> empty','arg_params',1,'p_arg_params','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',42),
('arglist -> LPAREN arg_params RPAREN','arglist',3,'p_arglist','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',51),
('assignment_operator -> EQUALS','assignment_operator',1,'p_assignment_operator','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',56),
('assignment_operator -> PLUSEQUALS','assignment_operator',1,'p_assignment_operator','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',57),
('assignment_operator -> MINUSEQUALS','assignment_operator',1,'p_assignment_operator','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',58),
('assignment_operator -> TIMESEQUALS','assignment_operator',1,'p_assignment_operator','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',59),
('assignment_expression -> typeid assignment_operator expr','assignment_expression',3,'p_assignment_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',65),
('assignment_expression -> identifier assignment_operator expr','assignment_expression',3,'p_assignment_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',66),
('assignment_expression -> array_reference assignment_operator expr','assignment_expression',3,'p_assignment_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',67),
('assignment_expression_semi -> assignment_expression SEMI','assignment_expression_semi',2,'p_assignment_expression_semi','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',73),
('constant -> INT_CONST','constant',1,'p_constant','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',78),
('constant -> FLOAT_CONST','constant',1,'p_constant','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',79),
('constant -> STRING_LITERAL','constant',1,'p_constant','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',80),
('increment -> term unary_token_after','increment',2,'p_increment','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',86),
('binop -> LPAREN binop_expression RPAREN','binop',3,'p_binop','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',91),
('binop -> binop_expression','binop',1,'p_binop','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',92),
('binop_expression -> term','binop_expression',1,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',101),
('binop_expression -> binop DIVIDE binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',102),
('binop_expression -> binop TIMES binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',103),
('binop_expression -> binop PLUS binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',104),
('binop_expression -> binop MINUS binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',105),
('binop_expression -> binop MOD binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',106),
('binop_expression -> binop OR binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',107),
('binop_expression -> binop AND binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',108),
('binop_expression -> binop LSHIFT binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',109),
('binop_expression -> binop RSHIFT binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',110),
('binop_expression -> binop LOGOR binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',111),
('binop_expression -> binop LOGAND binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',112),
('binop_expression -> binop LT binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',113),
('binop_expression -> binop GT binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',114),
('binop_expression -> binop LE binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',115),
('binop_expression -> binop GE binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',116),
('binop_expression -> binop EQ binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',117),
('binop_expression -> binop NE binop','binop_expression',3,'p_binop_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',118),
('subscript -> LBRACKET expr RBRACKET','subscript',3,'p_subscript','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',129),
('subscript_list -> subscript','subscript_list',1,'p_subscript_list','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',134),
('subscript_list -> subscript subscript_list','subscript_list',2,'p_subscript_list','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',135),
('array_reference -> identifier subscript_list','array_reference',2,'p_array_reference','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',143),
('for_loop -> FOR LPAREN assignment_expression SEMI binop SEMI increment RPAREN compound','for_loop',9,'p_for_loop','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',149),
('unary_token_before -> MINUS','unary_token_before',1,'p_unary_token_before','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',155),
('unary_token_before -> LOGNOT','unary_token_before',1,'p_unary_token_before','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',156),
('unary_token_after -> PLUSPLUS','unary_token_after',1,'p_unary_token_after','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',162),
('unary_token_after -> MINUSMINUS','unary_token_after',1,'p_unary_token_after','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',163),
('unary_expression -> unary_token_before term','unary_expression',2,'p_unary_expression','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',169),
('term -> identifier','term',1,'p_term','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',174),
('term -> constant','term',1,'p_term','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',175),
('term -> array_reference','term',1,'p_term','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',176),
('term -> function_call','term',1,'p_term','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',177),
('term -> unary_expression','term',1,'p_term','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',178),
('compound -> LBRACE top_level RBRACE','compound',3,'p_compound','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',184),
('function_call -> identifier arglist','function_call',2,'p_func_call','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',189),
('function_declaration -> typeid arglist compound','function_declaration',3,'p_func_decl_1','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',199),
('function_declaration -> function_call SEMI','function_declaration',2,'p_func_decl_3','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',204),
('declaration -> typeid SEMI','declaration',2,'p_decl_1','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',209),
('declaration -> array_typeid SEMI','declaration',2,'p_decl_2','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',214),
('typeid -> type identifier','typeid',2,'p_typeid','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',219),
('array_typeid -> type identifier subscript_list','array_typeid',3,'p_array_typeid','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',224),
('native_type -> VOID','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',229),
('native_type -> SIZE_T','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',230),
('native_type -> UNKNOWN','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',231),
('native_type -> CHAR','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',232),
('native_type -> SHORT','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',233),
('native_type -> INT','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',234),
('native_type -> LONG','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',235),
('native_type -> FLOAT','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',236),
('native_type -> DOUBLE','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',237),
('native_type -> SIGNED','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',238),
('native_type -> UNSIGNED','native_type',1,'p_native_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',239),
('expr -> binop','expr',1,'p_expr','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',245),
('type -> native_type','type',1,'p_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',251),
('type -> native_type TIMES','type',2,'p_type','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',252),
('identifier -> ID','identifier',1,'p_identifier','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',260),
('include -> PPHASH INCLUDE STRING_LITERAL','include',3,'p_include','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',265),
('empty -> <empty>','empty',0,'p_empty','/home/jacob/PycharmProjects/OpenTran/OpenTran/src/framework/lan/lan_parser.py',270),
]
| 317.865546
| 20,459
| 0.667689
| 8,406
| 37,826
| 2.942779
| 0.03783
| 0.032381
| 0.086348
| 0.115131
| 0.763593
| 0.754295
| 0.746614
| 0.740591
| 0.738327
| 0.726604
| 0
| 0.366198
| 0.017528
| 37,826
| 118
| 20,460
| 320.559322
| 0.299438
| 0.001639
| 0
| 0.018349
| 1
| 0
| 0.334746
| 0.195127
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
33983c176699a31b2b26f734bfaddd2c48c690ea
| 7,333
|
py
|
Python
|
isiscb/isisdata/migrations/0085_auto_20190902_1843.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 4
|
2016-01-25T20:35:33.000Z
|
2020-04-07T15:39:52.000Z
|
isiscb/isisdata/migrations/0085_auto_20190902_1843.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 41
|
2015-08-19T17:34:41.000Z
|
2022-03-11T23:19:01.000Z
|
isiscb/isisdata/migrations/0085_auto_20190902_1843.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 2
|
2020-11-25T20:18:18.000Z
|
2021-06-24T15:15:41.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-09-02 18:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0084_cachedtimeline_recalculate'),
]
operations = [
migrations.CreateModel(
name='CitationSubtype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text=b'Name of the new subtype.', max_length=1000)),
('unique_name', models.CharField(db_index=True, help_text=b'Unique name of a subtype, use to reference a subtype.', max_length=1000)),
('description', models.TextField(blank=True, help_text=b"A brief description that will be displayed to help identify the authority. Such as, brief bio or a scope note. For classification terms will be text like 'Classification term from the XXX classification schema.'", null=True)),
('related_citation_type', models.CharField(blank=True, choices=[(b'BO', b'Book'), (b'AR', b'Article'), (b'CH', b'Chapter'), (b'RE', b'Review'), (b'ES', b'Essay Review'), (b'TH', b'Thesis'), (b'EV', b'Event'), (b'WO', b'Web Object'), (b'MO', b'Multimedia Object'), (b'AO', b'Archive Object'), (b'DR', b'Digital Resource'), (b'PC', b'Personal Recognition')], help_text=b'Type of which this object is a subtype, e.g. Review or Chapter.', max_length=2, null=True, verbose_name=b'citation type')),
],
),
migrations.AlterField(
model_name='acrelation',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'AU', b'Author'), (b'ED', b'Editor'), (b'AD', b'Advisor'), (b'CO', b'Contributor'), (b'TR', b'Translator'), (b'SU', b'Subject'), (b'CA', b'Category'), (b'PU', b'Publisher'), (b'SC', b'School'), (b'IN', b'Institution'), (b'ME', b'Meeting'), (b'PE', b'Periodical'), (b'BS', b'Book Series'), (b'CM', b'Committee Member'), (b'OR', b'Organizer'), (b'IV', b'Interviewer'), (b'GU', b'Guest'), (b'CR', b'Creator'), (b'PR', b'Producer'), (b'DI', b'Director'), (b'WR', b'Writer'), (b'PF', b'Performer'), (b'CL', b'Collector'), (b'AR', b'Archivist'), (b'RE', b'Researcher'), (b'DE', b'Developer'), (b'CP', b'Compiler'), (b'AW', b'Awardee'), (b'OF', b'Officer'), (b'HO', b'Host'), (b'DS', b'Distributor'), (b'AC', b'Archival Repository'), (b'MI', b'Maintaining Institution'), (b'PG', b'Presenting Group')], help_text=b'Used to specify the nature of the relationship between authority (as the subject) and the citation (as the object).', max_length=2, null=True, verbose_name=b'relationship type'),
),
migrations.AlterField(
model_name='cachedtimelinetitle',
name='citation_type',
field=models.CharField(blank=True, choices=[(b'BO', b'Book'), (b'AR', b'Article'), (b'CH', b'Chapter'), (b'RE', b'Review'), (b'ES', b'Essay Review'), (b'TH', b'Thesis'), (b'EV', b'Event'), (b'WO', b'Web Object'), (b'MO', b'Multimedia Object'), (b'AO', b'Archive Object'), (b'DR', b'Digital Resource'), (b'PC', b'Personal Recognition')], max_length=2, null=True, verbose_name=b'type'),
),
migrations.AlterField(
model_name='ccrelation',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'IC', b'Includes Chapter'), (b'ISA', b'Includes Series Article'), (b'ICO', b'Includes Citation Object'), (b'RO', b'Is Review Of'), (b'RE', b'Responds To'), (b'AS', b'Is Associated With'), (b'RB', b'Is Reviewed By')], help_text=b'Type of relationship between two citation records.', max_length=3, null=True),
),
migrations.AlterField(
model_name='citation',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'BO', b'Book'), (b'AR', b'Article'), (b'CH', b'Chapter'), (b'RE', b'Review'), (b'ES', b'Essay Review'), (b'TH', b'Thesis'), (b'EV', b'Event'), (b'WO', b'Web Object'), (b'MO', b'Multimedia Object'), (b'AO', b'Archive Object'), (b'DR', b'Digital Resource'), (b'PC', b'Personal Recognition')], help_text=b'This list can be extended to the resource types specified by Doublin Core Recource Types http://dublincore.org/documents/resource-typelist/', max_length=2, null=True, verbose_name=b'type'),
),
migrations.AlterField(
model_name='historicalacrelation',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'AU', b'Author'), (b'ED', b'Editor'), (b'AD', b'Advisor'), (b'CO', b'Contributor'), (b'TR', b'Translator'), (b'SU', b'Subject'), (b'CA', b'Category'), (b'PU', b'Publisher'), (b'SC', b'School'), (b'IN', b'Institution'), (b'ME', b'Meeting'), (b'PE', b'Periodical'), (b'BS', b'Book Series'), (b'CM', b'Committee Member'), (b'OR', b'Organizer'), (b'IV', b'Interviewer'), (b'GU', b'Guest'), (b'CR', b'Creator'), (b'PR', b'Producer'), (b'DI', b'Director'), (b'WR', b'Writer'), (b'PF', b'Performer'), (b'CL', b'Collector'), (b'AR', b'Archivist'), (b'RE', b'Researcher'), (b'DE', b'Developer'), (b'CP', b'Compiler'), (b'AW', b'Awardee'), (b'OF', b'Officer'), (b'HO', b'Host'), (b'DS', b'Distributor'), (b'AC', b'Archival Repository'), (b'MI', b'Maintaining Institution'), (b'PG', b'Presenting Group')], help_text=b'Used to specify the nature of the relationship between authority (as the subject) and the citation (as the object).', max_length=2, null=True, verbose_name=b'relationship type'),
),
migrations.AlterField(
model_name='historicalccrelation',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'IC', b'Includes Chapter'), (b'ISA', b'Includes Series Article'), (b'ICO', b'Includes Citation Object'), (b'RO', b'Is Review Of'), (b'RE', b'Responds To'), (b'AS', b'Is Associated With'), (b'RB', b'Is Reviewed By')], help_text=b'Type of relationship between two citation records.', max_length=3, null=True),
),
migrations.AlterField(
model_name='historicalcitation',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'BO', b'Book'), (b'AR', b'Article'), (b'CH', b'Chapter'), (b'RE', b'Review'), (b'ES', b'Essay Review'), (b'TH', b'Thesis'), (b'EV', b'Event'), (b'WO', b'Web Object'), (b'MO', b'Multimedia Object'), (b'AO', b'Archive Object'), (b'DR', b'Digital Resource'), (b'PC', b'Personal Recognition')], help_text=b'This list can be extended to the resource types specified by Doublin Core Recource Types http://dublincore.org/documents/resource-typelist/', max_length=2, null=True, verbose_name=b'type'),
),
migrations.AddField(
model_name='citation',
name='subtype',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='isisdata.CitationSubtype'),
),
migrations.AddField(
model_name='historicalcitation',
name='subtype',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='isisdata.CitationSubtype'),
),
]
| 101.847222
| 1,043
| 0.626756
| 1,077
| 7,333
| 4.209842
| 0.222841
| 0.021614
| 0.01985
| 0.042347
| 0.774813
| 0.766211
| 0.766211
| 0.748125
| 0.724746
| 0.707543
| 0
| 0.006223
| 0.167326
| 7,333
| 71
| 1,044
| 103.28169
| 0.736325
| 0.00941
| 0
| 0.578125
| 1
| 0.078125
| 0.420879
| 0.013772
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046875
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
33abc1b2e459be4d0bb096c17d1e75660f9b98e3
| 107
|
py
|
Python
|
backend/recipes/admin/__init__.py
|
hnthh/foodgram-project-react
|
3383c6a116fded11b4a764b95e6ca4ead03444f3
|
[
"MIT"
] | 1
|
2022-02-09T10:42:45.000Z
|
2022-02-09T10:42:45.000Z
|
backend/recipes/admin/__init__.py
|
hnthh/foodgram
|
3383c6a116fded11b4a764b95e6ca4ead03444f3
|
[
"MIT"
] | null | null | null |
backend/recipes/admin/__init__.py
|
hnthh/foodgram
|
3383c6a116fded11b4a764b95e6ca4ead03444f3
|
[
"MIT"
] | null | null | null |
import recipes.admin.favorite
import recipes.admin.recipe
import recipes.admin.shopping_cart # noqa: F401
| 26.75
| 48
| 0.831776
| 15
| 107
| 5.866667
| 0.6
| 0.443182
| 0.613636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030928
| 0.093458
| 107
| 3
| 49
| 35.666667
| 0.876289
| 0.093458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
33ff6e10353644d91c7aee35525298a075ff7079
| 4,351
|
py
|
Python
|
torchid/tmp/lstmfitter.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | 17
|
2019-11-15T06:27:05.000Z
|
2021-10-02T14:24:25.000Z
|
torchid/tmp/lstmfitter.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | null | null | null |
torchid/tmp/lstmfitter.py
|
marcosfelt/sysid-neural-structures-fitting
|
80eda427251e8cce1d2a565b5cbca533252315e4
|
[
"MIT"
] | 4
|
2020-09-03T17:01:34.000Z
|
2021-11-05T04:09:24.000Z
|
from __future__ import print_function
import torch
import torch.nn as nn
class LSTMSimulator(nn.Module):
    """Two-layer LSTM sequence-to-sequence simulator.

    Architecture: ``LSTMCell(n_input -> n_hidden_1)`` feeding
    ``LSTMCell(n_hidden_1 -> n_hidden_2)`` feeding
    ``Linear(n_hidden_2 -> n_output)``, unrolled step by step along the
    time axis with zero initial states.
    """

    def __init__(self, n_input=1, n_hidden_1=64, n_hidden_2=32, n_output=1):
        """Build the two LSTM cells and the output projection.

        :param n_input: number of input channels per time step.
        :param n_hidden_1: hidden size of the first LSTM cell.
        :param n_hidden_2: hidden size of the second LSTM cell.
        :param n_output: number of output channels per time step.
        """
        # Initialize the nn.Module machinery *before* assigning attributes:
        # nn.Module.__setattr__ relies on internal registries created here.
        super(LSTMSimulator, self).__init__()
        self.n_input = n_input
        self.n_hidden_1 = n_hidden_1
        self.n_hidden_2 = n_hidden_2
        self.n_output = n_output
        self.lstm1 = nn.LSTMCell(self.n_input, self.n_hidden_1)  # input size, hidden size
        self.lstm2 = nn.LSTMCell(self.n_hidden_1, self.n_hidden_2)
        self.linear = nn.Linear(self.n_hidden_2, self.n_output)

    def forward(self, input):
        """Simulate the network over a whole sequence.

        :param input: tensor of shape ``(batch, seq_len, n_input)``.
        :return: tensor of shape ``(batch, seq_len, n_output)``.
        """
        batch_size = input.size(0)
        seq_len = input.size(1)
        # Zero initial states, created on the input's device/dtype so the
        # module also works on GPU or with non-default dtypes (the original
        # hard-coded CPU/default-dtype zeros; see the old dtype comments).
        kw = {'device': input.device, 'dtype': input.dtype}
        h_t = torch.zeros(batch_size, self.n_hidden_1, **kw)
        c_t = torch.zeros(batch_size, self.n_hidden_1, **kw)
        h_t2 = torch.zeros(batch_size, self.n_hidden_2, **kw)
        c_t2 = torch.zeros(batch_size, self.n_hidden_2, **kw)
        outputs = []
        for t in range(seq_len):
            input_t = input[:, t, :]
            # One recurrence step: (h, c) -> (h', c') for both stacked cells.
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            outputs.append(self.linear(h_t2))
        return torch.stack(outputs, 1)
class LSTMAutoRegressive(nn.Module):
    """Two-layer autoregressive LSTM: the first cell consumes the exogenous
    input concatenated with the previous output.

    ``forward`` runs in teacher-forcing mode (measured delayed outputs are
    supplied); ``forward_sim`` runs in free-run mode (the model feeds its own
    previous prediction back).
    """

    def __init__(self, n_input=1, n_hidden_1=64, n_hidden_2=32, n_output=1):
        """Build the two LSTM cells and the output projection.

        :param n_input: number of exogenous input channels per time step.
        :param n_hidden_1: hidden size of the first LSTM cell.
        :param n_hidden_2: hidden size of the second LSTM cell.
        :param n_output: number of output channels per time step.
        """
        # Initialize the nn.Module machinery before assigning attributes.
        super(LSTMAutoRegressive, self).__init__()
        self.n_input = n_input
        self.n_hidden_1 = n_hidden_1
        self.n_hidden_2 = n_hidden_2
        self.n_output = n_output
        # First cell sees input + autoregressive feedback of the output.
        self.lstm1 = nn.LSTMCell(self.n_input + self.n_output, self.n_hidden_1)  # input size, hidden size
        self.lstm2 = nn.LSTMCell(self.n_hidden_1, self.n_hidden_2)
        self.linear = nn.Linear(self.n_hidden_2, self.n_output)

    def _zero_states(self, batch_size, ref):
        """Return zero (h1, c1, h2, c2) states on *ref*'s device/dtype."""
        kw = {'device': ref.device, 'dtype': ref.dtype}
        return (torch.zeros(batch_size, self.n_hidden_1, **kw),
                torch.zeros(batch_size, self.n_hidden_1, **kw),
                torch.zeros(batch_size, self.n_hidden_2, **kw),
                torch.zeros(batch_size, self.n_hidden_2, **kw))

    def forward(self, input, delayed_output):
        """Teacher-forced pass: feed the measured past output at every step.

        :param input: tensor of shape ``(batch, seq_len, n_input)``.
        :param delayed_output: tensor of shape ``(batch, seq_len, n_output)``
            holding the measured output delayed by one step.
        :return: tensor of shape ``(batch, seq_len, n_output)``.
        """
        batch_size = input.size(0)
        seq_len = input.size(1)
        h_t, c_t, h_t2, c_t2 = self._zero_states(batch_size, input)
        outputs = []
        for t in range(seq_len):
            input_t = input[:, t, :]
            delayed_output_t = delayed_output[:, t, :]
            # torch.cat generalizes the original stack(...).squeeze(-1):
            # identical for n_input = n_output = 1, and it also builds the
            # correct (batch, n_input + n_output) feature for wider signals,
            # matching lstm1's declared input size.
            feature_t = torch.cat((input_t, delayed_output_t), dim=1)
            h_t, c_t = self.lstm1(feature_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            outputs.append(self.linear(h_t2))
        return torch.stack(outputs, 1)

    def forward_sim(self, input, delayed_output_t=None):
        """Free-run simulation: feed the model's own previous prediction back.

        :param input: tensor of shape ``(batch, seq_len, n_input)``.
        :param delayed_output_t: optional initial feedback of shape
            ``(batch, n_output)``; defaults to zeros.
        :return: tensor of shape ``(batch, seq_len, n_output)``.
        """
        batch_size = input.size(0)
        seq_len = input.size(1)
        h_t, c_t, h_t2, c_t2 = self._zero_states(batch_size, input)
        if delayed_output_t is None:
            delayed_output_t = torch.zeros(batch_size, self.n_output,
                                           device=input.device, dtype=input.dtype)
        outputs = []
        for t in range(seq_len):
            input_t = input[:, t, :]
            feature_t = torch.cat((input_t, delayed_output_t), dim=1)
            h_t, c_t = self.lstm1(feature_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            delayed_output_t = output  # autoregressive feedback for step t+1
            outputs.append(output)
        return torch.stack(outputs, 1)
| 45.8
| 138
| 0.632498
| 690
| 4,351
| 3.65942
| 0.089855
| 0.071287
| 0.104554
| 0.097822
| 0.891089
| 0.891089
| 0.88396
| 0.874059
| 0.874059
| 0.874059
| 0
| 0.03108
| 0.238336
| 4,351
| 94
| 139
| 46.287234
| 0.730839
| 0.189152
| 0
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.038961
| 0
| 0.168831
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1d66ad1ccace7774ab24b91862bc0444b0787b33
| 103
|
py
|
Python
|
venv/lib/python3.8/site-packages/waitress/tests/fixtureapps/runner.py
|
ayfallen/urler
|
d7bb5c83018a75cb4af2bbb7178bcf364b61f68f
|
[
"MIT"
] | 978
|
2015-01-10T08:33:32.000Z
|
2022-03-29T07:37:48.000Z
|
venv/lib/python3.8/site-packages/waitress/tests/fixtureapps/runner.py
|
ayfallen/urler
|
d7bb5c83018a75cb4af2bbb7178bcf364b61f68f
|
[
"MIT"
] | 257
|
2015-01-13T09:06:02.000Z
|
2022-03-31T21:59:34.000Z
|
venv/lib/python3.8/site-packages/waitress/tests/fixtureapps/runner.py
|
ayfallen/urler
|
d7bb5c83018a75cb4af2bbb7178bcf364b61f68f
|
[
"MIT"
] | 155
|
2015-01-02T20:31:41.000Z
|
2022-03-06T08:54:17.000Z
|
def app():  # pragma: no cover
    """Placeholder fixture application; intentionally produces nothing."""
    return None
def returns_app():  # pragma: no cover
    """Return the ``app`` callable itself, acting as an app factory."""
    return app
| 14.714286
| 38
| 0.631068
| 15
| 103
| 4.266667
| 0.533333
| 0.28125
| 0.34375
| 0.5
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.271845
| 103
| 6
| 39
| 17.166667
| 0.853333
| 0.320388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
1d6c744ecbff32e6cdf1b7f6e7f60292fdec869d
| 112
|
py
|
Python
|
testrep/stuff.py
|
ErnestoGP/testrep
|
2c421780e04836181ed6c9a74c821a0d1400b1fb
|
[
"MIT"
] | null | null | null |
testrep/stuff.py
|
ErnestoGP/testrep
|
2c421780e04836181ed6c9a74c821a0d1400b1fb
|
[
"MIT"
] | null | null | null |
testrep/stuff.py
|
ErnestoGP/testrep
|
2c421780e04836181ed6c9a74c821a0d1400b1fb
|
[
"MIT"
] | null | null | null |
def test_success():
    """Trivial always-passing check used to sanity-test the test runner."""
    assert True
def add_and_del(startnum, addnum, delnum):
    """Return ``startnum`` increased by ``addnum`` and decreased by ``delnum``."""
    increased = startnum + addnum
    return increased - delnum
| 18.666667
| 42
| 0.758929
| 16
| 112
| 5.125
| 0.75
| 0.341463
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151786
| 112
| 5
| 43
| 22.4
| 0.863158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d57f46e12caa1261a43e2a1e95d470e6a7f8ebba
| 43,270
|
py
|
Python
|
sdk/python/pulumi_nomad/volume.py
|
pulumi/pulumi-nomad
|
00cc556d40bc1895f9ce10cb221fd21d1ef15350
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-07-25T19:12:40.000Z
|
2022-03-17T17:52:35.000Z
|
sdk/python/pulumi_nomad/volume.py
|
pulumi/pulumi-nomad
|
00cc556d40bc1895f9ce10cb221fd21d1ef15350
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2021-11-10T15:49:13.000Z
|
2022-03-31T15:36:18.000Z
|
sdk/python/pulumi_nomad/volume.py
|
pulumi/pulumi-nomad
|
00cc556d40bc1895f9ce10cb221fd21d1ef15350
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-07-25T19:12:49.000Z
|
2021-07-25T19:12:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VolumeArgs', 'Volume']
@pulumi.input_type
class VolumeArgs:
    """Input-argument bag for the ``Volume`` resource.

    Generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
    Values are stored via ``pulumi.set`` and exposed through the
    ``@property``/``@pulumi.getter`` pairs below. ``access_mode`` and
    ``attachment_mode`` are deprecated in favor of ``capabilities``.
    """

    def __init__(__self__, *,
                 external_id: pulumi.Input[str],
                 plugin_id: pulumi.Input[str],
                 volume_id: pulumi.Input[str],
                 access_mode: Optional[pulumi.Input[str]] = None,
                 attachment_mode: Optional[pulumi.Input[str]] = None,
                 capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]]] = None,
                 context: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 deregister_on_destroy: Optional[pulumi.Input[bool]] = None,
                 mount_options: Optional[pulumi.Input['VolumeMountOptionsArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Volume resource.
        :param pulumi.Input[str] external_id: The ID of the physical volume from the storage provider.
        :param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
        :param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
        :param pulumi.Input[str] access_mode: Defines whether a volume should be available concurrently.
        :param pulumi.Input[str] attachment_mode: The storage API that will be used by the volume.
        :param pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] context: An optional key-value map of strings passed directly to the CSI plugin to validate the volume.
        :param pulumi.Input[bool] deregister_on_destroy: If true, the volume will be deregistered on destroy.
        :param pulumi.Input['VolumeMountOptionsArgs'] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
        :param pulumi.Input[str] name: The display name of the volume.
        :param pulumi.Input[str] namespace: The namespace in which to create the volume.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
        :param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
        """
        pulumi.set(__self__, "external_id", external_id)
        pulumi.set(__self__, "plugin_id", plugin_id)
        pulumi.set(__self__, "volume_id", volume_id)
        # NOTE: the deprecated fields are guarded twice on purpose — this is
        # the tfgen codegen pattern: first guard emits the deprecation
        # warnings, the second performs the assignment.
        if access_mode is not None:
            warnings.warn("""use capability instead""", DeprecationWarning)
            pulumi.log.warn("""access_mode is deprecated: use capability instead""")
        if access_mode is not None:
            pulumi.set(__self__, "access_mode", access_mode)
        if attachment_mode is not None:
            warnings.warn("""use capability instead""", DeprecationWarning)
            pulumi.log.warn("""attachment_mode is deprecated: use capability instead""")
        if attachment_mode is not None:
            pulumi.set(__self__, "attachment_mode", attachment_mode)
        if capabilities is not None:
            pulumi.set(__self__, "capabilities", capabilities)
        if context is not None:
            pulumi.set(__self__, "context", context)
        if deregister_on_destroy is not None:
            pulumi.set(__self__, "deregister_on_destroy", deregister_on_destroy)
        if mount_options is not None:
            pulumi.set(__self__, "mount_options", mount_options)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if secrets is not None:
            pulumi.set(__self__, "secrets", secrets)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> pulumi.Input[str]:
        """
        The ID of the physical volume from the storage provider.
        """
        return pulumi.get(self, "external_id")

    @external_id.setter
    def external_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "external_id", value)

    @property
    @pulumi.getter(name="pluginId")
    def plugin_id(self) -> pulumi.Input[str]:
        """
        The ID of the CSI plugin that manages this volume.
        """
        return pulumi.get(self, "plugin_id")

    @plugin_id.setter
    def plugin_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "plugin_id", value)

    @property
    @pulumi.getter(name="volumeId")
    def volume_id(self) -> pulumi.Input[str]:
        """
        The unique ID of the volume, how jobs will refer to the volume.
        """
        return pulumi.get(self, "volume_id")

    @volume_id.setter
    def volume_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "volume_id", value)

    @property
    @pulumi.getter(name="accessMode")
    def access_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Defines whether a volume should be available concurrently.
        """
        return pulumi.get(self, "access_mode")

    @access_mode.setter
    def access_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_mode", value)

    @property
    @pulumi.getter(name="attachmentMode")
    def attachment_mode(self) -> Optional[pulumi.Input[str]]:
        """
        The storage API that will be used by the volume.
        """
        return pulumi.get(self, "attachment_mode")

    @attachment_mode.setter
    def attachment_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attachment_mode", value)

    @property
    @pulumi.getter
    def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]]]:
        """
        Capabilities intended to be used in a job. At least one capability must be provided.
        """
        return pulumi.get(self, "capabilities")

    @capabilities.setter
    def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]]]):
        pulumi.set(self, "capabilities", value)

    @property
    @pulumi.getter
    def context(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An optional key-value map of strings passed directly to the CSI plugin to validate the volume.
        """
        return pulumi.get(self, "context")

    @context.setter
    def context(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "context", value)

    @property
    @pulumi.getter(name="deregisterOnDestroy")
    def deregister_on_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, the volume will be deregistered on destroy.
        """
        return pulumi.get(self, "deregister_on_destroy")

    @deregister_on_destroy.setter
    def deregister_on_destroy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "deregister_on_destroy", value)

    @property
    @pulumi.getter(name="mountOptions")
    def mount_options(self) -> Optional[pulumi.Input['VolumeMountOptionsArgs']]:
        """
        Options for mounting 'block-device' volumes without a pre-formatted file system.
        """
        return pulumi.get(self, "mount_options")

    @mount_options.setter
    def mount_options(self, value: Optional[pulumi.Input['VolumeMountOptionsArgs']]):
        pulumi.set(self, "mount_options", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of the volume.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace in which to create the volume.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter
    def secrets(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
        """
        return pulumi.get(self, "secrets")

    @secrets.setter
    def secrets(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "secrets", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the volume. Currently, only 'csi' is supported.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class _VolumeState:
    """State bag used for looking up and filtering ``Volume`` resources.

    Generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit.
    Unlike ``VolumeArgs``, every field here is optional. Fields without a
    generated description (``controller_required``, ``controllers_expected``,
    ``controllers_healthy``, ``nodes_expected``, ``nodes_healthy``,
    ``plugin_provider``, ``plugin_provider_version``, ``schedulable``)
    appear to be provider-reported status values — confirm against the
    Nomad provider schema.
    """

    def __init__(__self__, *,
                 access_mode: Optional[pulumi.Input[str]] = None,
                 attachment_mode: Optional[pulumi.Input[str]] = None,
                 capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]]] = None,
                 context: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 controller_required: Optional[pulumi.Input[bool]] = None,
                 controllers_expected: Optional[pulumi.Input[int]] = None,
                 controllers_healthy: Optional[pulumi.Input[int]] = None,
                 deregister_on_destroy: Optional[pulumi.Input[bool]] = None,
                 external_id: Optional[pulumi.Input[str]] = None,
                 mount_options: Optional[pulumi.Input['VolumeMountOptionsArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 nodes_expected: Optional[pulumi.Input[int]] = None,
                 nodes_healthy: Optional[pulumi.Input[int]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 plugin_id: Optional[pulumi.Input[str]] = None,
                 plugin_provider: Optional[pulumi.Input[str]] = None,
                 plugin_provider_version: Optional[pulumi.Input[str]] = None,
                 schedulable: Optional[pulumi.Input[bool]] = None,
                 secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 volume_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Volume resources.
        :param pulumi.Input[str] access_mode: Defines whether a volume should be available concurrently.
        :param pulumi.Input[str] attachment_mode: The storage API that will be used by the volume.
        :param pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] context: An optional key-value map of strings passed directly to the CSI plugin to validate the volume.
        :param pulumi.Input[bool] deregister_on_destroy: If true, the volume will be deregistered on destroy.
        :param pulumi.Input[str] external_id: The ID of the physical volume from the storage provider.
        :param pulumi.Input['VolumeMountOptionsArgs'] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
        :param pulumi.Input[str] name: The display name of the volume.
        :param pulumi.Input[str] namespace: The namespace in which to create the volume.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
        :param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
        :param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
        :param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
        """
        # Deprecated fields are guarded twice: the first guard emits the
        # deprecation warnings, the second performs the assignment (tfgen
        # codegen pattern).
        if access_mode is not None:
            warnings.warn("""use capability instead""", DeprecationWarning)
            pulumi.log.warn("""access_mode is deprecated: use capability instead""")
        if access_mode is not None:
            pulumi.set(__self__, "access_mode", access_mode)
        if attachment_mode is not None:
            warnings.warn("""use capability instead""", DeprecationWarning)
            pulumi.log.warn("""attachment_mode is deprecated: use capability instead""")
        if attachment_mode is not None:
            pulumi.set(__self__, "attachment_mode", attachment_mode)
        if capabilities is not None:
            pulumi.set(__self__, "capabilities", capabilities)
        if context is not None:
            pulumi.set(__self__, "context", context)
        if controller_required is not None:
            pulumi.set(__self__, "controller_required", controller_required)
        if controllers_expected is not None:
            pulumi.set(__self__, "controllers_expected", controllers_expected)
        if controllers_healthy is not None:
            pulumi.set(__self__, "controllers_healthy", controllers_healthy)
        if deregister_on_destroy is not None:
            pulumi.set(__self__, "deregister_on_destroy", deregister_on_destroy)
        if external_id is not None:
            pulumi.set(__self__, "external_id", external_id)
        if mount_options is not None:
            pulumi.set(__self__, "mount_options", mount_options)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if nodes_expected is not None:
            pulumi.set(__self__, "nodes_expected", nodes_expected)
        if nodes_healthy is not None:
            pulumi.set(__self__, "nodes_healthy", nodes_healthy)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if plugin_id is not None:
            pulumi.set(__self__, "plugin_id", plugin_id)
        if plugin_provider is not None:
            pulumi.set(__self__, "plugin_provider", plugin_provider)
        if plugin_provider_version is not None:
            pulumi.set(__self__, "plugin_provider_version", plugin_provider_version)
        if schedulable is not None:
            pulumi.set(__self__, "schedulable", schedulable)
        if secrets is not None:
            pulumi.set(__self__, "secrets", secrets)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if volume_id is not None:
            pulumi.set(__self__, "volume_id", volume_id)

    @property
    @pulumi.getter(name="accessMode")
    def access_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Defines whether a volume should be available concurrently.
        """
        return pulumi.get(self, "access_mode")

    @access_mode.setter
    def access_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_mode", value)

    @property
    @pulumi.getter(name="attachmentMode")
    def attachment_mode(self) -> Optional[pulumi.Input[str]]:
        """
        The storage API that will be used by the volume.
        """
        return pulumi.get(self, "attachment_mode")

    @attachment_mode.setter
    def attachment_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attachment_mode", value)

    @property
    @pulumi.getter
    def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]]]:
        """
        Capabilities intended to be used in a job. At least one capability must be provided.
        """
        return pulumi.get(self, "capabilities")

    @capabilities.setter
    def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeCapabilityArgs']]]]):
        pulumi.set(self, "capabilities", value)

    @property
    @pulumi.getter
    def context(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An optional key-value map of strings passed directly to the CSI plugin to validate the volume.
        """
        return pulumi.get(self, "context")

    @context.setter
    def context(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "context", value)

    @property
    @pulumi.getter(name="controllerRequired")
    def controller_required(self) -> Optional[pulumi.Input[bool]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "controller_required")

    @controller_required.setter
    def controller_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "controller_required", value)

    @property
    @pulumi.getter(name="controllersExpected")
    def controllers_expected(self) -> Optional[pulumi.Input[int]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "controllers_expected")

    @controllers_expected.setter
    def controllers_expected(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "controllers_expected", value)

    @property
    @pulumi.getter(name="controllersHealthy")
    def controllers_healthy(self) -> Optional[pulumi.Input[int]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "controllers_healthy")

    @controllers_healthy.setter
    def controllers_healthy(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "controllers_healthy", value)

    @property
    @pulumi.getter(name="deregisterOnDestroy")
    def deregister_on_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, the volume will be deregistered on destroy.
        """
        return pulumi.get(self, "deregister_on_destroy")

    @deregister_on_destroy.setter
    def deregister_on_destroy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "deregister_on_destroy", value)

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the physical volume from the storage provider.
        """
        return pulumi.get(self, "external_id")

    @external_id.setter
    def external_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_id", value)

    @property
    @pulumi.getter(name="mountOptions")
    def mount_options(self) -> Optional[pulumi.Input['VolumeMountOptionsArgs']]:
        """
        Options for mounting 'block-device' volumes without a pre-formatted file system.
        """
        return pulumi.get(self, "mount_options")

    @mount_options.setter
    def mount_options(self, value: Optional[pulumi.Input['VolumeMountOptionsArgs']]):
        pulumi.set(self, "mount_options", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of the volume.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace in which to create the volume.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="nodesExpected")
    def nodes_expected(self) -> Optional[pulumi.Input[int]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "nodes_expected")

    @nodes_expected.setter
    def nodes_expected(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "nodes_expected", value)

    @property
    @pulumi.getter(name="nodesHealthy")
    def nodes_healthy(self) -> Optional[pulumi.Input[int]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "nodes_healthy")

    @nodes_healthy.setter
    def nodes_healthy(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "nodes_healthy", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="pluginId")
    def plugin_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the CSI plugin that manages this volume.
        """
        return pulumi.get(self, "plugin_id")

    @plugin_id.setter
    def plugin_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plugin_id", value)

    @property
    @pulumi.getter(name="pluginProvider")
    def plugin_provider(self) -> Optional[pulumi.Input[str]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "plugin_provider")

    @plugin_provider.setter
    def plugin_provider(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plugin_provider", value)

    @property
    @pulumi.getter(name="pluginProviderVersion")
    def plugin_provider_version(self) -> Optional[pulumi.Input[str]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "plugin_provider_version")

    @plugin_provider_version.setter
    def plugin_provider_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "plugin_provider_version", value)

    @property
    @pulumi.getter
    def schedulable(self) -> Optional[pulumi.Input[bool]]:
        # No generated description; see the class docstring note.
        return pulumi.get(self, "schedulable")

    @schedulable.setter
    def schedulable(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "schedulable", value)

    @property
    @pulumi.getter
    def secrets(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
        """
        return pulumi.get(self, "secrets")

    @secrets.setter
    def secrets(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "secrets", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the volume. Currently, only 'csi' is supported.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="volumeId")
    def volume_id(self) -> Optional[pulumi.Input[str]]:
        """
        The unique ID of the volume, how jobs will refer to the volume.
        """
        return pulumi.get(self, "volume_id")

    @volume_id.setter
    def volume_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume_id", value)
class Volume(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_mode: Optional[pulumi.Input[str]] = None,
attachment_mode: Optional[pulumi.Input[str]] = None,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeCapabilityArgs']]]]] = None,
context: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
deregister_on_destroy: Optional[pulumi.Input[bool]] = None,
external_id: Optional[pulumi.Input[str]] = None,
mount_options: Optional[pulumi.Input[pulumi.InputType['VolumeMountOptionsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
plugin_id: Optional[pulumi.Input[str]] = None,
secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a Volume resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_mode: Defines whether a volume should be available concurrently.
:param pulumi.Input[str] attachment_mode: The storage API that will be used by the volume.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeCapabilityArgs']]]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] context: An optional key-value map of strings passed directly to the CSI plugin to validate the volume.
:param pulumi.Input[bool] deregister_on_destroy: If true, the volume will be deregistered on destroy.
:param pulumi.Input[str] external_id: The ID of the physical volume from the storage provider.
:param pulumi.Input[pulumi.InputType['VolumeMountOptionsArgs']] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
:param pulumi.Input[str] name: The display name of the volume.
:param pulumi.Input[str] namespace: The namespace in which to create the volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
:param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
:param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
:param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VolumeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Volume resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param VolumeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VolumeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   access_mode: Optional[pulumi.Input[str]] = None,
                   attachment_mode: Optional[pulumi.Input[str]] = None,
                   capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeCapabilityArgs']]]]] = None,
                   context: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   deregister_on_destroy: Optional[pulumi.Input[bool]] = None,
                   external_id: Optional[pulumi.Input[str]] = None,
                   mount_options: Optional[pulumi.Input[pulumi.InputType['VolumeMountOptionsArgs']]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   namespace: Optional[pulumi.Input[str]] = None,
                   parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   plugin_id: Optional[pulumi.Input[str]] = None,
                   secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   type: Optional[pulumi.Input[str]] = None,
                   volume_id: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """
    Shared implementation behind both ``__init__`` overloads: validates
    resource options, emits deprecation warnings, enforces required
    properties, and registers the resource with the Pulumi engine.
    """
    # Normalize and validate the resource options before any use.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource. __props__ is reserved for the
        # opts.id rehydration path used by Volume.get().
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = VolumeArgs.__new__(VolumeArgs)
        # access_mode / attachment_mode are deprecated in favor of
        # `capabilities`; warn only for resources without a URN yet.
        if access_mode is not None and not opts.urn:
            warnings.warn("""use capability instead""", DeprecationWarning)
            pulumi.log.warn("""access_mode is deprecated: use capability instead""")
        __props__.__dict__["access_mode"] = access_mode
        if attachment_mode is not None and not opts.urn:
            warnings.warn("""use capability instead""", DeprecationWarning)
            pulumi.log.warn("""attachment_mode is deprecated: use capability instead""")
        __props__.__dict__["attachment_mode"] = attachment_mode
        __props__.__dict__["capabilities"] = capabilities
        __props__.__dict__["context"] = context
        __props__.__dict__["deregister_on_destroy"] = deregister_on_destroy
        # external_id, plugin_id and volume_id are required at create time.
        if external_id is None and not opts.urn:
            raise TypeError("Missing required property 'external_id'")
        __props__.__dict__["external_id"] = external_id
        __props__.__dict__["mount_options"] = mount_options
        __props__.__dict__["name"] = name
        __props__.__dict__["namespace"] = namespace
        __props__.__dict__["parameters"] = parameters
        if plugin_id is None and not opts.urn:
            raise TypeError("Missing required property 'plugin_id'")
        __props__.__dict__["plugin_id"] = plugin_id
        __props__.__dict__["secrets"] = secrets
        __props__.__dict__["type"] = type
        if volume_id is None and not opts.urn:
            raise TypeError("Missing required property 'volume_id'")
        __props__.__dict__["volume_id"] = volume_id
        # Output-only properties: initialized to None here and later
        # populated from provider state.
        __props__.__dict__["controller_required"] = None
        __props__.__dict__["controllers_expected"] = None
        __props__.__dict__["controllers_healthy"] = None
        __props__.__dict__["nodes_expected"] = None
        __props__.__dict__["nodes_healthy"] = None
        __props__.__dict__["plugin_provider"] = None
        __props__.__dict__["plugin_provider_version"] = None
        __props__.__dict__["schedulable"] = None
    # Register the resource with the engine under its Pulumi type token.
    super(Volume, __self__).__init__(
        'nomad:index/volume:Volume',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        access_mode: Optional[pulumi.Input[str]] = None,
        attachment_mode: Optional[pulumi.Input[str]] = None,
        capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeCapabilityArgs']]]]] = None,
        context: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        controller_required: Optional[pulumi.Input[bool]] = None,
        controllers_expected: Optional[pulumi.Input[int]] = None,
        controllers_healthy: Optional[pulumi.Input[int]] = None,
        deregister_on_destroy: Optional[pulumi.Input[bool]] = None,
        external_id: Optional[pulumi.Input[str]] = None,
        mount_options: Optional[pulumi.Input[pulumi.InputType['VolumeMountOptionsArgs']]] = None,
        name: Optional[pulumi.Input[str]] = None,
        namespace: Optional[pulumi.Input[str]] = None,
        nodes_expected: Optional[pulumi.Input[int]] = None,
        nodes_healthy: Optional[pulumi.Input[int]] = None,
        parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        plugin_id: Optional[pulumi.Input[str]] = None,
        plugin_provider: Optional[pulumi.Input[str]] = None,
        plugin_provider_version: Optional[pulumi.Input[str]] = None,
        schedulable: Optional[pulumi.Input[bool]] = None,
        secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        type: Optional[pulumi.Input[str]] = None,
        volume_id: Optional[pulumi.Input[str]] = None) -> 'Volume':
    """
    Get an existing Volume resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    Unlike the constructor, this also accepts the provider-computed state
    fields (controller_required, controllers_expected, controllers_healthy,
    nodes_expected, nodes_healthy, plugin_provider, plugin_provider_version,
    schedulable), which are outputs on new resources.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] access_mode: Defines whether a volume should be available concurrently.
    :param pulumi.Input[str] attachment_mode: The storage API that will be used by the volume.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeCapabilityArgs']]]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] context: An optional key-value map of strings passed directly to the CSI plugin to validate the volume.
    :param pulumi.Input[bool] deregister_on_destroy: If true, the volume will be deregistered on destroy.
    :param pulumi.Input[str] external_id: The ID of the physical volume from the storage provider.
    :param pulumi.Input[pulumi.InputType['VolumeMountOptionsArgs']] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
    :param pulumi.Input[str] name: The display name of the volume.
    :param pulumi.Input[str] namespace: The namespace in which to create the volume.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
    :param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
    :param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
    :param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
    """
    # Merge the lookup id into the options so the engine rehydrates
    # instead of creating (see the opts.id branch in _internal_init).
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _VolumeState.__new__(_VolumeState)
    __props__.__dict__["access_mode"] = access_mode
    __props__.__dict__["attachment_mode"] = attachment_mode
    __props__.__dict__["capabilities"] = capabilities
    __props__.__dict__["context"] = context
    __props__.__dict__["controller_required"] = controller_required
    __props__.__dict__["controllers_expected"] = controllers_expected
    __props__.__dict__["controllers_healthy"] = controllers_healthy
    __props__.__dict__["deregister_on_destroy"] = deregister_on_destroy
    __props__.__dict__["external_id"] = external_id
    __props__.__dict__["mount_options"] = mount_options
    __props__.__dict__["name"] = name
    __props__.__dict__["namespace"] = namespace
    __props__.__dict__["nodes_expected"] = nodes_expected
    __props__.__dict__["nodes_healthy"] = nodes_healthy
    __props__.__dict__["parameters"] = parameters
    __props__.__dict__["plugin_id"] = plugin_id
    __props__.__dict__["plugin_provider"] = plugin_provider
    __props__.__dict__["plugin_provider_version"] = plugin_provider_version
    __props__.__dict__["schedulable"] = schedulable
    __props__.__dict__["secrets"] = secrets
    __props__.__dict__["type"] = type
    __props__.__dict__["volume_id"] = volume_id
    return Volume(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessMode")
def access_mode(self) -> pulumi.Output[Optional[str]]:
    """
    Defines whether a volume should be available concurrently.
    Deprecated: use ``capabilities`` instead (see the warning in ``_internal_init``).
    """
    return pulumi.get(self, "access_mode")
@property
@pulumi.getter(name="attachmentMode")
def attachment_mode(self) -> pulumi.Output[Optional[str]]:
    """
    The storage API that will be used by the volume.
    Deprecated: use ``capabilities`` instead (see the warning in ``_internal_init``).
    """
    return pulumi.get(self, "attachment_mode")
@property
@pulumi.getter
def capabilities(self) -> pulumi.Output[Optional[Sequence['outputs.VolumeCapability']]]:
    """Capabilities intended to be used in a job. At least one capability must be provided."""
    return pulumi.get(self, "capabilities")
@property
@pulumi.getter
def context(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """An optional key-value map of strings passed directly to the CSI plugin to validate the volume."""
    return pulumi.get(self, "context")
@property
@pulumi.getter(name="controllerRequired")
def controller_required(self) -> pulumi.Output[bool]:
    """Provider-computed output (initialized to None at create time); presumably whether the CSI plugin requires a controller — confirm against Nomad docs."""
    return pulumi.get(self, "controller_required")
@property
@pulumi.getter(name="controllersExpected")
def controllers_expected(self) -> pulumi.Output[int]:
    """Provider-computed output; presumably the number of expected CSI controllers — confirm against Nomad docs."""
    return pulumi.get(self, "controllers_expected")
@property
@pulumi.getter(name="controllersHealthy")
def controllers_healthy(self) -> pulumi.Output[int]:
    """Provider-computed output; presumably the number of healthy CSI controllers — confirm against Nomad docs."""
    return pulumi.get(self, "controllers_healthy")
@property
@pulumi.getter(name="deregisterOnDestroy")
def deregister_on_destroy(self) -> pulumi.Output[Optional[bool]]:
    """If true, the volume will be deregistered on destroy."""
    return pulumi.get(self, "deregister_on_destroy")
@property
@pulumi.getter(name="externalId")
def external_id(self) -> pulumi.Output[str]:
    """The ID of the physical volume from the storage provider."""
    return pulumi.get(self, "external_id")
@property
@pulumi.getter(name="mountOptions")
def mount_options(self) -> pulumi.Output[Optional['outputs.VolumeMountOptions']]:
    """Options for mounting 'block-device' volumes without a pre-formatted file system."""
    return pulumi.get(self, "mount_options")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """The display name of the volume."""
    return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> pulumi.Output[Optional[str]]:
    """The namespace in which to create the volume."""
    return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="nodesExpected")
def nodes_expected(self) -> pulumi.Output[int]:
    """Provider-computed output; presumably the number of nodes expected to run the plugin — confirm against Nomad docs."""
    return pulumi.get(self, "nodes_expected")
@property
@pulumi.getter(name="nodesHealthy")
def nodes_healthy(self) -> pulumi.Output[int]:
    """Provider-computed output; presumably the number of healthy plugin nodes — confirm against Nomad docs."""
    return pulumi.get(self, "nodes_healthy")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """An optional key-value map of strings passed directly to the CSI plugin to configure the volume."""
    return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="pluginId")
def plugin_id(self) -> pulumi.Output[str]:
    """The ID of the CSI plugin that manages this volume."""
    return pulumi.get(self, "plugin_id")
@property
@pulumi.getter(name="pluginProvider")
def plugin_provider(self) -> pulumi.Output[str]:
    """Provider-computed output; presumably the CSI plugin's provider name — confirm against Nomad docs."""
    return pulumi.get(self, "plugin_provider")
@property
@pulumi.getter(name="pluginProviderVersion")
def plugin_provider_version(self) -> pulumi.Output[str]:
    """Provider-computed output; presumably the CSI plugin provider's version string — confirm against Nomad docs."""
    return pulumi.get(self, "plugin_provider_version")
@property
@pulumi.getter
def schedulable(self) -> pulumi.Output[bool]:
    """Provider-computed output; presumably whether the volume is currently schedulable — confirm against Nomad docs."""
    return pulumi.get(self, "schedulable")
@property
@pulumi.getter
def secrets(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """An optional key-value map of strings used as credentials for publishing and unpublishing volumes."""
    return pulumi.get(self, "secrets")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
    """The type of the volume. Currently, only 'csi' is supported."""
    return pulumi.get(self, "type")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Output[str]:
    """The unique ID of the volume, how jobs will refer to the volume."""
    return pulumi.get(self, "volume_id")
| 46.031915
| 192
| 0.655373
| 5,094
| 43,270
| 5.365724
| 0.042992
| 0.10866
| 0.078367
| 0.057147
| 0.913475
| 0.89123
| 0.866791
| 0.845096
| 0.830242
| 0.785497
| 0
| 0.00003
| 0.234966
| 43,270
| 939
| 193
| 46.080937
| 0.825665
| 0.238502
| 0
| 0.749196
| 1
| 0
| 0.119253
| 0.020482
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162379
| false
| 0.001608
| 0.011254
| 0.025723
| 0.273312
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d599b011971e18cab79f9ad19239cd619908addb
| 4,059
|
bzl
|
Python
|
third_party/dependency_analyzer/src/test/analyzer_test_scala_2.bzl
|
wiwa/rules_scala
|
3dd5d8110d56cfc19722532866cbfc039a6a9612
|
[
"Apache-2.0"
] | null | null | null |
third_party/dependency_analyzer/src/test/analyzer_test_scala_2.bzl
|
wiwa/rules_scala
|
3dd5d8110d56cfc19722532866cbfc039a6a9612
|
[
"Apache-2.0"
] | null | null | null |
third_party/dependency_analyzer/src/test/analyzer_test_scala_2.bzl
|
wiwa/rules_scala
|
3dd5d8110d56cfc19722532866cbfc039a6a9612
|
[
"Apache-2.0"
] | null | null | null |
load("//scala:scala.bzl", "scala_test")
def analyzer_tests_scala_2():
    """Registers the Scala 2.x test targets for the dependency analyzer plugin."""

    # JVM flags shared by every test that loads the analyzer compiler plugin.
    base_jvm_flags = [
        "-Dplugin.jar.location=$(execpath //third_party/dependency_analyzer/src/main:dependency_analyzer)",
        "-Dscala.library.location=$(rootpath @io_bazel_rules_scala_scala_library)",
        "-Dscala.reflect.location=$(rootpath @io_bazel_rules_scala_scala_reflect)",
    ]

    # AST-based used-jar detection.
    scala_test(
        name = "ast_used_jar_finder_test",
        size = "small",
        srcs = ["io/bazel/rulesscala/dependencyanalyzer/AstUsedJarFinderTest.scala"],
        jvm_flags = base_jvm_flags,
        deps = [
            "//src/java/io/bazel/rulesscala/io_utils",
            "//third_party/dependency_analyzer/src/main:dependency_analyzer",
            "//third_party/dependency_analyzer/src/main:scala_version",
            "//third_party/utils/src/test:test_util",
            "@io_bazel_rules_scala_scala_compiler",
            "@io_bazel_rules_scala_scala_library",
            "@io_bazel_rules_scala_scala_reflect",
        ],
    )

    # Scala version parsing/comparison helper (needs no plugin flags).
    scala_test(
        name = "scala_version_test",
        size = "small",
        srcs = ["io/bazel/rulesscala/dependencyanalyzer/ScalaVersionTest.scala"],
        deps = [
            "//third_party/dependency_analyzer/src/main:scala_version",
            "@io_bazel_rules_scala_scala_library",
            "@io_bazel_rules_scala_scala_reflect",
        ],
    )

    # Dependency reporting from scalac runs.
    scala_test(
        name = "scalac_dependency_test",
        size = "small",
        srcs = ["io/bazel/rulesscala/dependencyanalyzer/ScalacDependencyTest.scala"],
        jvm_flags = base_jvm_flags,
        unused_dependency_checker_mode = "off",
        deps = [
            "//src/java/io/bazel/rulesscala/io_utils",
            "//third_party/dependency_analyzer/src/main:dependency_analyzer",
            "//third_party/utils/src/test:test_util",
            "@io_bazel_rules_scala_scala_compiler",
            "@io_bazel_rules_scala_scala_library",
            "@io_bazel_rules_scala_scala_reflect",
        ],
    )

    # Strict-deps enforcement; needs extra jars to reference as indirect deps.
    scala_test(
        name = "strict_deps_test",
        size = "small",
        srcs = ["io/bazel/rulesscala/dependencyanalyzer/StrictDepsTest.scala"],
        jvm_flags = base_jvm_flags + [
            "-Dguava.jar.location=$(rootpath @com_google_guava_guava_21_0_with_file//jar)",
            "-Dapache.commons.jar.location=$(location @org_apache_commons_commons_lang_3_5_without_file//:linkable_org_apache_commons_commons_lang_3_5_without_file)",
        ],
        unused_dependency_checker_mode = "off",
        deps = [
            "//third_party/dependency_analyzer/src/main:dependency_analyzer",
            "//third_party/utils/src/test:test_util",
            "@com_google_guava_guava_21_0_with_file//jar",
            "@io_bazel_rules_scala_scala_compiler",
            "@io_bazel_rules_scala_scala_library",
            "@io_bazel_rules_scala_scala_reflect",
            "@org_apache_commons_commons_lang_3_5_without_file//:linkable_org_apache_commons_commons_lang_3_5_without_file",
        ],
    )

    # Unused-dependency detection; references commons-lang as the unused dep.
    scala_test(
        name = "unused_dependency_checker_test",
        size = "small",
        srcs = ["io/bazel/rulesscala/dependencyanalyzer/UnusedDependencyCheckerTest.scala"],
        jvm_flags = base_jvm_flags + [
            "-Dapache.commons.jar.location=$(location @org_apache_commons_commons_lang_3_5_without_file//:linkable_org_apache_commons_commons_lang_3_5_without_file)",
        ],
        unused_dependency_checker_mode = "off",
        deps = [
            "//third_party/dependency_analyzer/src/main:dependency_analyzer",
            "//third_party/utils/src/test:test_util",
            "@io_bazel_rules_scala_scala_compiler",
            "@io_bazel_rules_scala_scala_library",
            "@io_bazel_rules_scala_scala_reflect",
            "@org_apache_commons_commons_lang_3_5_without_file//:linkable_org_apache_commons_commons_lang_3_5_without_file",
        ],
    )
| 40.59
| 166
| 0.646465
| 446
| 4,059
| 5.349776
| 0.156951
| 0.067477
| 0.080469
| 0.113998
| 0.861274
| 0.861274
| 0.800503
| 0.776194
| 0.609807
| 0.564543
| 0
| 0.007507
| 0.245134
| 4,059
| 99
| 167
| 41
| 0.771214
| 0
| 0
| 0.741935
| 0
| 0
| 0.605075
| 0.580192
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010753
| false
| 0
| 0
| 0
| 0.010753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5a2cab748c4b1228529e88bc73c25b3c36ec241
| 28,502
|
py
|
Python
|
tests/test_p/test_p_int.py
|
SimLeek/coordencode
|
092783b07fe9f025a7104c6cb8979a639387e52a
|
[
"MIT"
] | 1
|
2019-09-10T10:25:00.000Z
|
2019-09-10T10:25:00.000Z
|
tests/test_p/test_p_int.py
|
SimLeek/coordencode
|
092783b07fe9f025a7104c6cb8979a639387e52a
|
[
"MIT"
] | null | null | null |
tests/test_p/test_p_int.py
|
SimLeek/coordencode
|
092783b07fe9f025a7104c6cb8979a639387e52a
|
[
"MIT"
] | null | null | null |
import numpy as np
from pnums import PInt
from pnums.p_int import layer_or, layer_xor
def test_create_1d():
    """A 1-D PInt encodes an integer as a (2, bits) probability tensor.

    Row 1 holds the per-bit "one" probability (confidence for set bits,
    1-confidence for clear bits); row 0 is the complement.
    """
    a = PInt(123456789, confidence=0.6)
    np.testing.assert_array_almost_equal(
        [
            [0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.6, 0.6, 0.4, 0.6, 0.4, 0.6, 0.6, 0.4, 0.6, 0.6,
             0.6, 0.6, 0.4, 0.4, 0.6, 0.6, 0.4, 0.6, 0.4, 0.4, 0.4, 0.6, 0.4, 0.6, 0.4, 0.6],
            [0.6, 0.6, 0.6, 0.6, 0.6, 0.4, 0.4, 0.4, 0.6, 0.4, 0.6, 0.4, 0.4, 0.6, 0.4, 0.4,
             0.4, 0.4, 0.6, 0.6, 0.4, 0.4, 0.6, 0.4, 0.6, 0.6, 0.6, 0.4, 0.6, 0.4, 0.6, 0.4],
        ],
        a.tensor,
    )
def test_create_2d():
    """A 2-D PInt encodes two coordinates jointly into a (2, 2, bits) tensor.

    NOTE(review): the off-value 0.133333 (rather than 0.4) indicates joint
    mass is spread over the 2**dims corners — confirm against pnums docs.
    """
    a = PInt(123, 234, bits=8, confidence=0.6)
    np.testing.assert_array_almost_equal(
        [
            [
                [0.133333, 0.6000, 0.6000, 0.133333, 0.6000, 0.133333, 0.6000, 0.133333],
                [0.133333, 0.133333, 0.133333, 0.6000, 0.133333, 0.133333, 0.133333, 0.6000],
            ],
            [
                [0.6000, 0.133333, 0.133333, 0.133333, 0.133333, 0.133333, 0.133333, 0.133333],
                [0.133333, 0.133333, 0.133333, 0.133333, 0.133333, 0.6000, 0.133333, 0.133333],
            ],
        ],
        a.tensor,
    )
def test_create_3d():
    """A 3-D PInt encodes three coordinates into a (2, 2, 2, bits) tensor."""
    a = PInt(14, 11, 7, bits=4, confidence=0.6)
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.057143, 0.057143, 0.6000, 0.057143],
                    [0.6000, 0.057143, 0.057143, 0.057143],
                ],
                [
                    [0.057143, 0.6000, 0.057143, 0.057143],
                    [0.057143, 0.057143, 0.057143, 0.057143],
                ],
            ],
            [
                [
                    [0.057143, 0.057143, 0.057143, 0.6000],
                    [0.057143, 0.057143, 0.057143, 0.057143],
                ],
                [
                    [0.057143, 0.057143, 0.057143, 0.057143],
                    [0.057143, 0.057143, 0.057143, 0.057143],
                ],
            ],
        ],
        a.tensor,
    )
def test_add_1d():
    """Addition of 1-D PInts: raw tensor, normalize(), quantize(), and decoding."""
    a = PInt(10, bits=5, confidence=0.8)
    b = PInt(6, bits=5, confidence=0.9)
    c = a + b
    # Raw (un-normalized) sum tensor.
    np.testing.assert_array_almost_equal(
        [
            [0.8504768, 0.753892, 0.669292, 0.45831996, 0.442],
            [0.84952325, 0.9461081, 1.0307081, 1.24168, 1.258],
        ],
        c.tensor,
    )
    # normalize() rescales each bit column to sum to 1.
    c = c.normalize()
    np.testing.assert_array_almost_equal(
        [
            [0.50028044, 0.44346586, 0.39370114, 0.26959997, 0.26],
            [0.49971953, 0.5565342, 0.60629886, 0.73039997, 0.74],
        ],
        c.tensor,
    )
    # quantize() snaps each column to a hard 0/1 decision.
    q = c.quantize()
    np.testing.assert_array_almost_equal(
        [[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0]], q.tensor
    )
    i = float(c)
    assert i == 16
    # 'average' decoding blends bit probabilities instead of thresholding.
    i = c.asfloat(method='average')
    assert i == 13.926218450069427
    # Summing raw tensors directly (without __add__'s carry logic) decodes differently.
    d = PInt(0, bits=5)
    d.tensor = a.tensor + b.tensor
    i = d.asfloat(method='average')
    assert i == 10.050000339746475
    # Full-confidence addition decodes exactly.
    a = PInt(10, bits=5)
    b = PInt(6, bits=5)
    c = a + b
    i = int(c)
    assert i == 16
    a = PInt(1, bits=5)
    b = PInt(1, bits=5)
    c = a + b
    i = int(c)
    assert i == 2
def test_add_2d():
    """Addition of 2-D PInts at full and partial confidence."""
    a = PInt(10, 11, bits=8)
    b = PInt(6, 13, bits=8)
    c = a + b
    # Full confidence: sum tensor is exact (entries 0.0 or 2.0).
    np.testing.assert_array_almost_equal(
        [
            [
                [0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0],
                [2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0],
            ],
        ],
        c.tensor,
    )
    # Partial confidence: probability mass spreads across corners.
    a = PInt(10, 11, bits=8, confidence=0.8)
    b = PInt(6, 13, bits=8, confidence=0.9)
    c = a + b
    np.testing.assert_array_almost_equal(
        [
            [
                [0.15804069, 0.16226967, 0.20636845, 0.6811368,
                 0.31998023, 0.21316226, 0.16209187, 0.1548889],
                [0.16720827, 0.17999499, 0.23011242, 0.2770214,
                 0.30978364, 0.30276603, 0.15728992, 0.1548889],
            ],
            [
                [0.16867277, 0.18810391, 0.27501187, 0.5256308,
                 0.6511188, 0.44122377, 0.35417086, 0.1548889],
                [1.2060783, 1.1696315, 0.9885073, 0.21621099,
                 0.41911733, 0.742848, 1.0264474, 1.2353333],
            ],
        ],
        c.tensor,
    )
    # normalize(0.5) rescales each column to total 0.5.
    np.testing.assert_array_almost_equal(
        [
            [
                [0.04648256, 0.04772637, 0.0606966, 0.20033436,
                 0.09411184, 0.06269478, 0.04767408, 0.04555556],
                [0.0491789, 0.0529397, 0.06768012, 0.08147689,
                 0.09111284, 0.08904883, 0.04626174, 0.04555556],
            ],
            [
                [0.04960964, 0.05532468, 0.08088584, 0.1545973,
                 0.19150554, 0.1297717, 0.10416789, 0.04555556],
                [0.3547289, 0.34400925, 0.29073742, 0.06359147,
                 0.12326981, 0.2184847, 0.3018963, 0.3633333],
            ],
        ],
        c.normalize(0.5).tensor,
    )
    q = c.quantize()
    np.testing.assert_array_almost_equal(
        [
            [
                [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            ],
        ],
        q.tensor,
    )
    assert c.asfloat() == (16, 24)
    i = c.asfloat(method='average')
    assert i == (59.34417091310024, 65.10842560231686)  # artifacts of addition on higher bits.
    # Summing raw tensors directly (without __add__'s logic) decodes differently.
    d = PInt(0, 0, bits=5)
    d.tensor = a.tensor + b.tensor
    i = d.asfloat(method='average')
    assert i == (27.733335733413696, 31.20000195503235)
    # Small-value smoke check.
    a = PInt(1, 2, bits=6)
    b = PInt(2, 1, bits=6)
    c = a + b
    np.testing.assert_array_almost_equal(
        [
            [[0.0, 0.0, 0.0, 0.0, 2.0, 2.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 0.0, 0.0]],
        ],
        c.tensor,
    )
    assert c.asfloat() == (3, 3)
def test_xor_3d():
    """Bitwise XOR of 3-D PInts: 10^6=12, 11^13=6, 12^7=11."""
    a = PInt(10, 11, 12, bits=8)
    b = PInt(6, 13, 7, bits=8)
    c = a ^ b
    assert c.asfloat() == (12, 6, 11)
    # Full confidence: one 2.0 per active bit column.
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0],
                ],
                [
                    [0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                ],
            ],
            [
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                ],
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0],
                    [2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0],
                ],
            ],
        ],
        c.tensor,
    )
    # Partial confidence: same argmax positions, soft values.
    a = PInt(10, 11, 12, bits=8, confidence=0.8)
    b = PInt(6, 13, 7, bits=8, confidence=0.7)
    c = a ^ b
    assert c.asfloat() == (12, 6, 11)
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.09244898, 0.09244898, 0.09244898, 0.09244898],
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.09244898, 0.8528572, 0.09244898, 0.09244898],
                ],
                [
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.8528572, 0.09244898, 0.09244898, 0.09244898],
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.09244898, 0.09244898, 0.09244898, 0.09244898],
                ],
            ],
            [
                [
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.09244898, 0.09244898, 0.8528572, 0.09244898],
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.09244898, 0.09244898, 0.09244898, 0.09244898],
                ],
                [
                    [0.09244898, 0.09244898, 0.09244898, 0.09244898,
                     0.09244898, 0.09244898, 0.09244898, 0.8528572],
                    [0.8528572, 0.8528572, 0.8528572, 0.8528572,
                     0.09244898, 0.09244898, 0.09244898, 0.09244898],
                ],
            ],
        ],
        c.tensor,
    )
    # normalize(1.0) rescales each column to total 1.0.
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.06163265, 0.06163265, 0.06163265, 0.06163265],
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.06163265, 0.5685714, 0.06163265, 0.06163265],
                ],
                [
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.5685714, 0.06163265, 0.06163265, 0.06163265],
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.06163265, 0.06163265, 0.06163265, 0.06163265],
                ],
            ],
            [
                [
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.06163265, 0.06163265, 0.5685714, 0.06163265],
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.06163265, 0.06163265, 0.06163265, 0.06163265],
                ],
                [
                    [0.06163265, 0.06163265, 0.06163265, 0.06163265,
                     0.06163265, 0.06163265, 0.06163265, 0.5685714],
                    [0.5685714, 0.5685714, 0.5685714, 0.5685714,
                     0.06163265, 0.06163265, 0.06163265, 0.06163265],
                ],
            ],
        ],
        c.normalize(1.0).tensor,
    )
def test_not_3d():
    """Bitwise NOT of a PInt complements every coordinate."""
    # Exhaustive 1-bit truth table, in the original assertion order.
    for x, y, z in [
        (0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0),
        (1, 1, 0), (1, 0, 1), (0, 1, 1), (1, 1, 1),
    ]:
        a = PInt(x, y, z, bits=1)
        c = ~a
        assert c.asfloat() == (1 - x, 1 - y, 1 - z)
    # final: 4-bit complement of (10, 11, 12) is (5, 4, 3).
    a = PInt(10, 11, 12, bits=4)
    c = ~a
    assert c.asfloat() == (5, 4, 3)
    np.testing.assert_array_almost_equal(
        [
            [
                [[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]],
                [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
            ],
            [
                [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
                [[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]],
            ],
        ],
        c.tensor,
    )
    # Decoded result is invariant under lower confidence levels.
    for conf in (0.9, 0.8, 0.6):
        a = PInt(10, 11, 12, bits=4, confidence=conf)
        c = ~a
        assert c.asfloat() == (5, 4, 3)
def test_or_3d():
    """Bitwise OR of 3-D PInts, per-axis."""
    # (lhs, rhs, expected) triples, in the original assertion order
    # (some pairs are intentionally repeated, as in the original).
    pair_cases = [
        ((0, 0, 0), (0, 0, 0), (0, 0, 0)),
        ((0, 0, 1), (0, 0, 0), (0, 0, 1)),
        ((0, 0, 1), (0, 0, 1), (0, 0, 1)),
        ((0, 1, 0), (0, 1, 0), (0, 1, 0)),
        ((1, 0, 0), (1, 0, 0), (1, 0, 0)),
        ((1, 1, 0), (1, 1, 0), (1, 1, 0)),
        ((1, 0, 1), (1, 0, 1), (1, 0, 1)),
        ((0, 1, 1), (0, 1, 1), (0, 1, 1)),
        ((1, 1, 1), (1, 1, 1), (1, 1, 1)),
        # 001 / 010
        ((0, 0, 1), (0, 1, 0), (0, 1, 1)),
        ((0, 1, 0), (0, 0, 1), (0, 1, 1)),
        # 001 / 100 (original repeats the 001/010 pair here)
        ((0, 0, 1), (0, 1, 0), (0, 1, 1)),
        ((0, 1, 0), (0, 0, 1), (0, 1, 1)),
        # 001 / 110
        ((0, 0, 1), (1, 1, 0), (1, 1, 1)),
        ((1, 1, 0), (0, 0, 1), (1, 1, 1)),
        # 001 / 101
        ((0, 0, 1), (1, 0, 1), (1, 0, 1)),
        ((1, 0, 1), (0, 0, 1), (1, 0, 1)),
        # 001 / 011
        ((0, 0, 1), (0, 1, 1), (0, 1, 1)),
        ((0, 1, 1), (0, 0, 1), (0, 1, 1)),
        # 001 / 111
        ((0, 0, 1), (1, 1, 1), (1, 1, 1)),
        ((1, 1, 1), (0, 0, 1), (1, 1, 1)),
    ]
    for lhs, rhs, expected in pair_cases:
        a = PInt(*lhs, bits=2)
        b = PInt(*rhs, bits=2)
        c = a | b
        assert c.asfloat() == expected
    # final: multi-bit OR; 10|6=14, 11|13=15, 12|7=15.
    a = PInt(10, 11, 12, bits=4)
    b = PInt(6, 13, 7, bits=4)
    c = a | b
    assert c.asfloat() == (14, 15, 15)
    np.testing.assert_array_almost_equal(
        [
            [
                [[2.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
                [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
            ],
            [
                [[0.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0]],
                [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
            ],
        ],
        c.tensor,
    )
    # Decoded result is stable across confidence combinations.
    for conf_a, conf_b in ((1, 0.9), (0.9, 0.8)):
        a = PInt(10, 11, 12, bits=4, confidence=conf_a)
        b = PInt(6, 13, 7, bits=4, confidence=conf_b)
        c = a | b
        assert c.asfloat() == (14, 15, 15)
    a = PInt(10, 11, 12, bits=4, confidence=0.6)
    b = PInt(6, 13, 7, bits=4, confidence=0.7)
    c = a | b
    assert c.asfloat() == (14, 15, 15)
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.88930607, 1.0007346, 0.8659591, 0.34171426],
                    [0.12628572, 0.02865306, 0.14963265, 0.08914286],
                ],
                [
                    [0.02865306, 0.08914286, 0.22391835, 0.02865306],
                    [0.00955102, 0.00955102, 0.00955102, 0.00955102],
                ],
            ],
            [
                [
                    [0.12628572, 0.08914286, 0.02865306, 0.74816334],
                    [0.10718367, 0.00955102, 0.00955102, 0.07004082],
                ],
                [
                    [0.00955102, 0.07004082, 0.00955102, 0.00955102],
                    [0.00318367, 0.00318367, 0.00318367, 0.00318367],
                ],
            ],
        ],
        c.tensor,
    )
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.6840816, 0.7697959, 0.6661225, 0.2628571],
                    [0.09714286, 0.02204082, 0.11510205, 0.06857143],
                ],
                [
                    [0.02204082, 0.06857144, 0.1722449, 0.02204082],
                    [0.00734694, 0.00734694, 0.00734694, 0.00734694],
                ],
            ],
            [
                [
                    [0.09714286, 0.06857144, 0.02204082, 0.5755102],
                    [0.08244897, 0.00734694, 0.00734694, 0.05387755],
                ],
                [
                    [0.00734694, 0.05387756, 0.00734694, 0.00734694],
                    [0.00244898, 0.00244898, 0.00244898, 0.00244898],
                ],
            ],
        ],
        c.normalize(1.0).tensor,
    )
def test_overall_confidence():
    """Smoke test for PInt.overall_confidence().

    NOTE(review): this test asserts nothing — it only verifies the calls
    run without raising and prints the values. TODO: pin expected values.
    """
    a = PInt(10, 11, 12, bits=8, confidence=0.7)
    b = PInt(6, 13, 7, bits=8, confidence=0.8)
    c = a + b
    acon = a.overall_confidence()
    bcon = b.overall_confidence()
    ccon = c.overall_confidence()
    print(acon)
    print(bcon)
    print(ccon)
def test_add_3d():
    """Addition of 3-D PInts; the partial-confidence half repeats the 2-D case."""
    a = PInt(10, 11, 12, bits=8)
    b = PInt(6, 13, 7, bits=8)
    c = a + b
    assert c.asfloat() == (16, 24, 19)
    np.testing.assert_array_almost_equal(
        [
            [
                [
                    [0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                ],
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                ],
            ],
            [
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0],
                ],
                [
                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0],
                    [2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 0.0],
                ],
            ],
        ],
        c.tensor,
    )
    # Below duplicates the 2-D partial-confidence scenario from test_add_2d.
    a = PInt(10, 11, bits=8, confidence=0.8)
    b = PInt(6, 13, bits=8, confidence=0.9)
    c = a + b
    assert c.asfloat() == (16, 24)
    np.testing.assert_array_almost_equal(
        [
            [
                [0.15804069, 0.16226967, 0.20636845, 0.6811368,
                 0.31998023, 0.21316226, 0.16209187, 0.1548889],
                [0.16720827, 0.17999499, 0.23011242, 0.2770214,
                 0.30978364, 0.30276603, 0.15728992, 0.1548889],
            ],
            [
                [0.16867277, 0.18810391, 0.27501187, 0.5256308,
                 0.6511188, 0.44122377, 0.35417086, 0.1548889],
                [1.2060783, 1.1696315, 0.9885073, 0.21621099,
                 0.41911733, 0.742848, 1.0264474, 1.2353333],
            ],
        ],
        c.tensor,
    )
    np.testing.assert_array_almost_equal(
        [
            [
                [0.04648256, 0.04772637, 0.0606966, 0.20033436,
                 0.09411184, 0.06269478, 0.04767408, 0.04555556],
                [0.0491789, 0.0529397, 0.06768012, 0.08147689,
                 0.09111284, 0.08904883, 0.04626174, 0.04555556],
            ],
            [
                [0.04960964, 0.05532468, 0.08088584, 0.1545973,
                 0.19150554, 0.1297717, 0.10416789, 0.04555556],
                [0.3547289, 0.34400925, 0.29073742, 0.06359147,
                 0.12326981, 0.2184847, 0.3018963, 0.3633333],
            ],
        ],
        c.normalize(0.5).tensor,
    )
    q = c.quantize()
    np.testing.assert_array_almost_equal(
        [
            [
                [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            ],
        ],
        q.tensor,
    )
    a = PInt(1, 2, bits=6)
    b = PInt(2, 1, bits=6)
    c = a + b
    np.testing.assert_array_almost_equal(
        [
            [[0.0, 0.0, 0.0, 0.0, 2.0, 2.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 0.0, 0.0]],
        ],
        c.tensor,
    )
def test_layer_xor():
    """Smoke test for layer_xor on binary and fractional layers.

    NOTE(review): results are never asserted — this only checks the calls
    run without raising. TODO: add assertions on the returned layers.
    """
    a = np.asarray([0, 1])
    b = np.asarray([1, 0])
    c = np.asarray([0.7, 0.3])
    d = np.asarray([0.5, 0.5])
    ab = layer_xor(a, b)
    bc = layer_xor(b, c)
    cc = layer_xor(c, c)
    bd = layer_xor(b, d)
    # 2-D layers.
    a = np.asarray([[0, 1], [0, 0]])
    b = np.asarray([[1, 0], [0, 0]])
    c = np.asarray([[0.4, 0.1], [0.3, 0.2]])
    d = np.asarray([[0.25, 0.25], [0.25, 0.25]])
    ab = layer_xor(a, b)
    ac = layer_xor(a, c)
    cc = layer_xor(c, c)
    bd = layer_xor(b, d)
def test_layer_or():
    """Smoke-test layer_or on 1-D and 2-D probability layers.

    NOTE(review): this test only exercises layer_or for crashes; it never
    asserts on the results.  Consider pinning expected outputs.
    """
    # 1-D case: two one-hot layers, a soft distribution, and a uniform one.
    one_hot_a = np.asarray([0, 1])
    one_hot_b = np.asarray([1, 0])
    soft_c = np.asarray([0.7, 0.3])
    uniform_d = np.asarray([0.5, 0.5])
    _res_ab = layer_or(one_hot_a, one_hot_b)
    _res_bc = layer_or(one_hot_b, soft_c)
    _res_cc = layer_or(soft_c, soft_c)
    _res_bd = layer_or(one_hot_b, uniform_d)
    # 2-D case: same pattern with matrix-shaped layers.
    grid_a = np.asarray([[0, 1], [0, 0]])
    grid_b = np.asarray([[1, 0], [0, 0]])
    grid_c = np.asarray([[0.4, 0.1], [0.3, 0.2]])
    grid_d = np.asarray([[0.25, 0.25], [0.25, 0.25]])
    _res_ab2 = layer_or(grid_a, grid_b)
    _res_ac2 = layer_or(grid_a, grid_c)
    _res_cc2 = layer_or(grid_c, grid_c)
    _res_bd2 = layer_or(grid_b, grid_d)
| 26.005474
| 95
| 0.307838
| 3,271
| 28,502
| 2.647203
| 0.065423
| 0.137891
| 0.182238
| 0.222659
| 0.880702
| 0.85645
| 0.842014
| 0.806213
| 0.792355
| 0.732186
| 0
| 0.397191
| 0.555329
| 28,502
| 1,095
| 96
| 26.029224
| 0.286019
| 0.003403
| 0
| 0.747219
| 0
| 0
| 0.000986
| 0
| 0
| 0
| 0
| 0
| 0.073812
| 1
| 0.012133
| false
| 0
| 0.003033
| 0
| 0.015167
| 0.003033
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
6369b61f38fdfb8a6ae5f4798239cb81decbc256
| 452,558
|
py
|
Python
|
pyboto3/wafregional.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/wafregional.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/wafregional.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_web_acl(WebACLId=None, ResourceArn=None):
    """
    Associates a web ACL with a resource, either an application load balancer
    or an Amazon API Gateway stage.
    See also: AWS API Documentation

    :example: response = client.associate_web_acl(
        WebACLId='string',
        ResourceArn='string'
    )

    :type WebACLId: string
    :param WebACLId: [REQUIRED] A unique identifier (ID) for the web ACL.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The ARN (Amazon Resource Name) of the
        resource to be protected, either an application load balancer or an
        Amazon API Gateway stage, in one of the following formats:
        Application Load Balancer:
            arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id
        Amazon API Gateway stage:
            arn:aws:apigateway:region::/restapis/api-id/stages/stage-name

    :rtype: dict
    :return: {} (an empty dict inside the response structure)

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
        WAFRegional.Client.exceptions.WAFUnavailableEntityException
    """
    # Documentation-only stub: pyboto3 ships signatures and docstrings for
    # IDE completion; at runtime botocore performs the actual API call.
    pass
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as the
        method name on the client. For example, if the method name is
        create_foo, and you'd normally invoke the operation as
        client.create_foo(**kwargs), and if the create_foo operation can be
        paginated, you can use the call client.get_paginator('create_foo').
    """
    # Documentation-only stub: pyboto3 ships signatures and docstrings for
    # IDE completion; at runtime botocore performs the actual API call.
    pass
def create_byte_match_set(Name=None, ChangeToken=None):
    """
    Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the
    part of a web request that you want AWS WAF to inspect, such as the
    values of the User-Agent header or the query string. For example, you can
    create a ByteMatchSet that matches any requests with User-Agent headers
    that contain the string BadBot, and then configure AWS WAF to reject
    those requests.
    For more information about how to use the AWS WAF API to allow or block
    HTTP requests, see the AWS WAF Developer Guide.
    See also: AWS API Documentation

    :example: response = client.create_byte_match_set(
        Name='string',
        ChangeToken='string'
    )

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description of the
        ByteMatchSet. You can't change Name after you create a ByteMatchSet.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.

    :rtype: dict
    :return: {
        'ByteMatchSet': {
            'ByteMatchSetId': 'string',
            'Name': 'string',
            'ByteMatchTuples': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TargetString': b'bytes',
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                    'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
                },
            ]
        },
        'ChangeToken': 'string'
    }
    ByteMatchSet: the new ByteMatchSet (contains no ByteMatchTuple objects
        yet). ByteMatchSetId identifies the set for later GetByteMatchSet /
        UpdateByteMatchSet / UpdateRule / DeleteByteMatchSet calls. Each
        ByteMatchTuple describes the bytes AWS WAF searches for
        (TargetString, base64-encoded via the raw API, at most 50 bytes
        before encoding), where to look (FieldToMatch; Data names the header
        or query parameter when Type is HEADER or SINGLE_QUERY_ARG), an
        optional TextTransformation applied before matching (for example
        CMD_LINE, COMPRESS_WHITE_SPACE, HTML_ENTITY_DECODE, LOWERCASE,
        URL_DECODE, or NONE), and a PositionalConstraint (EXACTLY,
        STARTS_WITH, ENDS_WITH, CONTAINS, or CONTAINS_WORD).
    ChangeToken: the token you used to submit the request; pass it to
        GetChangeTokenStatus to query the request's status.

    Exceptions:
        WAFRegional.Client.exceptions.WAFDisallowedNameException
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
        WAFRegional.Client.exceptions.WAFStaleDataException
        WAFRegional.Client.exceptions.WAFLimitsExceededException
    """
    # Documentation-only stub: pyboto3 ships signatures and docstrings for
    # IDE completion; at runtime botocore performs the actual API call.
    pass
def create_geo_match_set(Name=None, ChangeToken=None):
    """
    Creates a GeoMatchSet, which you use to specify which web requests you
    want to allow or block based on the country that the requests originate
    from. For example, if you're receiving a lot of requests from one or more
    countries and you want to block the requests, you can create a
    GeoMatchSet that contains those countries and then configure AWS WAF to
    block the requests.
    For more information about how to use the AWS WAF API to allow or block
    HTTP requests, see the AWS WAF Developer Guide.
    See also: AWS API Documentation

    :example: response = client.create_geo_match_set(
        Name='string',
        ChangeToken='string'
    )

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description of the
        GeoMatchSet. You can't change Name after you create the GeoMatchSet.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.

    :rtype: dict
    :return: {
        'GeoMatchSet': {
            'GeoMatchSetId': 'string',
            'Name': 'string',
            'GeoMatchConstraints': [
                {
                    'Type': 'Country',
                    'Value': 'string'   # two-letter ISO 3166-1 country code, e.g. 'US'|'GB'|'DE'|...
                },
            ]
        },
        'ChangeToken': 'string'
    }
    GeoMatchSet: the new GeoMatchSet (contains no GeoMatchConstraints yet).
        GeoMatchSetId identifies the set for later GeoMatchSet /
        UpdateGeoMatchSet / UpdateRule / DeleteGeoMatchSet calls. Each
        GeoMatchConstraint names a country that you want AWS WAF to search
        for; Type is currently always 'Country' and Value is the country's
        two-letter code.
    ChangeToken: the token you used to submit the request; pass it to
        GetChangeTokenStatus to query the request's status.

    Exceptions:
        WAFRegional.Client.exceptions.WAFStaleDataException
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFDisallowedNameException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
        WAFRegional.Client.exceptions.WAFLimitsExceededException
    """
    # Documentation-only stub: pyboto3 ships signatures and docstrings for
    # IDE completion; at runtime botocore performs the actual API call.
    pass
def create_ip_set(Name=None, ChangeToken=None):
    """
    Creates an IPSet, which you use to specify which web requests you want to
    allow or block based on the IP addresses that the requests originate
    from. For example, if you're receiving a lot of requests from one or more
    individual IP addresses or one or more ranges of IP addresses and you
    want to block the requests, you can create an IPSet that contains those
    IP addresses and then configure AWS WAF to block the requests.
    For more information about how to use the AWS WAF API to allow or block
    HTTP requests, see the AWS WAF Developer Guide.
    See also: AWS API Documentation

    :example: response = client.create_ip_set(
        Name='string',
        ChangeToken='string'
    )

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description of the IPSet. You
        can't change Name after you create the IPSet.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.

    :rtype: dict
    :return: {
        'IPSet': {
            'IPSetId': 'string',
            'Name': 'string',
            'IPSetDescriptors': [
                {
                    'Type': 'IPV4'|'IPV6',
                    'Value': 'string'
                },
            ]
        },
        'ChangeToken': 'string'
    }
    IPSet: the new IPSet. IPSetId identifies the set for later GetIPSet /
        UpdateIPSet / UpdateRule / DeleteIPSet calls. Each IPSetDescriptor
        gives the address type (IPV4 or IPV6) and the address range in CIDR
        notation, for example 192.0.2.44/32 for a single IPv4 address,
        192.0.2.0/24 for the range 192.0.2.0-192.0.2.255, or
        1111:0000:0000:0000:0000:0000:0000:0111/128 for a single IPv6
        address.
    ChangeToken: the token you used to submit the request; pass it to
        GetChangeTokenStatus to query the request's status.

    Exceptions:
        WAFRegional.Client.exceptions.WAFStaleDataException
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFDisallowedNameException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
        WAFRegional.Client.exceptions.WAFLimitsExceededException

    Example -- create an IP match set named MyIPSetFriendlyName:
        response = client.create_ip_set(
            ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
            Name='MyIPSetFriendlyName',
        )
        print(response)
    Expected output:
        {
            'ChangeToken': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
            'IPSet': {
                'IPSetDescriptors': [
                    {
                        'Type': 'IPV4',
                        'Value': '192.0.2.44/32',
                    },
                ],
                'IPSetId': 'example1ds3t-46da-4fdb-b8d5-abc321j569j5',
                'Name': 'MyIPSetFriendlyName',
            },
            'ResponseMetadata': {
                '...': '...',
            },
        }
    """
    # Documentation-only stub: pyboto3 ships signatures and docstrings for
    # IDE completion; at runtime botocore performs the actual API call.
    pass
def create_rate_based_rule(Name: str = None, MetricName: str = None, RateKey: str = None, RateLimit: int = None, ChangeToken: str = None, Tags: list = None):
    """
    Creates a RateBasedRule . The RateBasedRule contains a RateLimit , which specifies the maximum number of requests that AWS WAF allows from a specified IP address in a five-minute period. The RateBasedRule also contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to count or block if these requests exceed the RateLimit .
    If you add more than one predicate to a RateBasedRule , a request not only must exceed the RateLimit , but it also must match all the conditions to be counted or blocked. For example, suppose you add the following to a RateBasedRule :
    Further, you specify a RateLimit of 1,000.
    You then add the RateBasedRule to a WebACL and specify that you want to block requests that meet the conditions in the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot . Further, requests that match these two conditions must be received at a rate of more than 1,000 requests every five minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the requests. If the rate drops below 1,000 for a five-minute period, AWS WAF no longer blocks the requests.
    As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule :
    Further, you specify a RateLimit of 1,000.
    By adding this RateBasedRule to a WebACL , you could limit requests to your login page without affecting the rest of your site.
    To create and configure a RateBasedRule , perform the following steps:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_rate_based_rule(
        Name='string',
        MetricName='string',
        RateKey='IP',
        RateLimit=123,
        ChangeToken='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type Name: string
    :param Name: [REQUIRED]\nA friendly name or description of the RateBasedRule . You can\'t change the name of a RateBasedRule after you create it.\n
    :type MetricName: string
    :param MetricName: [REQUIRED]\nA friendly name or description for the metrics for this RateBasedRule . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including 'All' and 'Default_Action.' You can\'t change the name of the metric after you create the RateBasedRule .\n
    :type RateKey: string
    :param RateKey: [REQUIRED]\nThe field that AWS WAF uses to determine if requests are likely arriving from a single source and thus subject to rate monitoring. The only valid value for RateKey is IP . IP indicates that requests that arrive from the same IP address are subject to the RateLimit that is specified in the RateBasedRule .\n
    :type RateLimit: integer
    :param RateLimit: [REQUIRED]\nThe maximum number of requests, which have an identical value in the field that is specified by RateKey , allowed in a five-minute period. If the number of requests exceeds the RateLimit and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe ChangeToken that you used to submit the CreateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .\n
    :type Tags: list
    :param Tags: \n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nA tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to 'customer' and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.\nTagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.\n\nKey (string) -- [REQUIRED]\nValue (string) -- [REQUIRED]\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'Rule': {
            'RuleId': 'string',
            'Name': 'string',
            'MetricName': 'string',
            'MatchPredicates': [
                {
                    'Negated': True|False,
                    'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                    'DataId': 'string'
                },
            ],
            'RateKey': 'IP',
            'RateLimit': 123
        },
        'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    Rule (dict) --
    The RateBasedRule that is returned in the CreateRateBasedRule response.
    RuleId (string) --
    A unique identifier for a RateBasedRule . You use RuleId to get more information about a RateBasedRule (see GetRateBasedRule ), update a RateBasedRule (see UpdateRateBasedRule ), insert a RateBasedRule into a WebACL or delete one from a WebACL (see UpdateWebACL ), or delete a RateBasedRule from AWS WAF (see DeleteRateBasedRule ).
    Name (string) --
    A friendly name or description for a RateBasedRule . You can\'t change the name of a RateBasedRule after you create it.
    MetricName (string) --
    A friendly name or description for the metrics for a RateBasedRule . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can\'t change the name of the metric after you create the RateBasedRule .
    MatchPredicates (list) --
    The Predicates object contains one Predicate element for each ByteMatchSet , IPSet , or SqlInjectionMatchSet object that you want to include in a RateBasedRule .
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    Specifies the ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.
    Negated (boolean) --
    Set Negated to False if you want AWS WAF to allow, block, or count requests based on the settings in the specified ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , or SizeConstraintSet . For example, if an IPSet includes the IP address 192.0.2.44 , AWS WAF will allow or block requests based on that IP address.
    Set Negated to True if you want AWS WAF to allow or block a request based on the negation of the settings in the ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , or SizeConstraintSet . For example, if an IPSet includes the IP address 192.0.2.44 , AWS WAF will allow, block, or count requests based on all IP addresses except 192.0.2.44 .
    Type (string) --
    The type of predicate in a Rule , such as ByteMatch or IPSet .
    DataId (string) --
    A unique identifier for a predicate in a Rule , such as ByteMatchSetId or IPSetId . The ID is returned by the corresponding Create or List command.
    RateKey (string) --
    The field that AWS WAF uses to determine if requests are likely arriving from single source and thus subject to rate monitoring. The only valid value for RateKey is IP . IP indicates that requests arriving from the same IP address are subject to the RateLimit that is specified in the RateBasedRule .
    RateLimit (integer) --
    The maximum number of requests, which have an identical value in the field specified by the RateKey , allowed in a five-minute period. If the number of requests exceeds the RateLimit and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.
    ChangeToken (string) --
    The ChangeToken that you used to submit the CreateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFDisallowedNameException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFTagOperationException
    WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    WAFRegional.Client.exceptions.WAFBadRequestException
    :return: {
        'Rule': {
            'RuleId': 'string',
            'Name': 'string',
            'MetricName': 'string',
            'MatchPredicates': [
                {
                    'Negated': True|False,
                    'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                    'DataId': 'string'
                },
            ],
            'RateKey': 'IP',
            'RateLimit': 123
        },
        'ChangeToken': 'string'
    }
    :returns:
    A ByteMatchSet with FieldToMatch of URI
    A PositionalConstraint of STARTS_WITH
    A TargetString of login
    """
    # Auto-generated documentation stub: the body is intentionally empty.
    # The actual CreateRateBasedRule request is issued by the real client object
    # at runtime; this module exists to document the call shape (see :example:).
    pass
def create_regex_match_set(Name: str = None, ChangeToken: str = None):
    """
    Creates a RegexMatchSet . You then use UpdateRegexMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a RegexMatchSet that contains a RegexMatchTuple that looks for any requests with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t . You can then configure AWS WAF to reject those requests.
    To create and configure a RegexMatchSet , perform the following steps:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_regex_match_set(
        Name='string',
        ChangeToken='string'
    )
    :type Name: string
    :param Name: [REQUIRED]\nA friendly name or description of the RegexMatchSet . You can\'t change Name after you create a RegexMatchSet .\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'RegexMatchSet': {
            'RegexMatchSetId': 'string',
            'Name': 'string',
            'RegexMatchTuples': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                    'RegexPatternSetId': 'string'
                },
            ]
        },
        'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    RegexMatchSet (dict) --
    A RegexMatchSet that contains no RegexMatchTuple objects.
    RegexMatchSetId (string) --
    The RegexMatchSetId for a RegexMatchSet . You use RegexMatchSetId to get information about a RegexMatchSet (see GetRegexMatchSet ), update a RegexMatchSet (see UpdateRegexMatchSet ), insert a RegexMatchSet into a Rule or delete one from a Rule (see UpdateRule ), and delete a RegexMatchSet from AWS WAF (see DeleteRegexMatchSet ).
    RegexMatchSetId is returned by CreateRegexMatchSet and by ListRegexMatchSets .
    Name (string) --
    A friendly name or description of the RegexMatchSet . You can\'t change Name after you create a RegexMatchSet .
    RegexMatchTuples (list) --
    Contains an array of RegexMatchTuple objects. Each RegexMatchTuple object contains:
    The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.
    The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet .
    Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. Each RegexMatchTuple object contains:
    The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.
    The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet .
    Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.
    FieldToMatch (dict) --
    Specifies where in a web request to look for the RegexPatternSet .
    Type (string) --
    The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:
    HEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data .
    METHOD : The HTTP method, which indicated the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT .
    QUERY_STRING : A query string, which is the part of a URL that appears after a ? character, if any.
    URI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
    BODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet .
    SINGLE_QUERY_ARG : The parameter in the query string that you will inspect, such as UserName or SalesRegion . The maximum length for SINGLE_QUERY_ARG is 30 characters.
    ALL_QUERY_ARGS : Similar to SINGLE_QUERY_ARG , but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString .
    Data (string) --
    When the value of Type is HEADER , enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer . The name of the header is not case sensitive.
    When the value of Type is SINGLE_QUERY_ARG , enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion . The parameter name is not case sensitive.
    If the value of Type is any other value, omit Data .
    TextTransformation (string) --
    Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on RegexPatternSet before inspecting a request for a match.
    You can only specify a single type of TextTransformation.
    CMD_LINE
    When you\'re concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
    Delete the following characters: " \' ^
    Delete spaces before the following characters: / (
    Replace the following characters with a space: , ;
    Replace multiple spaces with one space
    Convert uppercase letters (A-Z) to lowercase (a-z)
    COMPRESS_WHITE_SPACE
    Use this option to replace the following characters with a space character (decimal 32):
    f, formfeed, decimal 12
    t, tab, decimal 9
    n, newline, decimal 10
    r, carriage return, decimal 13
    v, vertical tab, decimal 11
    non-breaking space, decimal 160
    COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
    HTML_ENTITY_DECODE
    Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
    Replaces (ampersand)quot; with "
    Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
    Replaces (ampersand)lt; with a "less than" symbol
    Replaces (ampersand)gt; with >
    Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters
    Replaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters
    LOWERCASE
    Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
    URL_DECODE
    Use this option to decode a URL-encoded value.
    NONE
    Specify NONE if you don\'t want to perform any text transformations.
    RegexPatternSetId (string) --
    The RegexPatternSetId for a RegexPatternSet . You use RegexPatternSetId to get information about a RegexPatternSet (see GetRegexPatternSet ), update a RegexPatternSet (see UpdateRegexPatternSet ), insert a RegexPatternSet into a RegexMatchSet or delete one from a RegexMatchSet (see UpdateRegexMatchSet ), and delete an RegexPatternSet from AWS WAF (see DeleteRegexPatternSet ).
    RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets .
    ChangeToken (string) --
    The ChangeToken that you used to submit the CreateRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFDisallowedNameException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    :return: {
        'RegexMatchSet': {
            'RegexMatchSetId': 'string',
            'Name': 'string',
            'RegexMatchTuples': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                    'RegexPatternSetId': 'string'
                },
            ]
        },
        'ChangeToken': 'string'
    }
    :returns:
    Name (string) -- [REQUIRED]
    A friendly name or description of the RegexMatchSet . You can\'t change Name after you create a RegexMatchSet .
    ChangeToken (string) -- [REQUIRED]
    The value returned by the most recent call to GetChangeToken .
    """
    # Auto-generated documentation stub: the body is intentionally empty.
    # The real request is sent by the client object at runtime; this module
    # documents the call shape and response syntax only.
    pass
def create_regex_pattern_set(Name: str = None, ChangeToken: str = None):
    """
    Creates a RegexPatternSet . You then use UpdateRegexPatternSet to specify the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t . You can then configure AWS WAF to reject those requests.
    To create and configure a RegexPatternSet , perform the following steps:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_regex_pattern_set(
        Name='string',
        ChangeToken='string'
    )
    :type Name: string
    :param Name: [REQUIRED]\nA friendly name or description of the RegexPatternSet . You can\'t change Name after you create a RegexPatternSet .\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'RegexPatternSet': {
            'RegexPatternSetId': 'string',
            'Name': 'string',
            'RegexPatternStrings': [
                'string',
            ]
        },
        'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    RegexPatternSet (dict) --
    A RegexPatternSet that contains no objects.
    RegexPatternSetId (string) --
    The identifier for the RegexPatternSet . You use RegexPatternSetId to get information about a RegexPatternSet , update a RegexPatternSet , remove a RegexPatternSet from a RegexMatchSet , and delete a RegexPatternSet from AWS WAF.
    RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets .
    Name (string) --
    A friendly name or description of the RegexPatternSet . You can\'t change Name after you create a RegexPatternSet .
    RegexPatternStrings (list) --
    Specifies the regular expression (regex) patterns that you want AWS WAF to search for, such as B[a@]dB[o0]t .
    (string) --
    ChangeToken (string) --
    The ChangeToken that you used to submit the CreateRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFDisallowedNameException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    :return: {
        'RegexPatternSet': {
            'RegexPatternSetId': 'string',
            'Name': 'string',
            'RegexPatternStrings': [
                'string',
            ]
        },
        'ChangeToken': 'string'
    }
    :returns:
    Name (string) -- [REQUIRED]
    A friendly name or description of the RegexPatternSet . You can\'t change Name after you create a RegexPatternSet .
    ChangeToken (string) -- [REQUIRED]
    The value returned by the most recent call to GetChangeToken .
    """
    # Auto-generated documentation stub: the body is intentionally empty.
    # NOTE(review): the generated text above originally read "RegexMatchSetId is
    # returned by CreateRegexPatternSet"; corrected to RegexPatternSetId, which is
    # the field this response-structure entry describes.
    pass
def create_rule(Name: str = None, MetricName: str = None, ChangeToken: str = None, Tags: list = None):
    """
    Creates a Rule , which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule , a request must match all of the specifications to be allowed or blocked. For example, suppose that you add the following to a Rule :
    You then add the Rule to a WebACL and specify that you want to blocks requests that satisfy the Rule . For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot .
    To create and configure a Rule , perform the following steps:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    Examples
    The following example creates a rule named WAFByteHeaderRule.
    Expected Output:
    :example: response = client.create_rule(
        Name='string',
        MetricName='string',
        ChangeToken='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type Name: string
    :param Name: [REQUIRED]\nA friendly name or description of the Rule . You can\'t change the name of a Rule after you create it.\n
    :type MetricName: string
    :param MetricName: [REQUIRED]\nA friendly name or description for the metrics for this Rule . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including 'All' and 'Default_Action.' You can\'t change the name of the metric after you create the Rule .\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :type Tags: list
    :param Tags: \n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nA tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to 'customer' and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.\nTagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.\n\nKey (string) -- [REQUIRED]\nValue (string) -- [REQUIRED]\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'Rule': {
            'RuleId': 'string',
            'Name': 'string',
            'MetricName': 'string',
            'Predicates': [
                {
                    'Negated': True|False,
                    'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                    'DataId': 'string'
                },
            ]
        },
        'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    Rule (dict) --
    The Rule returned in the CreateRule response.
    RuleId (string) --
    A unique identifier for a Rule . You use RuleId to get more information about a Rule (see GetRule ), update a Rule (see UpdateRule ), insert a Rule into a WebACL or delete a one from a WebACL (see UpdateWebACL ), or delete a Rule from AWS WAF (see DeleteRule ).
    RuleId is returned by CreateRule and by ListRules .
    Name (string) --
    The friendly name or description for the Rule . You can\'t change the name of a Rule after you create it.
    MetricName (string) --
    A friendly name or description for the metrics for this Rule . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can\'t change MetricName after you create the Rule .
    Predicates (list) --
    The Predicates object contains one Predicate element for each ByteMatchSet , IPSet , or SqlInjectionMatchSet object that you want to include in a Rule .
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    Specifies the ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.
    Negated (boolean) --
    Set Negated to False if you want AWS WAF to allow, block, or count requests based on the settings in the specified ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , or SizeConstraintSet . For example, if an IPSet includes the IP address 192.0.2.44 , AWS WAF will allow or block requests based on that IP address.
    Set Negated to True if you want AWS WAF to allow or block a request based on the negation of the settings in the ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , or SizeConstraintSet . For example, if an IPSet includes the IP address 192.0.2.44 , AWS WAF will allow, block, or count requests based on all IP addresses except 192.0.2.44 .
    Type (string) --
    The type of predicate in a Rule , such as ByteMatch or IPSet .
    DataId (string) --
    A unique identifier for a predicate in a Rule , such as ByteMatchSetId or IPSetId . The ID is returned by the corresponding Create or List command.
    ChangeToken (string) --
    The ChangeToken that you used to submit the CreateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFDisallowedNameException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFTagOperationException
    WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    WAFRegional.Client.exceptions.WAFBadRequestException
    Examples
    The following example creates a rule named WAFByteHeaderRule.
    response = client.create_rule(
        ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
        MetricName='WAFByteHeaderRule',
        Name='WAFByteHeaderRule',
    )
    print(response)
    Expected Output:
    {
        'ChangeToken': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
        'Rule': {
            'MetricName': 'WAFByteHeaderRule',
            'Name': 'WAFByteHeaderRule',
            'Predicates': [
                {
                    'DataId': 'MyByteMatchSetID',
                    'Negated': False,
                    'Type': 'ByteMatch',
                },
            ],
            'RuleId': 'WAFRule-1-Example',
        },
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {
        'Rule': {
            'RuleId': 'string',
            'Name': 'string',
            'MetricName': 'string',
            'Predicates': [
                {
                    'Negated': True|False,
                    'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                    'DataId': 'string'
                },
            ]
        },
        'ChangeToken': 'string'
    }
    :returns:
    Create and update the predicates that you want to include in the Rule . For more information, see CreateByteMatchSet , CreateIPSet , and CreateSqlInjectionMatchSet .
    Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.
    Submit a CreateRule request.
    Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.
    Submit an UpdateRule request to specify the predicates that you want to include in the Rule .
    Create and update a WebACL that contains the Rule . For more information, see CreateWebACL .
    """
    # Auto-generated documentation stub: the body is intentionally empty.
    # The real request is sent by the client object at runtime; this module
    # documents the call shape, example usage, and response syntax only.
    pass
def create_rule_group(Name: str = None, MetricName: str = None, ChangeToken: str = None, Tags: list = None):
    """
    Creates a RuleGroup . A rule group is a collection of predefined rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group.
    Rule groups are subject to the following limits:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.create_rule_group(
        Name='string',
        MetricName='string',
        ChangeToken='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type Name: string
    :param Name: [REQUIRED]\nA friendly name or description of the RuleGroup . You can\'t change Name after you create a RuleGroup .\n
    :type MetricName: string
    :param MetricName: [REQUIRED]\nA friendly name or description for the metrics for this RuleGroup . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including 'All' and 'Default_Action.' You can\'t change the name of the metric after you create the RuleGroup .\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :type Tags: list
    :param Tags: \n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nA tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to 'customer' and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.\nTagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.\n\nKey (string) -- [REQUIRED]\nValue (string) -- [REQUIRED]\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
        'RuleGroup': {
            'RuleGroupId': 'string',
            'Name': 'string',
            'MetricName': 'string'
        },
        'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    RuleGroup (dict) --
    An empty RuleGroup .
    RuleGroupId (string) --
    A unique identifier for a RuleGroup . You use RuleGroupId to get more information about a RuleGroup (see GetRuleGroup ), update a RuleGroup (see UpdateRuleGroup ), insert a RuleGroup into a WebACL or delete a one from a WebACL (see UpdateWebACL ), or delete a RuleGroup from AWS WAF (see DeleteRuleGroup ).
    RuleGroupId is returned by CreateRuleGroup and by ListRuleGroups .
    Name (string) --
    The friendly name or description for the RuleGroup . You can\'t change the name of a RuleGroup after you create it.
    MetricName (string) --
    A friendly name or description for the metrics for this RuleGroup . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can\'t change the name of the metric after you create the RuleGroup .
    ChangeToken (string) --
    The ChangeToken that you used to submit the CreateRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFDisallowedNameException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFTagOperationException
    WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    WAFRegional.Client.exceptions.WAFBadRequestException
    :return: {
        'RuleGroup': {
            'RuleGroupId': 'string',
            'Name': 'string',
            'MetricName': 'string'
        },
        'ChangeToken': 'string'
    }
    :returns:
    Name (string) -- [REQUIRED]
    A friendly name or description of the RuleGroup . You can\'t change Name after you create a RuleGroup .
    MetricName (string) -- [REQUIRED]
    A friendly name or description for the metrics for this RuleGroup . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can\'t change the name of the metric after you create the RuleGroup .
    ChangeToken (string) -- [REQUIRED]
    The value returned by the most recent call to GetChangeToken .
    Tags (list) --
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.
    Tagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.
    Key (string) -- [REQUIRED]
    Value (string) -- [REQUIRED]
    """
    # Auto-generated documentation stub: the body is intentionally empty.
    # The real request is sent by the client object at runtime; this module
    # documents the call shape and response syntax only.
    pass
def create_size_constraint_set(Name=None, ChangeToken=None):
    """
    Create a ``SizeConstraintSet``. After creation, use ``UpdateSizeConstraintSet``
    to identify the part of a web request whose length AWS WAF should check
    (for example, the ``User-Agent`` header or the query string) and the size
    comparison to apply. A typical use: block requests whose query string
    exceeds 100 bytes.

    See also: AWS API Documentation.

    :example: response = client.create_size_constraint_set(
        Name='string',
        ChangeToken='string'
    )

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description of the
        SizeConstraintSet. You can't change Name after you create a
        SizeConstraintSet.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.

    :rtype: dict
    :return: {
        'SizeConstraintSet': {
            'SizeConstraintSetId': 'string',
            'Name': 'string',
            'SizeConstraints': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                    'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
                    'Size': 123
                },
            ]
        },
        'ChangeToken': 'string'
    }

    Response structure notes:
    - SizeConstraintSetId: unique identifier used with GetSizeConstraintSet,
      UpdateSizeConstraintSet, UpdateRule, and DeleteSizeConstraintSet; also
      returned by ListSizeConstraintSets.
    - SizeConstraints: each entry combines Size, ComparisonOperator
      (EQ/NE/LE/LT/GE/GT), and FieldToMatch into the expression
      "Size ComparisonOperator size in bytes of FieldToMatch"; the constraint
      matches when that expression is true. Valid Size values are
      0 - 21474836480 bytes (0 - 20 GB). If FieldToMatch Type is URI, each
      '/' counts as one character.
    - FieldToMatch.Data: required only when Type is HEADER (header name) or
      SINGLE_QUERY_ARG (parameter name, max 30 characters); both are
      case-insensitive. Omit Data for other types.
    - TextTransformation: applied to FieldToMatch before inspection; one of
      NONE, CMD_LINE, COMPRESS_WHITE_SPACE, HTML_ENTITY_DECODE, LOWERCASE,
      URL_DECODE. If Type is BODY you must use NONE, because only the first
      8192 bytes are forwarded for inspection.
    - ChangeToken: the token used to submit this request; pass it to
      GetChangeTokenStatus to query the request status.

    Exceptions:
    - WAFRegional.Client.exceptions.WAFStaleDataException
    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFInvalidAccountException
    - WAFRegional.Client.exceptions.WAFDisallowedNameException
    - WAFRegional.Client.exceptions.WAFInvalidParameterException
    - WAFRegional.Client.exceptions.WAFLimitsExceededException
    """
    pass
def create_sql_injection_match_set(Name=None, ChangeToken=None):
    """
    Create a ``SqlInjectionMatchSet``, used to allow, block, or count web
    requests containing snippets of SQL code in a specified request part.
    AWS WAF searches for character sequences that are likely to be malicious
    strings.

    See also: AWS API Documentation.

    :example: response = client.create_sql_injection_match_set(
        Name='string',
        ChangeToken='string'
    )

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description for the
        SqlInjectionMatchSet that you're creating. You can't change Name
        after you create the SqlInjectionMatchSet.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.

    :rtype: dict
    :return: {
        'SqlInjectionMatchSet': {
            'SqlInjectionMatchSetId': 'string',
            'Name': 'string',
            'SqlInjectionMatchTuples': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
                },
            ]
        },
        'ChangeToken': 'string'
    }

    Response structure notes:
    - SqlInjectionMatchSetId: unique identifier used with
      GetSqlInjectionMatchSet, UpdateSqlInjectionMatchSet, UpdateRule, and
      DeleteSqlInjectionMatchSet; also returned by ListSqlInjectionMatchSets.
    - SqlInjectionMatchTuples: the parts of web requests to inspect for
      snippets of malicious SQL code.
    - FieldToMatch.Data: required only when Type is HEADER (header name) or
      SINGLE_QUERY_ARG (parameter name, max 30 characters); both are
      case-insensitive. Omit Data for other types.
    - TextTransformation: applied to FieldToMatch before inspection; one of
      NONE, CMD_LINE, COMPRESS_WHITE_SPACE, HTML_ENTITY_DECODE, LOWERCASE,
      URL_DECODE.
    - ChangeToken: the token used to submit this request; pass it to
      GetChangeTokenStatus to query the request status.

    Exceptions:
    - WAFRegional.Client.exceptions.WAFDisallowedNameException
    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFInvalidAccountException
    - WAFRegional.Client.exceptions.WAFInvalidParameterException
    - WAFRegional.Client.exceptions.WAFStaleDataException
    - WAFRegional.Client.exceptions.WAFLimitsExceededException
    """
    pass
def create_web_acl(Name=None, MetricName=None, DefaultAction=None, ChangeToken=None, Tags=None):
    """
    Create a ``WebACL``, which contains the ``Rules`` that identify the
    CloudFront web requests that you want to allow, block, or count.
    AWS WAF evaluates Rules in order based on each Rule's Priority.
    You also specify a default action (ALLOW or BLOCK) that AWS WAF applies
    when a request matches none of the Rules in the WebACL.

    See also: AWS API Documentation.

    :example: response = client.create_web_acl(
        Name='string',
        MetricName='string',
        DefaultAction={
            'Type': 'BLOCK'|'ALLOW'|'COUNT'
        },
        ChangeToken='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description of the WebACL.
        You can't change Name after you create the WebACL.

    :type MetricName: string
    :param MetricName: [REQUIRED] A friendly name or description for the
        metrics for this WebACL. Only alphanumeric characters (A-Z, a-z,
        0-9), maximum length 128, minimum length one. It can't contain
        whitespace or metric names reserved for AWS WAF, including 'All'
        and 'Default_Action.' You can't change MetricName after you create
        the WebACL.

    :type DefaultAction: dict
    :param DefaultAction: [REQUIRED] The action AWS WAF takes when a request
        doesn't match the criteria in any associated Rule.
        Type (string) -- [REQUIRED] One of:
        - ALLOW: AWS WAF allows requests
        - BLOCK: AWS WAF blocks requests
        - COUNT: counts matching requests and continues inspecting; you
          can't specify COUNT for the default action for a WebACL.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.

    :type Tags: list
    :param Tags: Optional key:value pairs used to categorize and manage the
        resource (for example, for billing); up to 50 tags per resource.
        Tagging is only available through the API, SDKs, and CLI — not the
        AWS WAF Classic console.
        Each tag dict requires:
        - Key (string) -- [REQUIRED]
        - Value (string) -- [REQUIRED]

    :rtype: dict
    :return: {
        'WebACL': {
            'WebACLId': 'string',
            'Name': 'string',
            'MetricName': 'string',
            'DefaultAction': {
                'Type': 'BLOCK'|'ALLOW'|'COUNT'
            },
            'Rules': [
                {
                    'Priority': 123,
                    'RuleId': 'string',
                    'Action': {
                        'Type': 'BLOCK'|'ALLOW'|'COUNT'
                    },
                    'OverrideAction': {
                        'Type': 'NONE'|'COUNT'
                    },
                    'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
                    'ExcludedRules': [
                        {
                            'RuleId': 'string'
                        },
                    ]
                },
            ],
            'WebACLArn': 'string'
        },
        'ChangeToken': 'string'
    }

    Response structure notes:
    - WebACLId: unique identifier used with GetWebACL, UpdateWebACL, and
      DeleteWebACL; also returned by ListWebACLs.
    - Rules: ActivatedRule entries. Priority orders evaluation (lower values
      first; values must be unique but need not be consecutive). Action
      applies to individual Rules; OverrideAction applies only when the
      ActivatedRule refers to a RuleGroup (set its Type to COUNT to override
      the group's block actions for testing, or NONE to let rule actions
      take effect). Rule Type is REGULAR, RATE_BASED, or GROUP (default
      REGULAR); adding a RATE_BASED rule without setting the type makes
      UpdateWebACL fail. ExcludedRules lists rules within a referenced
      RuleGroup whose action is changed to COUNT rather than removed.
    - WebACLArn: the Amazon Resource Name (ARN) of the web ACL.
    - ChangeToken: the token used to submit this request; pass it to
      GetChangeTokenStatus to query the request status.

    Exceptions:
    - WAFRegional.Client.exceptions.WAFStaleDataException
    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFInvalidAccountException
    - WAFRegional.Client.exceptions.WAFDisallowedNameException
    - WAFRegional.Client.exceptions.WAFInvalidParameterException
    - WAFRegional.Client.exceptions.WAFLimitsExceededException
    - WAFRegional.Client.exceptions.WAFTagOperationException
    - WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    - WAFRegional.Client.exceptions.WAFBadRequestException
    """
    pass
def create_web_acl_migration_stack(WebACLId=None, S3BucketName=None, IgnoreUnsupportedType=None):
    """Create an AWS CloudFormation WAFV2 template for a Classic web ACL.

    The template is written to the specified Amazon S3 bucket; deploying it
    as a CloudFormation stack recreates the web ACL and its resources in
    AWS WAFV2.  This is one step of the larger procedure for migrating AWS
    WAF Classic web ACLs to the latest version of AWS WAF; see "Migrating
    your AWS WAF Classic resources to AWS WAF" in the AWS WAF Developer
    Guide for the full procedure, including caveats and manual steps.

    See also: AWS API Documentation.

    :type WebACLId: string
    :param WebACLId: [REQUIRED] The UUID of the WAF Classic web ACL that
        you want to migrate to WAF v2.
    :type S3BucketName: string
    :param S3BucketName: [REQUIRED] The Amazon S3 bucket that receives the
        CloudFormation template.  The bucket name must start with
        ``aws-waf-migration-`` (e.g. ``aws-waf-migration-my-web-acl``),
        the bucket must be in the Region where you deploy the template,
        and its bucket policies must permit the migration process to write
        data (see the Examples section for policy listings).
    :type IgnoreUnsupportedType: boolean
    :param IgnoreUnsupportedType: [REQUIRED] Set to true to skip entities
        that cannot be migrated; otherwise AWS WAF stops the migration and
        throws an exception when it encounters an unsupported entity.
    :rtype: dict
    :return: {'S3ObjectUrl': 'string'} -- the URL of the template created
        in Amazon S3.

    Raises WAFInternalErrorException, WAFInvalidParameterException,
    WAFInvalidOperationException, WAFNonexistentItemException,
    WAFEntityMigrationException.
    """
    pass
def create_xss_match_set(Name=None, ChangeToken=None):
    """Create an XssMatchSet for detecting cross-site scripting attacks.

    An XssMatchSet lets you allow, block, or count requests that contain
    cross-site scripting attacks in the specified part of web requests;
    AWS WAF searches for character sequences that are likely to be
    malicious strings.  For details on using the AWS WAF API to allow or
    block HTTP requests, see the AWS WAF Developer Guide.

    See also: AWS API Documentation.

    :type Name: string
    :param Name: [REQUIRED] A friendly name or description for the
        XssMatchSet.  Name cannot be changed after creation.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: The response to a CreateXssMatchSet request::

        {
            'XssMatchSet': {
                'XssMatchSetId': 'string',
                'Name': 'string',
                'XssMatchTuples': [
                    {
                        'FieldToMatch': {
                            'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                            'Data': 'string'
                        },
                        'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
                    },
                ]
            },
            'ChangeToken': 'string'
        }

    - ``XssMatchSetId``: unique identifier, used with GetXssMatchSet,
      UpdateXssMatchSet, UpdateRule, and DeleteXssMatchSet; also returned
      by ListXssMatchSets.
    - ``XssMatchTuples``: parts of web requests to inspect for cross-site
      scripting attacks.  ``FieldToMatch.Type`` selects the request part
      (for ``HEADER`` and ``SINGLE_QUERY_ARG``, name it in ``Data``;
      header/parameter names are not case sensitive; omit ``Data``
      otherwise).  Note that only the first 8192 bytes of the request
      body are forwarded to AWS WAF for inspection.
    - ``TextTransformation``: a single transformation applied to
      FieldToMatch before matching, to defeat unusual formatting:
      CMD_LINE (strip/normalize command-line obfuscation), COMPRESS_WHITE_SPACE
      (collapse whitespace characters to single spaces),
      HTML_ENTITY_DECODE (replace HTML-encoded characters with unencoded
      ones), LOWERCASE, URL_DECODE, or NONE.
    - ``ChangeToken``: the token used to submit the request; pass it to
      GetChangeTokenStatus to query request status.

    Raises WAFDisallowedNameException, WAFInternalErrorException,
    WAFInvalidAccountException, WAFInvalidParameterException,
    WAFStaleDataException, WAFLimitsExceededException.
    """
    pass
def delete_byte_match_set(ByteMatchSetId=None, ChangeToken=None):
    """Permanently delete a ByteMatchSet.

    A ByteMatchSet cannot be deleted while it is still used in any Rules
    or while it still includes any ByteMatchTuple objects (any filters).
    To remove a ByteMatchSet from a Rule without deleting it, use
    UpdateRule instead.

    See also: AWS API Documentation.

    :type ByteMatchSetId: string
    :param ByteMatchSetId: [REQUIRED] The ByteMatchSetId of the
        ByteMatchSet to delete, as returned by CreateByteMatchSet and
        ListByteMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteByteMatchSet request; pass it to GetChangeTokenStatus to
        query request status.

    Raises WAFInternalErrorException, WAFInvalidAccountException,
    WAFNonexistentItemException, WAFReferencedItemException,
    WAFStaleDataException, WAFNonEmptyEntityException.
    """
    pass
def delete_geo_match_set(GeoMatchSetId=None, ChangeToken=None):
    """Permanently delete a GeoMatchSet from AWS WAF.

    A GeoMatchSet cannot be deleted while it is still used in any Rules
    or while it still includes any countries.  To remove a GeoMatchSet
    from a Rule without deleting it, use UpdateRule instead.

    See also: AWS API Documentation.

    :type GeoMatchSetId: string
    :param GeoMatchSetId: [REQUIRED] The GeoMatchSetID of the GeoMatchSet
        to delete, as returned by CreateGeoMatchSet and ListGeoMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteGeoMatchSet request; pass it to GetChangeTokenStatus to
        query request status.

    Raises WAFStaleDataException, WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFNonEmptyEntityException.
    """
    pass
def delete_ip_set(IPSetId=None, ChangeToken=None):
    """Permanently delete an IPSet from AWS WAF.

    An IPSet cannot be deleted while it is still used in any Rules or
    while it still includes any IP addresses.  To remove an IPSet from a
    Rule without deleting it, use UpdateRule instead.

    See also: AWS API Documentation.

    :type IPSetId: string
    :param IPSetId: [REQUIRED] The IPSetId of the IPSet to delete, as
        returned by CreateIPSet and ListIPSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteIPSet request; pass it to GetChangeTokenStatus to query
        request status.

    Raises WAFStaleDataException, WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFNonEmptyEntityException.
    """
    pass
def delete_logging_configuration(ResourceArn=None):
    """Permanently delete the LoggingConfiguration from a web ACL.

    See also: AWS API Documentation.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        web ACL from which you want to delete the LoggingConfiguration.
    :rtype: dict
    :return: {} -- an empty response on success.

    Raises WAFInternalErrorException, WAFNonexistentItemException,
    WAFStaleDataException.
    """
    pass
def delete_permission_policy(ResourceArn=None):
    """Permanently delete an IAM policy from a RuleGroup.

    The user making the request must be the owner of the RuleGroup.

    See also: AWS API Documentation.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        RuleGroup from which you want to delete the policy.  The caller
        must be the owner of the RuleGroup.
    :rtype: dict
    :return: {} -- an empty response on success.

    Raises WAFInternalErrorException, WAFStaleDataException,
    WAFNonexistentItemException.
    """
    pass
def delete_rate_based_rule(RuleId=None, ChangeToken=None):
    """Permanently delete a RateBasedRule from AWS WAF.

    A rule cannot be deleted while it is still used in any WebACL objects
    or while it still includes any predicates, such as ByteMatchSet
    objects.  To remove a rule from a WebACL without deleting it, use
    UpdateWebACL instead.

    See also: AWS API Documentation.

    :type RuleId: string
    :param RuleId: [REQUIRED] The RuleId of the RateBasedRule to delete,
        as returned by CreateRateBasedRule and ListRateBasedRules.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteRateBasedRule request; pass it to GetChangeTokenStatus to
        query request status.

    Raises WAFStaleDataException, WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFNonEmptyEntityException,
    WAFTagOperationException, WAFTagOperationInternalErrorException.
    """
    pass
def delete_regex_match_set(RegexMatchSetId=None, ChangeToken=None):
    """Permanently delete a RegexMatchSet.

    A RegexMatchSet cannot be deleted while it is still used in any Rules
    or while it still includes any RegexMatchTuples objects (any
    filters).  To remove a RegexMatchSet from a Rule without deleting it,
    use UpdateRule instead.

    See also: AWS API Documentation.

    :type RegexMatchSetId: string
    :param RegexMatchSetId: [REQUIRED] The RegexMatchSetId of the
        RegexMatchSet to delete, as returned by CreateRegexMatchSet and
        ListRegexMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteRegexMatchSet request; pass it to GetChangeTokenStatus to
        query request status.

    Raises WAFInternalErrorException, WAFInvalidAccountException,
    WAFNonexistentItemException, WAFReferencedItemException,
    WAFStaleDataException, WAFNonEmptyEntityException.
    """
    pass
def delete_regex_pattern_set(RegexPatternSetId=None, ChangeToken=None):
    """Permanently delete a RegexPatternSet.

    A RegexPatternSet cannot be deleted while it is still used in any
    RegexMatchSet or while the RegexPatternSet is not empty.

    See also: AWS API Documentation.

    :type RegexPatternSetId: string
    :param RegexPatternSetId: [REQUIRED] The RegexPatternSetId of the
        RegexPatternSet to delete, as returned by CreateRegexPatternSet
        and ListRegexPatternSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteRegexPatternSet request; pass it to GetChangeTokenStatus to
        query request status.

    Raises WAFInternalErrorException, WAFInvalidAccountException,
    WAFNonexistentItemException, WAFReferencedItemException,
    WAFStaleDataException, WAFNonEmptyEntityException.
    """
    pass
def delete_rule(RuleId=None, ChangeToken=None):
    """Permanently delete a Rule from AWS WAF.

    A Rule cannot be deleted while it is still used in any WebACL objects
    or while it still includes any predicates, such as ByteMatchSet
    objects.  To remove a Rule from a WebACL without deleting it, use
    UpdateWebACL instead.

    See also: AWS API Documentation.

    :type RuleId: string
    :param RuleId: [REQUIRED] The RuleId of the Rule to delete, as
        returned by CreateRule and ListRules.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteRule request; pass it to GetChangeTokenStatus to query
        request status.

    Raises WAFStaleDataException, WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFNonEmptyEntityException,
    WAFTagOperationException, WAFTagOperationInternalErrorException.
    """
    pass
def delete_rule_group(RuleGroupId=None, ChangeToken=None):
    """Permanently delete a RuleGroup from AWS WAF.

    A RuleGroup cannot be deleted while it is still used in any WebACL
    objects or while it still includes any rules.  To remove a RuleGroup
    from a WebACL without deleting it, use UpdateWebACL instead.

    See also: AWS API Documentation.

    :type RuleGroupId: string
    :param RuleGroupId: [REQUIRED] The RuleGroupId of the RuleGroup to
        delete, as returned by CreateRuleGroup and ListRuleGroups.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteRuleGroup request; pass it to GetChangeTokenStatus to query
        request status.

    Raises WAFStaleDataException, WAFInternalErrorException,
    WAFNonexistentItemException, WAFReferencedItemException,
    WAFNonEmptyEntityException, WAFInvalidOperationException,
    WAFTagOperationException, WAFTagOperationInternalErrorException.
    """
    pass
def delete_size_constraint_set(SizeConstraintSetId=None, ChangeToken=None):
    """Permanently delete a SizeConstraintSet.

    A SizeConstraintSet cannot be deleted while it is still used in any
    Rules or while it still includes any SizeConstraint objects (any
    filters).  To remove a SizeConstraintSet from a Rule without deleting
    it, use UpdateRule instead.

    See also: AWS API Documentation.

    :type SizeConstraintSetId: string
    :param SizeConstraintSetId: [REQUIRED] The SizeConstraintSetId of the
        SizeConstraintSet to delete, as returned by
        CreateSizeConstraintSet and ListSizeConstraintSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :rtype: dict
    :return: {'ChangeToken': 'string'} -- the token used to submit the
        DeleteSizeConstraintSet request; pass it to GetChangeTokenStatus
        to query request status.

    Raises WAFStaleDataException, WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFNonEmptyEntityException.
    """
    pass
def delete_sql_injection_match_set(SqlInjectionMatchSetId=None, ChangeToken=None):
    """Permanently delete a SqlInjectionMatchSet.

    A SqlInjectionMatchSet cannot be deleted while it is still used in any
    Rules or while it still contains SqlInjectionMatchTuple objects. To only
    detach it from a Rule, use UpdateRule instead.

    See also: AWS API Documentation.

    :type SqlInjectionMatchSetId: string
    :param SqlInjectionMatchSetId: [REQUIRED] The SqlInjectionMatchSetId of
        the SqlInjectionMatchSet that you want to delete, as returned by
        CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.
    :rtype: dict
    :return: ``{'ChangeToken': 'string'}`` -- the ChangeToken used to submit
        the DeleteSqlInjectionMatchSet request; pass it to
        GetChangeTokenStatus to query the status of the request.

    Example::

        response = client.delete_sql_injection_match_set(
            ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
            SqlInjectionMatchSetId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
        )

    Raises WAFRegional.Client.exceptions: WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFStaleDataException,
    WAFNonEmptyEntityException.
    """
    pass
def delete_web_acl(WebACLId=None, ChangeToken=None):
    """Permanently delete a WebACL.

    A WebACL cannot be deleted while it still contains any Rules.

    See also: AWS API Documentation.

    :type WebACLId: string
    :param WebACLId: [REQUIRED] The WebACLId of the WebACL that you want to
        delete, as returned by CreateWebACL and by ListWebACLs.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.
    :rtype: dict
    :return: ``{'ChangeToken': 'string'}`` -- the ChangeToken used to submit
        the DeleteWebACL request; pass it to GetChangeTokenStatus to query
        the status of the request.

    Example::

        response = client.delete_web_acl(
            ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
            WebACLId='example-46da-4444-5555-example',
        )

    Raises WAFRegional.Client.exceptions: WAFStaleDataException,
    WAFInternalErrorException, WAFInvalidAccountException,
    WAFNonexistentItemException, WAFReferencedItemException,
    WAFNonEmptyEntityException, WAFTagOperationException,
    WAFTagOperationInternalErrorException.
    """
    pass
def delete_xss_match_set(XssMatchSetId=None, ChangeToken=None):
    """Permanently delete an XssMatchSet.

    An XssMatchSet cannot be deleted while it is still used in any Rules or
    while it still contains XssMatchTuple objects. To only detach it from a
    Rule, use UpdateRule instead.

    See also: AWS API Documentation.

    :type XssMatchSetId: string
    :param XssMatchSetId: [REQUIRED] The XssMatchSetId of the XssMatchSet
        that you want to delete, as returned by CreateXssMatchSet and by
        ListXssMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.
    :rtype: dict
    :return: ``{'ChangeToken': 'string'}`` -- the ChangeToken used to submit
        the DeleteXssMatchSet request; pass it to GetChangeTokenStatus to
        query the status of the request.

    Example::

        response = client.delete_xss_match_set(
            ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
            XssMatchSetId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
        )

    Raises WAFRegional.Client.exceptions: WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFStaleDataException,
    WAFNonEmptyEntityException.
    """
    pass
def disassociate_web_acl(ResourceArn=None):
    """Remove a web ACL from the specified resource.

    The resource is either an application load balancer or an Amazon API
    Gateway stage.

    See also: AWS API Documentation.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The ARN (Amazon Resource Name) of the
        resource from which the web ACL is being removed. The ARN should be
        in one of the following formats:

        - Application Load Balancer:
          ``arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id``
        - Amazon API Gateway stage:
          ``arn:aws:apigateway:region::/restapis/api-id/stages/stage-name``
    :rtype: dict
    :return: ``{}`` -- an empty response on success.

    Raises WAFRegional.Client.exceptions: WAFInternalErrorException,
    WAFInvalidAccountException, WAFInvalidParameterException,
    WAFNonexistentItemException.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL is valid for.
        By default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the HTTP method is whatever is used in the method's model.
    """
    pass
def get_byte_match_set(ByteMatchSetId=None):
    """Return the ByteMatchSet specified by ByteMatchSetId.

    See also: AWS API Documentation.

    :type ByteMatchSetId: string
    :param ByteMatchSetId: [REQUIRED] The ByteMatchSetId of the ByteMatchSet
        that you want to get, as returned by CreateByteMatchSet and by
        ListByteMatchSets.
    :rtype: dict
    :return: Response syntax::

        {
            'ByteMatchSet': {
                'ByteMatchSetId': 'string',
                'Name': 'string',
                'ByteMatchTuples': [
                    {
                        'FieldToMatch': {
                            'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                            'Data': 'string'
                        },
                        'TargetString': b'bytes',
                        'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                        'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
                    },
                ]
            }
        }

    Response structure:

    - ``ByteMatchSet`` (dict): information about the ByteMatchSet that you
      specified in the GetByteMatchSet request.

      - ``ByteMatchSetId`` (string): identifier used with GetByteMatchSet,
        UpdateByteMatchSet, UpdateRule, and DeleteByteMatchSet; returned by
        CreateByteMatchSet and by ListByteMatchSets.
      - ``Name`` (string): friendly name or description of the ByteMatchSet.
        It cannot be changed after the set is created.
      - ``ByteMatchTuples`` (list): the bytes (typically a string that
        corresponds with ASCII characters) that you want AWS WAF to search
        for in web requests, the location in requests to search, and other
        settings. Each tuple contains:

        - ``FieldToMatch`` (dict): the part of a web request to search.

          - ``Type`` (string): HEADER (name given in Data), METHOD
            (DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT), QUERY_STRING,
            URI, BODY (only the first 8192 bytes are forwarded for
            inspection; use a size constraint set to limit body length),
            SINGLE_QUERY_ARG (parameter name in Data, max 30 characters),
            or ALL_QUERY_ARGS.
          - ``Data`` (string): for HEADER, the header name (for example,
            User-Agent or Referer); for SINGLE_QUERY_ARG, the parameter
            name. Not case sensitive. Omit for any other Type.

        - ``TargetString`` (bytes): the value AWS WAF searches for in the
          part of requests given by FieldToMatch; maximum 50 bytes before
          base64-encoding. Case sensitive for alphabetic characters. When
          using the AWS WAF API directly, specify a base64-encoded value;
          the CLI and SDKs base64-encode it automatically.
        - ``TextTransformation`` (string): transformation applied to
          FieldToMatch before inspecting it for a match, to defeat unusual
          formatting used by attackers. One of:

          - CMD_LINE: delete ``"`` ``'`` ``^``, delete spaces before
            ``/`` and ``(``, replace ``,`` and ``;`` with a space,
            collapse multiple spaces, lowercase A-Z.
          - COMPRESS_WHITE_SPACE: replace formfeed, tab, newline,
            carriage return, vertical tab and non-breaking space with a
            space, then collapse multiple spaces.
          - HTML_ENTITY_DECODE: replace HTML entities (quot, nbsp, lt,
            gt, and hexadecimal/decimal character references) with the
            corresponding characters.
          - LOWERCASE: convert A-Z to a-z.
          - URL_DECODE: decode a URL-encoded value.
          - NONE: perform no text transformation.

        - ``PositionalConstraint`` (string): where in the inspected part
          the TargetString must appear -- CONTAINS (anywhere),
          CONTAINS_WORD (TargetString is alphanumeric/underscore only and
          bounded by non-word characters or the field edges), EXACTLY,
          STARTS_WITH, or ENDS_WITH.

    Example::

        response = client.get_byte_match_set(
            ByteMatchSetId='exampleIDs3t-46da-4fdb-b8d5-abc321j569j5',
        )

    Raises WAFRegional.Client.exceptions: WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException.
    """
    pass
def get_change_token():
    """Get a change token for a create, update, or delete request.

    When you want to create, update, or delete AWS WAF objects, get a change
    token and include it in the request. Change tokens ensure that your
    application doesn't submit conflicting requests to AWS WAF.

    Each create, update, or delete request must use a unique change token.
    If your application submits two GetChangeToken requests before submitting
    a create, update, or delete request, both return the same value.

    When a change token is used in a create, update, or delete request, its
    status changes to PENDING while AWS WAF propagates the change to all AWS
    WAF servers. Use GetChangeTokenStatus to determine the status of a
    change token.

    See also: AWS API Documentation.

    :rtype: dict
    :return: ``{'ChangeToken': 'string'}`` -- the ChangeToken to use in a
        subsequent request; pass it to GetChangeTokenStatus to get the
        current status of the request.

    Example::

        response = client.get_change_token()

    Raises WAFRegional.Client.exceptions.WAFInternalErrorException.
    """
    pass
def get_change_token_status(ChangeToken=None):
    """Return the status of a ChangeToken obtained from GetChangeToken.

    See also: AWS API Documentation.

    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The change token for which you want the
        status. This change token was previously returned in the
        GetChangeToken response.
    :rtype: dict
    :return: ``{'ChangeTokenStatus': 'PROVISIONED'|'PENDING'|'INSYNC'}`` --
        the status of the change token.

    Example::

        response = client.get_change_token_status(
            ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
        )

    Raises WAFRegional.Client.exceptions: WAFNonexistentItemException,
    WAFInternalErrorException.
    """
    pass
def get_geo_match_set(GeoMatchSetId=None):
    """Return the GeoMatchSet that is specified by GeoMatchSetId.

    See also: AWS API Documentation.

    :type GeoMatchSetId: string
    :param GeoMatchSetId: [REQUIRED] The GeoMatchSetId of the GeoMatchSet
        that you want to get, as returned by CreateGeoMatchSet and by
        ListGeoMatchSets.
    :rtype: dict
    :return: Response syntax::

        {
            'GeoMatchSet': {
                'GeoMatchSetId': 'string',
                'Name': 'string',
                'GeoMatchConstraints': [
                    {
                        'Type': 'Country',
                        'Value': 'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW'
                    },
                ]
            }
        }

    Response structure:

    - ``GeoMatchSet`` (dict): information about the GeoMatchSet that you
      specified in the GetGeoMatchSet request. The Type of a
      GeoMatchConstraint is always Country; the Value is the identifier for
      a specific country.

      - ``GeoMatchSetId`` (string): identifier used with GeoMatchSet,
        UpdateGeoMatchSet, UpdateRule, and DeleteGeoMatchSet; returned by
        CreateGeoMatchSet and by ListGeoMatchSets.
      - ``Name`` (string): friendly name or description of the GeoMatchSet.
        It cannot be changed after the set is created.
      - ``GeoMatchConstraints`` (list): an array of GeoMatchConstraint
        objects, each naming a country that AWS WAF searches for:

        - ``Type`` (string): the type of geographical area; currently
          Country is the only valid value.
        - ``Value`` (string): the country that you want AWS WAF to search
          for.

    Raises WAFRegional.Client.exceptions: WAFInternalErrorException,
    WAFInvalidAccountException, WAFNonexistentItemException.
    """
    pass
def get_ip_set(IPSetId=None):
    """Return the AWS WAF Classic ``IPSet`` identified by ``IPSetId``.

    See also: AWS API Documentation.

    :example: response = client.get_ip_set(IPSetId='string')

    :type IPSetId: string
    :param IPSetId: [REQUIRED] The ``IPSetId`` of the IPSet that you want to
        get. ``IPSetId`` is returned by CreateIPSet and by ListIPSets.

    :rtype: dict
    :return: ::

        {
            'IPSet': {
                'IPSetId': 'string',
                'Name': 'string',
                'IPSetDescriptors': [
                    {
                        'Type': 'IPV4'|'IPV6',
                        'Value': 'string'
                    },
                ]
            }
        }

    ``IPSet`` describes the set you requested: ``IPSetId`` (the identifier
    used with GetIPSet, UpdateIPSet, UpdateRule and DeleteIPSet), ``Name``
    (a friendly description that cannot be changed after creation), and
    ``IPSetDescriptors`` — the IP address type (``IPV4`` or ``IPV6``) and
    CIDR ranges that web requests originate from. For example, specify
    ``192.0.2.44/32`` to match the single address 192.0.2.44, or
    ``192.0.2.0/24`` to match 192.0.2.0 through 192.0.2.255; IPv6 ranges use
    the same CIDR notation (e.g. ``/128`` for one address, ``/64`` for a
    range). See the Wikipedia entry on Classless Inter-Domain Routing for
    details on CIDR notation.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException

    Example — fetch the details of an IP match set by ID::

        response = client.get_ip_set(
            IPSetId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
        )
        print(response)

    Expected output::

        {
            'IPSet': {
                'IPSetDescriptors': [
                    {'Type': 'IPV4', 'Value': '192.0.2.44/32'},
                ],
                'IPSetId': 'example1ds3t-46da-4fdb-b8d5-abc321j569j5',
                'Name': 'MyIPSetFriendlyName',
            },
            'ResponseMetadata': {'...': '...'},
        }
    """
    pass
def get_logging_configuration(ResourceArn=None):
    """Return the ``LoggingConfiguration`` for the specified web ACL.

    See also: AWS API Documentation.

    :example: response = client.get_logging_configuration(ResourceArn='string')

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the web
        ACL for which you want to get the ``LoggingConfiguration``.

    :rtype: dict
    :return: ::

        {
            'LoggingConfiguration': {
                'ResourceArn': 'string',
                'LogDestinationConfigs': [
                    'string',
                ],
                'RedactedFields': [
                    {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                ]
            }
        }

    ``ResourceArn`` is the ARN of the web ACL associated with
    ``LogDestinationConfigs``, an array of Amazon Kinesis Data Firehose ARNs.
    ``RedactedFields`` lists the parts of the request redacted from the logs
    (a redacted field, e.g. a cookie, appears as ``xxx`` in the firehose).

    Each redacted field is a ``FieldToMatch``-style dict. ``Type`` may be:

    * ``HEADER`` — a named request header (name given in ``Data``), e.g.
      ``User-Agent`` or ``Referer``.
    * ``METHOD`` — the HTTP method; CloudFront supports DELETE, GET, HEAD,
      OPTIONS, PATCH, POST and PUT.
    * ``QUERY_STRING`` — the part of the URL after the ``?`` character.
    * ``URI`` — the resource path, e.g. ``/images/daily-ad.jpg``.
    * ``BODY`` — the request body (only the first 8192 bytes are forwarded
      to AWS WAF; see CreateSizeConstraintSet to limit body length).
    * ``SINGLE_QUERY_ARG`` — one named query-string parameter (name given in
      ``Data``, maximum 30 characters).
    * ``ALL_QUERY_ARGS`` — every query-string parameter.

    ``Data`` carries the header or parameter name when ``Type`` is
    ``HEADER`` or ``SINGLE_QUERY_ARG`` (not case sensitive); omit it for any
    other ``Type``.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name — the same name as the method
        on the client. For example, if the method name is ``create_foo`` and
        you would normally invoke it as ``client.create_foo(**kwargs)``,
        then, provided the operation can be paginated, you can call
        ``client.get_paginator('create_foo')``.

    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_permission_policy(ResourceArn=None):
    """Return the IAM policy attached to the RuleGroup.

    See also: AWS API Documentation.

    :example: response = client.get_permission_policy(ResourceArn='string')

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        RuleGroup for which you want to get the policy.

    :rtype: dict
    :return: ::

        {
            'Policy': 'string'
        }

    ``Policy`` is the IAM policy attached to the specified RuleGroup.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_rate_based_rule(RuleId=None):
    """Return the ``RateBasedRule`` specified by ``RuleId``.

    See also: AWS API Documentation.

    :example: response = client.get_rate_based_rule(RuleId='string')

    :type RuleId: string
    :param RuleId: [REQUIRED] The ``RuleId`` of the RateBasedRule that you
        want to get. ``RuleId`` is returned by CreateRateBasedRule and by
        ListRateBasedRules.

    :rtype: dict
    :return: ::

        {
            'Rule': {
                'RuleId': 'string',
                'Name': 'string',
                'MetricName': 'string',
                'MatchPredicates': [
                    {
                        'Negated': True|False,
                        'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                        'DataId': 'string'
                    },
                ],
                'RateKey': 'IP',
                'RateLimit': 123
            }
        }

    ``Rule`` describes the RateBasedRule named in the request:

    * ``RuleId`` — unique identifier; used with GetRateBasedRule,
      UpdateRateBasedRule, UpdateWebACL and DeleteRateBasedRule.
    * ``Name`` — friendly description; cannot be changed after creation.
    * ``MetricName`` — metric name (alphanumeric A-Z, a-z, 0-9; 1-128
      characters; no whitespace and no reserved names such as "All" or
      "Default_Action"); cannot be changed after creation.
    * ``MatchPredicates`` — one ``Predicate`` per ByteMatchSet, IPSet or
      SqlInjectionMatchSet included in the rule. Each predicate specifies a
      match-set object (``Type`` and ``DataId``, the identifier returned by
      the corresponding Create or List command) and ``Negated``: set
      ``False`` to act on requests that match the object's settings (e.g.
      requests from 192.0.2.44 when the IPSet contains it), or ``True`` to
      act on the negation (all addresses except 192.0.2.44).
    * ``RateKey`` — the field used to decide whether requests likely come
      from a single source; ``IP`` is the only valid value.
    * ``RateLimit`` — maximum number of requests with the same ``RateKey``
      value allowed in a five-minute period before AWS WAF triggers the
      rule's action (provided the other predicates also match).

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_rate_based_rule_managed_keys(RuleId=None, NextMarker=None):
    """Return the IP addresses currently blocked by a ``RateBasedRule``.

    At most 10,000 managed keys are blocked; if more than 10,000 addresses
    exceed the rate limit, the 10,000 with the highest rates are blocked.

    See also: AWS API Documentation.

    :example: response = client.get_rate_based_rule_managed_keys(
        RuleId='string',
        NextMarker='string'
    )

    :type RuleId: string
    :param RuleId: [REQUIRED] The ``RuleId`` of the RateBasedRule for which
        you want a list of ``ManagedKeys``. ``RuleId`` is returned by
        CreateRateBasedRule and by ListRateBasedRules.

    :type NextMarker: string
    :param NextMarker: A null value and not currently used. Do not include
        this in your request.

    :rtype: dict
    :return: ::

        {
            'ManagedKeys': [
                'string',
            ],
            'NextMarker': 'string'
        }

    ``ManagedKeys`` is the array of IP addresses currently blocked by the
    specified RateBasedRule; ``NextMarker`` is a null value and not
    currently used.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
    """
    pass
def get_regex_match_set(RegexMatchSetId=None):
    """Return the ``RegexMatchSet`` specified by ``RegexMatchSetId``.

    See also: AWS API Documentation.

    :example: response = client.get_regex_match_set(RegexMatchSetId='string')

    :type RegexMatchSetId: string
    :param RegexMatchSetId: [REQUIRED] The ``RegexMatchSetId`` of the
        RegexMatchSet that you want to get. ``RegexMatchSetId`` is returned
        by CreateRegexMatchSet and by ListRegexMatchSets.

    :rtype: dict
    :return: ::

        {
            'RegexMatchSet': {
                'RegexMatchSetId': 'string',
                'Name': 'string',
                'RegexMatchTuples': [
                    {
                        'FieldToMatch': {
                            'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                            'Data': 'string'
                        },
                        'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                        'RegexPatternSetId': 'string'
                    },
                ]
            }
        }

    ``RegexMatchSet`` describes the set you requested (see RegexMatchTuple):
    ``RegexMatchSetId`` (used with GetRegexMatchSet, UpdateRegexMatchSet,
    UpdateRule and DeleteRegexMatchSet) and ``Name`` (cannot be changed
    after creation). Each ``RegexMatchTuple`` contains:

    * The part of a web request to inspect, such as a query string or the
      value of the ``User-Agent`` header.
    * The identifier of the regular-expression pattern to look for (see
      RegexPatternSet).
    * Whether to perform any conversions on the request, such as converting
      it to lowercase, before inspecting it for the specified string.

    ``FieldToMatch`` specifies where in the request to look for the
    ``RegexPatternSet``. ``Type`` may be:

    * ``HEADER`` — a named request header (name in ``Data``), e.g.
      ``User-Agent`` or ``Referer``.
    * ``METHOD`` — the HTTP method; CloudFront supports DELETE, GET, HEAD,
      OPTIONS, PATCH, POST and PUT.
    * ``QUERY_STRING`` — the part of the URL after the ``?`` character.
    * ``URI`` — the resource path, e.g. ``/images/daily-ad.jpg``.
    * ``BODY`` — the request body (only the first 8192 bytes are forwarded
      to AWS WAF for inspection; see CreateSizeConstraintSet to allow or
      block requests based on body length).
    * ``SINGLE_QUERY_ARG`` — one named query-string parameter (name in
      ``Data``, maximum 30 characters).
    * ``ALL_QUERY_ARGS`` — all query-string parameters.

    ``Data`` carries the header or parameter name when ``Type`` is
    ``HEADER`` or ``SINGLE_QUERY_ARG`` (not case sensitive); omit it for any
    other ``Type``.

    ``TextTransformation`` (exactly one) removes formatting attackers use to
    bypass AWS WAF; it is applied before inspecting the request:

    * ``CMD_LINE`` — counter command-line injection obfuscation: delete
      ``"``, ``'`` and ``^``; delete spaces before ``/`` and ``(``; replace
      ``,`` and ``;`` with a space; collapse multiple spaces into one; and
      convert uppercase letters (A-Z) to lowercase (a-z).
    * ``COMPRESS_WHITE_SPACE`` — replace formfeed (decimal 12), tab (9),
      newline (10), carriage return (13), vertical tab (11) and non-breaking
      space (160) with a space (decimal 32), and collapse multiple spaces
      into one.
    * ``HTML_ENTITY_DECODE`` — replace HTML-encoded characters with their
      unencoded forms: ``&quot;`` with ``"``, ``&nbsp;`` with a non-breaking
      space (160), ``&lt;`` with a "less than" symbol, ``&gt;`` with ``>``,
      and hexadecimal (``&#xhhhh;``) or decimal (``&#nnnn;``) references
      with the corresponding characters.
    * ``LOWERCASE`` — convert uppercase letters (A-Z) to lowercase (a-z).
    * ``URL_DECODE`` — decode a URL-encoded value.
    * ``NONE`` — perform no text transformation.

    ``RegexPatternSetId`` identifies the RegexPatternSet; it is used with
    GetRegexPatternSet, UpdateRegexPatternSet, UpdateRegexMatchSet and
    DeleteRegexPatternSet, and is returned by CreateRegexPatternSet and by
    ListRegexPatternSets.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_regex_pattern_set(RegexPatternSetId=None):
    """Return the ``RegexPatternSet`` specified by ``RegexPatternSetId``.

    See also: AWS API Documentation.

    :example: response = client.get_regex_pattern_set(RegexPatternSetId='string')

    :type RegexPatternSetId: string
    :param RegexPatternSetId: [REQUIRED] The ``RegexPatternSetId`` of the
        RegexPatternSet that you want to get. ``RegexPatternSetId`` is
        returned by CreateRegexPatternSet and by ListRegexPatternSets.

    :rtype: dict
    :return: ::

        {
            'RegexPatternSet': {
                'RegexPatternSetId': 'string',
                'Name': 'string',
                'RegexPatternStrings': [
                    'string',
                ]
            }
        }

    ``RegexPatternSet`` describes the pattern set you requested, including
    its identifier and the regular-expression patterns AWS WAF searches for:

    * ``RegexPatternSetId`` — the identifier for the RegexPatternSet. You
      use it to get information about a RegexPatternSet, update it, remove
      it from a RegexMatchSet, and delete it from AWS WAF.
      ``RegexPatternSetId`` is returned by CreateRegexPatternSet and by
      ListRegexPatternSets.
    * ``Name`` — a friendly name or description of the RegexPatternSet; it
      cannot be changed after creation.
    * ``RegexPatternStrings`` — the regex patterns to search for, such as
      ``B[a@]dB[o0]t``.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_rule(RuleId=None):
    """Return the ``Rule`` specified by the ``RuleId`` in the GetRule request.

    See also: AWS API Documentation.

    :example: response = client.get_rule(RuleId='string')

    :type RuleId: string
    :param RuleId: [REQUIRED] The ``RuleId`` of the Rule that you want to
        get. ``RuleId`` is returned by CreateRule and by ListRules.

    :rtype: dict
    :return: ::

        {
            'Rule': {
                'RuleId': 'string',
                'Name': 'string',
                'MetricName': 'string',
                'Predicates': [
                    {
                        'Negated': True|False,
                        'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                        'DataId': 'string'
                    },
                ]
            }
        }

    ``Rule`` contains ``MetricName``, ``Name``, an array of ``Predicate``
    objects, and ``RuleId``:

    * ``RuleId`` — unique identifier; used with GetRule, UpdateRule,
      UpdateWebACL and DeleteRule. Returned by CreateRule and by ListRules.
    * ``Name`` — friendly description; cannot be changed after creation.
    * ``MetricName`` — metric name (alphanumeric A-Z, a-z, 0-9; 1-128
      characters; no whitespace and no reserved names such as "All" or
      "Default_Action"); cannot be changed after creation.
    * ``Predicates`` — one ``Predicate`` per ByteMatchSet, IPSet or
      SqlInjectionMatchSet included in the Rule. Each predicate carries
      ``Type`` (the kind of match set), ``DataId`` (the identifier returned
      by the corresponding Create or List command) and ``Negated``: set
      ``False`` to act on requests matching the object's settings (e.g.
      requests from 192.0.2.44 when the IPSet contains it), or ``True`` to
      act on the negation (all IP addresses except 192.0.2.44).

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException

    Example — fetch the details of a rule by ID::

        response = client.get_rule(
            RuleId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
        )
        print(response)

    Expected output::

        {
            'Rule': {
                'MetricName': 'WAFByteHeaderRule',
                'Name': 'WAFByteHeaderRule',
                'Predicates': [
                    {
                        'DataId': 'MyByteMatchSetID',
                        'Negated': False,
                        'Type': 'ByteMatch',
                    },
                ],
                'RuleId': 'example1ds3t-46da-4fdb-b8d5-abc321j569j5',
            },
            'ResponseMetadata': {'...': '...'},
        }
    """
    pass
def get_rule_group(RuleGroupId=None):
    """Return the RuleGroup identified by the given RuleGroupId.

    To view the rules contained in a rule group, use
    ListActivatedRulesInRuleGroup.

    See also: AWS API Documentation.

    :type RuleGroupId: string
    :param RuleGroupId: [REQUIRED] The ``RuleGroupId`` of the RuleGroup to
        retrieve. ``RuleGroupId`` is returned by CreateRuleGroup and by
        ListRuleGroups.
    :rtype: dict
    :return: {
        'RuleGroup': {
            'RuleGroupId': 'string',
            'Name': 'string',
            'MetricName': 'string'
        }
    }

    Response fields:
        RuleGroup (dict) -- Information about the requested RuleGroup.
            RuleGroupId (string) -- Unique identifier for the RuleGroup; used
                with GetRuleGroup, UpdateRuleGroup, UpdateWebACL, and
                DeleteRuleGroup.
            Name (string) -- Friendly name or description; cannot be changed
                after creation.
            MetricName (string) -- Metric name (alphanumeric, 1-128 chars, no
                whitespace, not "All" or "Default_Action"); cannot be changed
                after creation.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_sampled_requests(WebAclId=None, RuleId=None, TimeWindow=None, MaxItems=None):
    """Return detailed information about a sample of web requests.

    AWS WAF randomly selects the sample from among the first 5,000 requests
    that your AWS resource received during a time range that you choose. You
    can request a sample of up to 500 requests, and you can specify any time
    range within the previous three hours.

    See also: AWS API Documentation.

    :type WebAclId: string
    :param WebAclId: [REQUIRED] The ``WebACLId`` of the WebACL for which you
        want a sample of requests.
    :type RuleId: string
    :param RuleId: [REQUIRED] Either the ``RuleId`` of the Rule (or the
        ``RuleGroupId`` of the RuleGroup) for which you want a sample of
        requests, or ``Default_Action`` to sample the requests that didn't
        match any of the rules in the specified WebACL.
    :type TimeWindow: dict
    :param TimeWindow: [REQUIRED] The start and end of the range for which you
        want a sample of requests, in Coordinated Universal Time (UTC) format
        (which includes the special designator ``Z``, for example,
        '2016-09-27T14:50Z'). Any range within the previous three hours.

        StartTime (datetime) -- [REQUIRED] Beginning of the time range.
        EndTime (datetime) -- [REQUIRED] End of the time range.
    :type MaxItems: integer
    :param MaxItems: [REQUIRED] The number of requests to return from among
        the first 5,000 received during the time range. If the resource
        received fewer requests than ``MaxItems``, information about all of
        them is returned.
    :rtype: dict
    :return: {
        'SampledRequests': [
            {
                'Request': {
                    'ClientIP': 'string',
                    'Country': 'string',
                    'URI': 'string',
                    'Method': 'string',
                    'HTTPVersion': 'string',
                    'Headers': [
                        {
                            'Name': 'string',
                            'Value': 'string'
                        },
                    ]
                },
                'Weight': 123,
                'Timestamp': datetime(2015, 1, 1),
                'Action': 'string',
                'RuleWithinRuleGroup': 'string'
            },
        ],
        'PopulationSize': 123,
        'TimeWindow': {
            'StartTime': datetime(2015, 1, 1),
            'EndTime': datetime(2015, 1, 1)
        }
    }

    Response fields:
        SampledRequests (list) -- Detailed information about each sampled
            request.
            Request (dict) -- The sampled HTTP request.
                ClientIP (string) -- Originating IP address. For a WebACL
                    associated with a CloudFront distribution this is ``c-ip``
                    (no proxy/load balancer) or ``x-forwarded-for`` (proxy or
                    load balancer used) from the CloudFront access logs.
                Country (string) -- Two-letter ISO 3166-1 alpha-2 country
                    code of the request's origin.
                URI (string) -- The part of the request that identifies the
                    resource, for example, ``/images/daily-ad.jpg``.
                Method (string) -- HTTP method (CloudFront supports DELETE,
                    GET, HEAD, OPTIONS, PATCH, POST, and PUT).
                HTTPVersion (string) -- HTTP version, e.g. ``HTTP/1.1``.
                Headers (list) -- Name/Value pairs for each header in the
                    sampled request.
            Weight (integer) -- Proportional weight of this result relative
                to others in the response (weight 2 is roughly twice as many
                requests as weight 1).
            Timestamp (datetime) -- When AWS WAF received the request, in
                Unix time format (seconds).
            Action (string) -- Action for the matching Rule: ALLOW, BLOCK,
                or COUNT.
            RuleWithinRuleGroup (string) -- Present when the request
                specified a RuleGroup ID; the rule within that RuleGroup
                that matched the request.
        PopulationSize (integer) -- Total number of requests the sample was
            drawn from. If less than ``MaxItems``, the sample includes every
            request received during the time range.
        TimeWindow (dict) -- Usually the range you specified; if the
            resource received more than 5,000 requests, the range covering
            the first 5,000 requests (UTC).

    Raises:
        WAFRegional.Client.exceptions.WAFNonexistentItemException
        WAFRegional.Client.exceptions.WAFInternalErrorException
    """
    pass
def get_size_constraint_set(SizeConstraintSetId=None):
    """Return the SizeConstraintSet specified by SizeConstraintSetId.

    See also: AWS API Documentation.

    :type SizeConstraintSetId: string
    :param SizeConstraintSetId: [REQUIRED] The ``SizeConstraintSetId`` of the
        SizeConstraintSet to retrieve. ``SizeConstraintSetId`` is returned by
        CreateSizeConstraintSet and by ListSizeConstraintSets.
    :rtype: dict
    :return: {
        'SizeConstraintSet': {
            'SizeConstraintSetId': 'string',
            'Name': 'string',
            'SizeConstraints': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                    'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
                    'Size': 123
                },
            ]
        }
    }

    Response fields:
        SizeConstraintSet (dict) -- Information about the requested set.
            SizeConstraintSetId (string) -- Unique identifier; used with
                GetSizeConstraintSet, UpdateSizeConstraintSet, UpdateRule,
                and DeleteSizeConstraintSet.
            Name (string) -- The name, if any, of the SizeConstraintSet.
            SizeConstraints (list) -- Parts of web requests whose size you
                want to inspect. Each entry builds the expression
                "Size ComparisonOperator size in bytes of FieldToMatch"; if
                the expression is true, the SizeConstraint matches.
                FieldToMatch (dict) -- Where in the request to look.
                    Type (string) -- One of:
                        HEADER -- a named request header (name in ``Data``).
                        METHOD -- the HTTP method (CloudFront supports
                            DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT).
                        QUERY_STRING -- the part of a URL after a ``?``.
                        URI -- the resource part of the request, e.g.
                            ``/images/daily-ad.jpg``.
                        BODY -- the request body (only the first 8192 bytes
                            are forwarded to AWS WAF for inspection).
                        SINGLE_QUERY_ARG -- one query-string parameter
                            (name in ``Data``; max 30 characters).
                        ALL_QUERY_ARGS -- all query-string parameters.
                    Data (string) -- Header name (for HEADER) or parameter
                        name (for SINGLE_QUERY_ARG), case insensitive; omit
                        for other types.
                TextTransformation (string) -- Transformation applied to
                    ``FieldToMatch`` before inspection (a single value):
                    NONE, CMD_LINE, COMPRESS_WHITE_SPACE,
                    HTML_ENTITY_DECODE, LOWERCASE, or URL_DECODE. BODY
                    requires NONE because only the first 8192 bytes are
                    forwarded for inspection.
                ComparisonOperator (string) -- EQ, NE, LE, LT, GE, or GT.
                Size (integer) -- Size in bytes to compare against; valid
                    values are 0 - 21474836480 (0 - 20 GB). For URI, the
                    ``/`` counts as one character (``/logo.jpg`` is nine).

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_sql_injection_match_set(SqlInjectionMatchSetId=None):
    """Return the SqlInjectionMatchSet specified by SqlInjectionMatchSetId.

    See also: AWS API Documentation.

    :type SqlInjectionMatchSetId: string
    :param SqlInjectionMatchSetId: [REQUIRED] The ``SqlInjectionMatchSetId``
        of the SqlInjectionMatchSet to retrieve. ``SqlInjectionMatchSetId``
        is returned by CreateSqlInjectionMatchSet and by
        ListSqlInjectionMatchSets.
    :rtype: dict
    :return: {
        'SqlInjectionMatchSet': {
            'SqlInjectionMatchSetId': 'string',
            'Name': 'string',
            'SqlInjectionMatchTuples': [
                {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
                },
            ]
        }
    }

    Response fields:
        SqlInjectionMatchSet (dict) -- Information about the requested set.
            SqlInjectionMatchSetId (string) -- Unique identifier; used with
                GetSqlInjectionMatchSet, UpdateSqlInjectionMatchSet,
                UpdateRule, and DeleteSqlInjectionMatchSet.
            Name (string) -- The name, if any, of the SqlInjectionMatchSet.
            SqlInjectionMatchTuples (list) -- Parts of web requests to
                inspect for snippets of malicious SQL code.
                FieldToMatch (dict) -- Where in the request to look.
                    Type (string) -- One of:
                        HEADER -- a named request header (name in ``Data``).
                        METHOD -- the HTTP method (CloudFront supports
                            DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT).
                        QUERY_STRING -- the part of a URL after a ``?``.
                        URI -- the resource part of the request, e.g.
                            ``/images/daily-ad.jpg``.
                        BODY -- the request body (only the first 8192 bytes
                            are forwarded to AWS WAF for inspection).
                        SINGLE_QUERY_ARG -- one query-string parameter
                            (name in ``Data``; max 30 characters).
                        ALL_QUERY_ARGS -- all query-string parameters.
                    Data (string) -- Header name (for HEADER) or parameter
                        name (for SINGLE_QUERY_ARG), case insensitive; omit
                        for other types.
                TextTransformation (string) -- Transformation applied to
                    ``FieldToMatch`` before inspection (a single value):
                    NONE, CMD_LINE, COMPRESS_WHITE_SPACE,
                    HTML_ENTITY_DECODE, LOWERCASE, or URL_DECODE.

    Raises:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def get_waiter(waiter_name=None):
    """
    Return an object that can wait for some condition.

    Stub: the generated body does nothing; the real client method returns
    a botocore waiter bound to this service.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    """
    pass
def get_web_acl(WebACLId=None):
    """
    Returns the WebACL that is specified by WebACLId.

    See also: AWS API Documentation

    Example (returns the details of a web ACL with the ID
    createwebacl-1472061481310)::

        response = client.get_web_acl(
            WebACLId='createwebacl-1472061481310',
        )

    :type WebACLId: string
    :param WebACLId: [REQUIRED]
        The WebACLId of the WebACL that you want to get. WebACLId is
        returned by CreateWebACL and by ListWebACLs.

    :rtype: dict
    :return: Response syntax::

        {
            'WebACL': {
                'WebACLId': 'string',
                'Name': 'string',
                'MetricName': 'string',
                'DefaultAction': {
                    'Type': 'BLOCK'|'ALLOW'|'COUNT'
                },
                'Rules': [
                    {
                        'Priority': 123,
                        'RuleId': 'string',
                        'Action': {
                            'Type': 'BLOCK'|'ALLOW'|'COUNT'
                        },
                        'OverrideAction': {
                            'Type': 'NONE'|'COUNT'
                        },
                        'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
                        'ExcludedRules': [
                            {
                                'RuleId': 'string'
                            },
                        ]
                    },
                ],
                'WebACLArn': 'string'
            }
        }

    Response structure:

    - WebACL (dict) -- Information about the WebACL that you specified in
      the GetWebACL request: contains DefaultAction, MetricName, Name, an
      array of ActivatedRule objects, and WebACLId.
    - WebACLId (string) -- A unique identifier for a WebACL. You use
      WebACLId to get information about a WebACL (GetWebACL), update it
      (UpdateWebACL), and delete it from AWS WAF (DeleteWebACL). It is
      returned by CreateWebACL and by ListWebACLs.
    - Name (string) -- A friendly name or description of the WebACL. You
      can't change the name of a WebACL after you create it.
    - MetricName (string) -- A friendly name or description for the
      metrics for this WebACL. Alphanumeric characters only (A-Z, a-z,
      0-9), length 1-128; can't contain whitespace or the reserved names
      "All" and "Default_Action"; can't be changed after creation.
    - DefaultAction (dict) -- The action (Type of ALLOW, BLOCK, or COUNT)
      to perform if none of the Rules contained in the WebACL match. You
      can't specify COUNT for the default action of a WebACL.
    - Rules (list) -- One ActivatedRule entry per Rule in the WebACL:

      - Priority (integer) -- The order in which the Rules are evaluated;
        lower values are evaluated first. Must be a unique integer but
        values don't need to be consecutive.
      - RuleId (string) -- The identifier of the Rule; returned by
        CreateRule and by ListRules.
      - Action (dict) -- The action taken when a web request matches the
        Rule: ALLOW (respond with the requested object), BLOCK (respond
        with HTTP 403 Forbidden), or COUNT (count the request and keep
        inspecting the remaining rules).
      - OverrideAction (dict) -- Used instead of Action when the entry
        refers to a RuleGroup. COUNT overrides the action specified by
        the individual rules within the RuleGroup; NONE lets each rule's
        own action take place.
      - Type (string) -- REGULAR (Rule), RATE_BASED (RateBasedRule), or
        GROUP (RuleGroup). Defaults to REGULAR; adding a RATE_BASED rule
        without setting the type makes UpdateWebACL fail.
      - ExcludedRules (list) -- Rules (by RuleId) excluded from a rule
        group; applicable only when the entry refers to a RuleGroup.
        Excluded rules are not removed from the group -- their action is
        changed to COUNT, so matching requests are counted, not blocked.
    - WebACLArn (string) -- The Amazon Resource Name (ARN) of the web ACL.

    Exceptions:

    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFInvalidAccountException
    - WAFRegional.Client.exceptions.WAFNonexistentItemException

    Note: this is AWS WAF Classic documentation. For the latest version
    of AWS WAF, use the AWS WAFV2 API.
    """
    pass
def get_web_acl_for_resource(ResourceArn=None):
    """
    Returns the web ACL for the specified resource, either an application
    load balancer or Amazon API Gateway stage.

    See also: AWS API Documentation

    Example::

        response = client.get_web_acl_for_resource(
            ResourceArn='string'
        )

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]
        The ARN (Amazon Resource Name) of the resource for which to get
        the web ACL. The ARN should be in one of the following formats:

        - For an Application Load Balancer:
          ``arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id``
        - For an Amazon API Gateway stage:
          ``arn:aws:apigateway:region::/restapis/api-id/stages/stage-name``

    :rtype: dict
    :return: Response syntax::

        {
            'WebACLSummary': {
                'WebACLId': 'string',
                'Name': 'string'
            }
        }

    Response structure:

    - WebACLSummary (dict) -- Information about the web ACL that you
      specified in the GetWebACLForResource request. If there is no
      associated resource, a null WebACLSummary is returned.
    - WebACLId (string) -- A unique identifier for a WebACL; returned by
      CreateWebACL and by ListWebACLs, and used with GetWebACL,
      UpdateWebACL and DeleteWebACL.
    - Name (string) -- A friendly name or description of the WebACL. You
      can't change the name of a WebACL after you create it.

    Exceptions:

    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFInvalidAccountException
    - WAFRegional.Client.exceptions.WAFNonexistentItemException
    - WAFRegional.Client.exceptions.WAFInvalidParameterException
    - WAFRegional.Client.exceptions.WAFUnavailableEntityException
    """
    pass
def get_xss_match_set(XssMatchSetId=None):
    """
    Returns the XssMatchSet that is specified by XssMatchSetId.

    See also: AWS API Documentation

    Example (returns the details of an XSS match set with the ID
    example1ds3t-46da-4fdb-b8d5-abc321j569j5)::

        response = client.get_xss_match_set(
            XssMatchSetId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
        )

    :type XssMatchSetId: string
    :param XssMatchSetId: [REQUIRED]
        The XssMatchSetId of the XssMatchSet that you want to get.
        XssMatchSetId is returned by CreateXssMatchSet and by
        ListXssMatchSets.

    :rtype: dict
    :return: Response syntax::

        {
            'XssMatchSet': {
                'XssMatchSetId': 'string',
                'Name': 'string',
                'XssMatchTuples': [
                    {
                        'FieldToMatch': {
                            'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                            'Data': 'string'
                        },
                        'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
                    },
                ]
            }
        }

    Response structure:

    - XssMatchSet (dict) -- Information about the XssMatchSet that you
      specified: Name, XssMatchSetId, and an array of XssMatchTuple
      objects. XssMatchSetId is returned by CreateXssMatchSet and by
      ListXssMatchSets.
    - XssMatchTuples (list) -- The parts of web requests that you want to
      inspect for cross-site scripting attacks; each tuple contains a
      FieldToMatch and a TextTransformation.
    - FieldToMatch.Type (string) -- The part of the request to inspect:

      - HEADER: a specified request header, e.g. User-Agent or Referer;
        name the header in Data.
      - METHOD: the HTTP method (DELETE, GET, HEAD, OPTIONS, PATCH,
        POST, or PUT).
      - QUERY_STRING: the part of a URL that appears after a ``?``
        character, if any.
      - URI: the part of a web request that identifies a resource, e.g.
        /images/daily-ad.jpg.
      - BODY: the HTTP request body; only the first 8192 bytes are
        forwarded to AWS WAF for inspection.
      - SINGLE_QUERY_ARG: one query-string parameter (max 30
        characters); name it in Data.
      - ALL_QUERY_ARGS: like SINGLE_QUERY_ARG, but AWS WAF inspects all
        parameters within the query string.
    - FieldToMatch.Data (string) -- The header or parameter name when
      Type is HEADER or SINGLE_QUERY_ARG (not case sensitive); omit Data
      for any other Type.
    - TextTransformation (string) -- The transformation AWS WAF performs
      on FieldToMatch before inspecting it, to defeat unusual formatting
      used to bypass AWS WAF. Only a single type can be specified:

      - CMD_LINE: deletes ``"``, ``'`` and ``^``; deletes spaces before
        ``/`` and ``(``; replaces ``,`` and ``;`` with a space; collapses
        multiple spaces; lowercases A-Z.
      - COMPRESS_WHITE_SPACE: replaces formfeed, tab, newline, carriage
        return, vertical tab and non-breaking space with a space
        (decimal 32) and collapses multiple spaces into one.
      - HTML_ENTITY_DECODE: replaces HTML-encoded characters (quot,
        nbsp, lt, gt, and hexadecimal/decimal character references) with
        their unencoded equivalents.
      - LOWERCASE: converts uppercase letters (A-Z) to lowercase (a-z).
      - URL_DECODE: decodes a URL-encoded value.
      - NONE: performs no text transformation.

    Exceptions:

    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFInvalidAccountException
    - WAFRegional.Client.exceptions.WAFNonexistentItemException
    """
    pass
def list_activated_rules_in_rule_group(RuleGroupId=None, NextMarker=None, Limit=None):
    """
    Returns an array of ActivatedRule objects.

    See also: AWS API Documentation

    Example::

        response = client.list_activated_rules_in_rule_group(
            RuleGroupId='string',
            NextMarker='string',
            Limit=123
        )

    :type RuleGroupId: string
    :param RuleGroupId: The RuleGroupId of the RuleGroup for which you
        want to get a list of ActivatedRule objects.
    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        ActivatedRules than the value of Limit, AWS WAF returns a
        NextMarker value in the response that allows you to list another
        group of ActivatedRules. For the second and subsequent
        ListActivatedRulesInRuleGroup requests, specify the value of
        NextMarker from the previous response to get information about
        another batch of ActivatedRules.
    :type Limit: integer
    :param Limit: Specifies the number of ActivatedRules that you want
        AWS WAF to return for this request. If you have more
        ActivatedRules than the number that you specify for Limit, the
        response includes a NextMarker value that you can use to get
        another batch of ActivatedRules.

    :rtype: dict
    :return: Response syntax::

        {
            'NextMarker': 'string',
            'ActivatedRules': [
                {
                    'Priority': 123,
                    'RuleId': 'string',
                    'Action': {
                        'Type': 'BLOCK'|'ALLOW'|'COUNT'
                    },
                    'OverrideAction': {
                        'Type': 'NONE'|'COUNT'
                    },
                    'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
                    'ExcludedRules': [
                        {
                            'RuleId': 'string'
                        },
                    ]
                },
            ]
        }

    Response structure:

    - NextMarker (string) -- Present when there are more ActivatedRules
      than the Limit you specified; pass it as NextMarker in the next
      ListActivatedRulesInRuleGroup request to continue listing.
    - ActivatedRules (list) -- An array of ActivatedRule objects:

      - Priority (integer) -- The order in which the Rules in a WebACL
        are evaluated; lower values are evaluated first. Must be a
        unique integer but values don't need to be consecutive.
      - RuleId (string) -- The identifier of the Rule; returned by
        CreateRule and by ListRules.
      - Action (dict) -- The action taken when a web request matches the
        Rule: ALLOW (respond with the requested object), BLOCK (respond
        with HTTP 403 Forbidden), or COUNT (count the request and keep
        inspecting the remaining rules).
      - OverrideAction (dict) -- Used instead of Action when the entry
        refers to a RuleGroup. COUNT overrides the action specified by
        the individual rules within the RuleGroup; NONE lets each rule's
        own action take place.
      - Type (string) -- REGULAR (Rule), RATE_BASED (RateBasedRule), or
        GROUP (RuleGroup). Defaults to REGULAR; adding a RATE_BASED rule
        without setting the type makes UpdateWebACL fail.
      - ExcludedRules (list) -- Rules (by RuleId) excluded from a rule
        group; applicable only when the entry refers to a RuleGroup.
        Excluded rules are not removed from the group -- their action is
        changed to COUNT, so matching requests are counted, not blocked.

    Exceptions:

    - WAFRegional.Client.exceptions.WAFInternalErrorException
    - WAFRegional.Client.exceptions.WAFNonexistentItemException
    - WAFRegional.Client.exceptions.WAFInvalidParameterException

    Note: this is AWS WAF Classic documentation. For the latest version
    of AWS WAF, use the AWS WAFV2 API.
    """
    pass
def list_byte_match_sets(NextMarker=None, Limit=None):
    """
    Returns an array of ByteMatchSetSummary objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_byte_match_sets(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        ByteMatchSets than the value of Limit, AWS WAF returns a NextMarker
        value in the response. For the second and subsequent
        ListByteMatchSets requests, specify the NextMarker value from the
        previous response to get information about another batch of
        ByteMatchSets.

    :type Limit: integer
    :param Limit: The number of ByteMatchSet objects that you want AWS WAF
        to return for this request. If you have more ByteMatchSet objects
        than the number you specify for Limit, the response includes a
        NextMarker value that you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'ByteMatchSets': [
            {
                'ByteMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: the real call is dispatched by the botocore
    # client at runtime, so there is nothing to execute here.
    pass
def list_geo_match_sets(NextMarker=None, Limit=None):
    """
    Returns an array of GeoMatchSetSummary objects in the response.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_geo_match_sets(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        GeoMatchSets than the value of Limit, AWS WAF returns a NextMarker
        value in the response. For the second and subsequent
        ListGeoMatchSets requests, specify the NextMarker value from the
        previous response to get information about another batch of
        GeoMatchSet objects.

    :type Limit: integer
    :param Limit: The number of GeoMatchSet objects that you want AWS WAF
        to return for this request. If you have more GeoMatchSet objects
        than the number you specify for Limit, the response includes a
        NextMarker value that you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'GeoMatchSets': [
            {
                'GeoMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_ip_sets(NextMarker=None, Limit=None):
    """
    Returns an array of IPSetSummary objects in the response.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_ip_sets(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: AWS WAF returns a NextMarker value in the response
        that allows you to list another group of IPSets. For the second and
        subsequent ListIPSets requests, specify the NextMarker value from
        the previous response to get information about another batch of
        IPSets.

    :type Limit: integer
    :param Limit: The number of IPSet objects that you want AWS WAF to
        return for this request. If you have more IPSet objects than the
        number you specify for Limit, the response includes a NextMarker
        value that you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'IPSets': [
            {
                'IPSetId': 'string',
                'Name': 'string'
            },
        ]
    }

    Example -- list up to 100 IP match sets:
        response = client.list_ip_sets(Limit=100)
        # Expected output:
        # {
        #     'IPSets': [
        #         {
        #             'IPSetId': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
        #             'Name': 'MyIPSetFriendlyName',
        #         },
        #     ],
        #     'ResponseMetadata': {'...': '...'},
        # }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_logging_configurations(NextMarker=None, Limit=None):
    """
    Returns an array of LoggingConfiguration objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_logging_configurations(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        LoggingConfigurations than the value of Limit, AWS WAF returns a
        NextMarker value in the response. For the second and subsequent
        ListLoggingConfigurations requests, specify the NextMarker value
        from the previous response to get another batch.

    :type Limit: integer
    :param Limit: The number of LoggingConfigurations that you want AWS WAF
        to return for this request. If you have more LoggingConfigurations
        than the number you specify for Limit, the response includes a
        NextMarker value that you can use to get another batch.

    :rtype: dict
    :return: {
        'LoggingConfigurations': [
            {
                'ResourceArn': 'string',
                'LogDestinationConfigs': [
                    'string',
                ],
                'RedactedFields': [
                    {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                ]
            },
        ],
        'NextMarker': 'string'
    }

    Response notes:
        - ResourceArn: ARN of the web ACL associated with
          LogDestinationConfigs.
        - LogDestinationConfigs: an array of Amazon Kinesis Data Firehose
          ARNs.
        - RedactedFields: the parts of the request redacted from the logs
          (e.g. a redacted cookie field appears as xxx in the firehose).
          For each field, Type selects the request part (HEADER, METHOD,
          QUERY_STRING, URI, BODY -- only the first 8192 bytes of the body
          are forwarded to AWS WAF -- SINGLE_QUERY_ARG with a max length of
          30 characters, or ALL_QUERY_ARGS), and Data names the header or
          query parameter when Type is HEADER or SINGLE_QUERY_ARG (not case
          sensitive); omit Data for other types.

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_rate_based_rules(NextMarker=None, Limit=None):
    """
    Returns an array of RuleSummary objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_rate_based_rules(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        Rules than the value of Limit, AWS WAF returns a NextMarker value
        in the response. For the second and subsequent ListRateBasedRules
        requests, specify the NextMarker value from the previous response
        to get information about another batch of Rules.

    :type Limit: integer
    :param Limit: The number of Rules that you want AWS WAF to return for
        this request. If you have more Rules than the number you specify
        for Limit, the response includes a NextMarker value that you can
        use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'Rules': [
            {
                'RuleId': 'string',
                'Name': 'string'
            },
        ]
    }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_regex_match_sets(NextMarker=None, Limit=None):
    """
    Returns an array of RegexMatchSetSummary objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_regex_match_sets(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        RegexMatchSet objects than the value of Limit, AWS WAF returns a
        NextMarker value in the response that allows you to list another
        group of RegexMatchSet objects. (The upstream text said
        "ByteMatchSets" here, a copy-paste error.) For the second and
        subsequent ListRegexMatchSets requests, specify the NextMarker
        value from the previous response to get information about another
        batch of RegexMatchSet objects.

    :type Limit: integer
    :param Limit: The number of RegexMatchSet objects that you want AWS WAF
        to return for this request. If you have more RegexMatchSet objects
        than the number you specify for Limit, the response includes a
        NextMarker value that you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'RegexMatchSets': [
            {
                'RegexMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_regex_pattern_sets(NextMarker=None, Limit=None):
    """
    Returns an array of RegexPatternSetSummary objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_regex_pattern_sets(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        RegexPatternSet objects than the value of Limit, AWS WAF returns a
        NextMarker value in the response. For the second and subsequent
        ListRegexPatternSets requests, specify the NextMarker value from
        the previous response to get information about another batch of
        RegexPatternSet objects.

    :type Limit: integer
    :param Limit: The number of RegexPatternSet objects that you want AWS
        WAF to return for this request. If you have more RegexPatternSet
        objects than the number you specify for Limit, the response
        includes a NextMarker value that you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'RegexPatternSets': [
            {
                'RegexPatternSetId': 'string',
                'Name': 'string'
            },
        ]
    }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_resources_for_web_acl(WebACLId=None, ResourceType=None):
    """
    Returns an array of resources associated with the specified web ACL.

    See also: AWS API Documentation

    :example: response = client.list_resources_for_web_acl(
        WebACLId='string',
        ResourceType='APPLICATION_LOAD_BALANCER'|'API_GATEWAY'
    )

    :type WebACLId: string
    :param WebACLId: [REQUIRED]\nThe unique identifier (ID) of the web ACL for which to list the associated resources.\n

    :type ResourceType: string
    :param ResourceType: The type of resource to list, either an
        application load balancer or Amazon API Gateway.

    :rtype: dict
    :return: {
        'ResourceArns': [
            'string',
        ]
    }

    ResourceArns is an array of ARNs of the resources associated with the
    specified web ACL; it has zero elements when no resources are
    associated.

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
        WAFRegional.Client.exceptions.WAFNonexistentItemException
        WAFRegional.Client.exceptions.WAFInvalidParameterException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_rule_groups(NextMarker=None, Limit=None):
    """
    Returns an array of RuleGroup objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_rule_groups(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        RuleGroups than the value of Limit, AWS WAF returns a NextMarker
        value in the response. For the second and subsequent ListRuleGroups
        requests, specify the NextMarker value from the previous response
        to get information about another batch of RuleGroups.

    :type Limit: integer
    :param Limit: The number of RuleGroups that you want AWS WAF to return
        for this request. If you have more RuleGroups than the number you
        specify for Limit, the response includes a NextMarker value that
        you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'RuleGroups': [
            {
                'RuleGroupId': 'string',
                'Name': 'string'
            },
        ]
    }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_rules(NextMarker=None, Limit=None):
    """
    Returns an array of RuleSummary objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_rules(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        Rules than the value of Limit, AWS WAF returns a NextMarker value
        in the response. For the second and subsequent ListRules requests,
        specify the NextMarker value from the previous response to get
        information about another batch of Rules.

    :type Limit: integer
    :param Limit: The number of Rules that you want AWS WAF to return for
        this request. If you have more Rules than the number you specify
        for Limit, the response includes a NextMarker value that you can
        use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'Rules': [
            {
                'RuleId': 'string',
                'Name': 'string'
            },
        ]
    }

    Example -- list up to 100 rules:
        response = client.list_rules(Limit=100)
        # Expected output:
        # {
        #     'Rules': [
        #         {
        #             'Name': 'WAFByteHeaderRule',
        #             'RuleId': 'WAFRule-1-Example',
        #         },
        #     ],
        #     'ResponseMetadata': {'...': '...'},
        # }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_size_constraint_sets(NextMarker=None, Limit=None):
    """
    Returns an array of SizeConstraintSetSummary objects.

    Note: this is AWS WAF Classic documentation. For the latest version of
    AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide.

    See also: AWS API Documentation

    :example: response = client.list_size_constraint_sets(
        NextMarker='string',
        Limit=123
    )

    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more
        SizeConstraintSets than the value of Limit, AWS WAF returns a
        NextMarker value in the response. For the second and subsequent
        ListSizeConstraintSets requests, specify the NextMarker value from
        the previous response to get information about another batch of
        SizeConstraintSets.

    :type Limit: integer
    :param Limit: The number of SizeConstraintSet objects that you want AWS
        WAF to return for this request. If you have more SizeConstraintSet
        objects than the number you specify for Limit, the response
        includes a NextMarker value that you can use to get another batch.

    :rtype: dict
    :return: {
        'NextMarker': 'string',
        'SizeConstraintSets': [
            {
                'SizeConstraintSetId': 'string',
                'Name': 'string'
            },
        ]
    }

    Example -- list up to 100 size constraint match sets
    (original text misspelled this "size contraint match sets"):
        response = client.list_size_constraint_sets(Limit=100)
        # Expected output:
        # {
        #     'SizeConstraintSets': [
        #         {
        #             'Name': 'MySampleSizeConstraintSet',
        #             'SizeConstraintSetId': 'example1ds3t-46da-4fdb-b8d5-abc321j569j5',
        #         },
        #     ],
        #     'ResponseMetadata': {'...': '...'},
        # }

    Exceptions:
        WAFRegional.Client.exceptions.WAFInternalErrorException
        WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # Documentation-only stub: no executable behavior.
    pass
def list_sql_injection_match_sets(NextMarker=None, Limit=None):
    """
    Returns an array of SqlInjectionMatchSet objects.
    See also: AWS API Documentation
    Exceptions
    Examples
    The following example returns an array of up to 100 SQL injection match sets.
    Expected Output:
    :example: response = client.list_sql_injection_match_sets(
        NextMarker='string',
        Limit=123
    )
    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more SqlInjectionMatchSet objects than the value of Limit , AWS WAF returns a NextMarker value in the response that allows you to list another group of SqlInjectionMatchSets . For the second and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of SqlInjectionMatchSets .
    :type Limit: integer
    :param Limit: Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF to return for this request. If you have more SqlInjectionMatchSet objects than the number you specify for Limit , the response includes a NextMarker value that you can use to get another batch of Rules .
    :rtype: dict
    ReturnsResponse Syntax
    {
        'NextMarker': 'string',
        'SqlInjectionMatchSets': [
            {
                'SqlInjectionMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }
    Response Structure
    (dict) --
    The response to a ListSqlInjectionMatchSets request.
    NextMarker (string) --
    If you have more SqlInjectionMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.
    SqlInjectionMatchSets (list) --
    An array of SqlInjectionMatchSetSummary objects.
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    The Id and Name of a SqlInjectionMatchSet .
    SqlInjectionMatchSetId (string) --
    A unique identifier for a SqlInjectionMatchSet . You use SqlInjectionMatchSetId to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet ), update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet ), insert a SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule ), and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet ).
    SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets .
    Name (string) --
    The name of the SqlInjectionMatchSet , if any, specified by Id .
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    Examples
    The following example returns an array of up to 100 SQL injection match sets.
    response = client.list_sql_injection_match_sets(
        Limit=100,
    )
    print(response)
    Expected Output:
    {
        'SqlInjectionMatchSets': [
            {
                'Name': 'MySQLInjectionMatchSet',
                'SqlInjectionMatchSetId': 'example1ds3t-46da-4fdb-b8d5-abc321j569j5',
            },
        ],
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {
        'NextMarker': 'string',
        'SqlInjectionMatchSets': [
            {
                'SqlInjectionMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }
    :returns:
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def list_subscribed_rule_groups(NextMarker=None, Limit=None):
    """
    Returns an array of RuleGroup objects that you are subscribed to.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_subscribed_rule_groups(
        NextMarker='string',
        Limit=123
    )
    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more ByteMatchSets subscribed rule groups than the value of Limit , AWS WAF returns a NextMarker value in the response that allows you to list another group of subscribed rule groups. For the second and subsequent ListSubscribedRuleGroupsRequest requests, specify the value of NextMarker from the previous response to get information about another batch of subscribed rule groups.
    :type Limit: integer
    :param Limit: Specifies the number of subscribed rule groups that you want AWS WAF to return for this request. If you have more objects than the number you specify for Limit , the response includes a NextMarker value that you can use to get another batch of objects.
    :rtype: dict
    ReturnsResponse Syntax
    {
        'NextMarker': 'string',
        'RuleGroups': [
            {
                'RuleGroupId': 'string',
                'Name': 'string',
                'MetricName': 'string'
            },
        ]
    }
    Response Structure
    (dict) --
    NextMarker (string) --
    If you have more objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more objects, submit another ListSubscribedRuleGroups request, and specify the NextMarker value from the response in the NextMarker value in the next request.
    RuleGroups (list) --
    An array of RuleGroup objects.
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    A summary of the rule groups you are subscribed to.
    RuleGroupId (string) --
    A unique identifier for a RuleGroup .
    Name (string) --
    A friendly name or description of the RuleGroup . You can\'t change the name of a RuleGroup after you create it.
    MetricName (string) --
    A friendly name or description for the metrics for this RuleGroup . The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can\'t contain whitespace or metric names reserved for AWS WAF, including "All" and "Default_Action." You can\'t change the name of the metric after you create the RuleGroup .
    Exceptions
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    :return: {
        'NextMarker': 'string',
        'RuleGroups': [
            {
                'RuleGroupId': 'string',
                'Name': 'string',
                'MetricName': 'string'
            },
        ]
    }
    :returns:
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def list_tags_for_resource(NextMarker=None, Limit=None, ResourceARN=None):
    """
    Retrieves the tags associated with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.
    Tagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.list_tags_for_resource(
        NextMarker='string',
        Limit=123,
        ResourceARN='string'
    )
    :type NextMarker: string
    :param NextMarker: Pagination marker (assumed: the NextMarker returned by a previous response — the generated source leaves this description blank; TODO confirm against the AWS API reference).
    :type Limit: integer
    :param Limit: Maximum number of tags to return per request (assumed: the generated source leaves this description blank; TODO confirm against the AWS API reference).
    :type ResourceARN: string
    :param ResourceARN: [REQUIRED]
    :rtype: dict
    ReturnsResponse Syntax
    {
        'NextMarker': 'string',
        'TagInfoForResource': {
            'ResourceARN': 'string',
            'TagList': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    NextMarker (string) --
    TagInfoForResource (dict) --
    ResourceARN (string) --
    TagList (list) --
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.
    Tagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.
    Key (string) --
    Value (string) --
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFBadRequestException
    WAFRegional.Client.exceptions.WAFTagOperationException
    WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    :return: {
        'NextMarker': 'string',
        'TagInfoForResource': {
            'ResourceARN': 'string',
            'TagList': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }
    :returns:
    Key (string) --
    Value (string) --
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def list_web_acls(NextMarker=None, Limit=None):
    """
    Returns an array of WebACLSummary objects in the response.
    See also: AWS API Documentation
    Exceptions
    Examples
    The following example returns an array of up to 100 web ACLs.
    Expected Output:
    :example: response = client.list_web_acls(
        NextMarker='string',
        Limit=123
    )
    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more WebACL objects than the number that you specify for Limit , AWS WAF returns a NextMarker value in the response that allows you to list another group of WebACL objects. For the second and subsequent ListWebACLs requests, specify the value of NextMarker from the previous response to get information about another batch of WebACL objects.
    :type Limit: integer
    :param Limit: Specifies the number of WebACL objects that you want AWS WAF to return for this request. If you have more WebACL objects than the number that you specify for Limit , the response includes a NextMarker value that you can use to get another batch of WebACL objects.
    :rtype: dict
    ReturnsResponse Syntax
    {
        'NextMarker': 'string',
        'WebACLs': [
            {
                'WebACLId': 'string',
                'Name': 'string'
            },
        ]
    }
    Response Structure
    (dict) --
    NextMarker (string) --
    If you have more WebACL objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more WebACL objects, submit another ListWebACLs request, and specify the NextMarker value from the response in the NextMarker value in the next request.
    WebACLs (list) --
    An array of WebACLSummary objects.
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    Contains the identifier and the name or description of the WebACL .
    WebACLId (string) --
    A unique identifier for a WebACL . You use WebACLId to get information about a WebACL (see GetWebACL ), update a WebACL (see UpdateWebACL ), and delete a WebACL from AWS WAF (see DeleteWebACL ).
    WebACLId is returned by CreateWebACL and by ListWebACLs .
    Name (string) --
    A friendly name or description of the WebACL . You can\'t change the name of a WebACL after you create it.
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    Examples
    The following example returns an array of up to 100 web ACLs.
    response = client.list_web_acls(
        Limit=100,
    )
    print(response)
    Expected Output:
    {
        'WebACLs': [
            {
                'Name': 'WebACLexample',
                'WebACLId': 'webacl-1472061481310',
            },
        ],
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {
        'NextMarker': 'string',
        'WebACLs': [
            {
                'WebACLId': 'string',
                'Name': 'string'
            },
        ]
    }
    :returns:
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def list_xss_match_sets(NextMarker=None, Limit=None):
    """
    Returns an array of XssMatchSet objects.
    See also: AWS API Documentation
    Exceptions
    Examples
    The following example returns an array of up to 100 XSS match sets.
    Expected Output:
    :example: response = client.list_xss_match_sets(
        NextMarker='string',
        Limit=123
    )
    :type NextMarker: string
    :param NextMarker: If you specify a value for Limit and you have more XssMatchSet objects than the value of Limit , AWS WAF returns a NextMarker value in the response that allows you to list another group of XssMatchSets . For the second and subsequent ListXssMatchSets requests, specify the value of NextMarker from the previous response to get information about another batch of XssMatchSets .
    :type Limit: integer
    :param Limit: Specifies the number of XssMatchSet objects that you want AWS WAF to return for this request. If you have more XssMatchSet objects than the number you specify for Limit , the response includes a NextMarker value that you can use to get another batch of Rules .
    :rtype: dict
    ReturnsResponse Syntax
    {
        'NextMarker': 'string',
        'XssMatchSets': [
            {
                'XssMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }
    Response Structure
    (dict) --
    The response to a ListXssMatchSets request.
    NextMarker (string) --
    If you have more XssMatchSet objects than the number that you specified for Limit in the request, the response includes a NextMarker value. To list more XssMatchSet objects, submit another ListXssMatchSets request, and specify the NextMarker value from the response in the NextMarker value in the next request.
    XssMatchSets (list) --
    An array of XssMatchSetSummary objects.
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    The Id and Name of an XssMatchSet .
    XssMatchSetId (string) --
    A unique identifier for an XssMatchSet . You use XssMatchSetId to get information about a XssMatchSet (see GetXssMatchSet ), update an XssMatchSet (see UpdateXssMatchSet ), insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule ), and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet ).
    XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets .
    Name (string) --
    The name of the XssMatchSet , if any, specified by Id .
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    Examples
    The following example returns an array of up to 100 XSS match sets.
    response = client.list_xss_match_sets(
        Limit=100,
    )
    print(response)
    Expected Output:
    {
        'XssMatchSets': [
            {
                'Name': 'MySampleXssMatchSet',
                'XssMatchSetId': 'example1ds3t-46da-4fdb-b8d5-abc321j569j5',
            },
        ],
        'ResponseMetadata': {
            '...': '...',
        },
    }
    :return: {
        'NextMarker': 'string',
        'XssMatchSets': [
            {
                'XssMatchSetId': 'string',
                'Name': 'string'
            },
        ]
    }
    :returns:
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def put_logging_configuration(LoggingConfiguration=None):
    """
    Associates a LoggingConfiguration with a specified web ACL.
    You can access information about all traffic that AWS WAF inspects using the following steps:
    When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.put_logging_configuration(
        LoggingConfiguration={
            'ResourceArn': 'string',
            'LogDestinationConfigs': [
                'string',
            ],
            'RedactedFields': [
                {
                    'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                    'Data': 'string'
                },
            ]
        }
    )
    :type LoggingConfiguration: dict
    :param LoggingConfiguration: [REQUIRED]\nThe Amazon Kinesis Data Firehose that contains the inspected traffic information, the redacted fields details, and the Amazon Resource Name (ARN) of the web ACL to monitor.\n\nNote\nWhen specifying Type in RedactedFields , you must use one of the following values: URI , QUERY_STRING , HEADER , or METHOD .\n\n\nResourceArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs .\n\nLogDestinationConfigs (list) -- [REQUIRED]An array of Amazon Kinesis Data Firehose ARNs.\n\n(string) --\n\n\nRedactedFields (list) --The parts of the request that you want redacted from the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx .\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nSpecifies where in a web request to look for TargetString .\n\nType (string) -- [REQUIRED]The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:\n\nHEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data .\nMETHOD : The HTTP method, which indicates the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT .\nQUERY_STRING : A query string, which is the part of a URL that appears after a ? 
    character, if any.\nURI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\nBODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet .\nSINGLE_QUERY_ARG : The parameter in the query string that you will inspect, such as UserName or SalesRegion . The maximum length for SINGLE_QUERY_ARG is 30 characters.\nALL_QUERY_ARGS : Similar to SINGLE_QUERY_ARG , but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString .\n\n\nData (string) --When the value of Type is HEADER , enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer . The name of the header is not case sensitive.\nWhen the value of Type is SINGLE_QUERY_ARG , enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion . The parameter name is not case sensitive.\nIf the value of Type is any other value, omit Data .\n\n\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax{
        'LoggingConfiguration': {
            'ResourceArn': 'string',
            'LogDestinationConfigs': [
                'string',
            ],
            'RedactedFields': [
                {
                    'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                    'Data': 'string'
                },
            ]
        }
    }
    Response Structure
    (dict) --
    LoggingConfiguration (dict) --The LoggingConfiguration that you submitted in the request.
    ResourceArn (string) --The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs .
    LogDestinationConfigs (list) --An array of Amazon Kinesis Data Firehose ARNs.
    (string) --
    RedactedFields (list) --The parts of the request that you want redacted from the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx .
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    Specifies where in a web request to look for TargetString .
    Type (string) --The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:
    HEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data .
    METHOD : The HTTP method, which indicates the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT .
    QUERY_STRING : A query string, which is the part of a URL that appears after a ? character, if any.
    URI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
    BODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet .
    SINGLE_QUERY_ARG : The parameter in the query string that you will inspect, such as UserName or SalesRegion . The maximum length for SINGLE_QUERY_ARG is 30 characters.
    ALL_QUERY_ARGS : Similar to SINGLE_QUERY_ARG , but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString .
    Data (string) --When the value of Type is HEADER , enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer . The name of the header is not case sensitive.
    When the value of Type is SINGLE_QUERY_ARG , enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion . The parameter name is not case sensitive.
    If the value of Type is any other value, omit Data .
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFServiceLinkedRoleErrorException
    :return: {
        'LoggingConfiguration': {
            'ResourceArn': 'string',
            'LogDestinationConfigs': [
                'string',
            ],
            'RedactedFields': [
                {
                    'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                    'Data': 'string'
                },
            ]
        }
    }
    :returns:
    Associate that firehose to your web ACL using a PutLoggingConfiguration request.
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def put_permission_policy(ResourceArn=None, Policy=None):
    """
    Attaches an IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.
    The PutPermissionPolicy is subject to the following restrictions:
    For more information, see IAM Policies .
    An example of a valid policy parameter is shown in the Examples section below.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.put_permission_policy(
        ResourceArn='string',
        Policy='string'
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.\n
    :type Policy: string
    :param Policy: [REQUIRED]\nThe policy to attach to the specified RuleGroup.\n
    :rtype: dict
    ReturnsResponse Syntax
    {}
    Response Structure
    (dict) --
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFInvalidPermissionPolicyException
    :return: {}
    :returns:
    ResourceArn (string) -- [REQUIRED]
    The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.
    Policy (string) -- [REQUIRED]
    The policy to attach to the specified RuleGroup.
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def tag_resource(ResourceARN=None, Tags=None):
    """
    Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.
    Tagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can use this action to tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.
    See also: AWS API Documentation
    Exceptions
    :example: response = client.tag_resource(
        ResourceARN='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type ResourceARN: string
    :param ResourceARN: [REQUIRED]
    :type Tags: list
    :param Tags: [REQUIRED]\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nA tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to 'customer' and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.\nTagging is only available through the API, SDKs, and CLI. You can\'t manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.\n\nKey (string) -- [REQUIRED]\nValue (string) -- [REQUIRED]\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {}
    Response Structure
    (dict) --
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFBadRequestException
    WAFRegional.Client.exceptions.WAFTagOperationException
    WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    :return: {}
    :returns:
    (dict) --
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def untag_resource(ResourceARN=None, TagKeys=None):
    """
    Disassociates the specified tag keys from the specified AWS resource
    (summary added during review — the generated docstring had no description;
    semantics inferred from the operation name and TagKeys parameter, confirm
    against the AWS WAF Regional UntagResource API reference).
    See also: AWS API Documentation
    Exceptions
    :example: response = client.untag_resource(
        ResourceARN='string',
        TagKeys=[
            'string',
        ]
    )
    :type ResourceARN: string
    :param ResourceARN: [REQUIRED]
    :type TagKeys: list
    :param TagKeys: [REQUIRED]\n\n(string) --\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {}
    Response Structure
    (dict) --
    Exceptions
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFBadRequestException
    WAFRegional.Client.exceptions.WAFTagOperationException
    WAFRegional.Client.exceptions.WAFTagOperationInternalErrorException
    :return: {}
    :returns:
    (dict) --
    """
    # NOTE(review): documentation stub — the body is intentionally empty; this
    # module appears to be generated API-reference text, not an implementation.
    pass
def update_byte_match_set(ByteMatchSetId=None, ChangeToken=None, Updates=None):
    """
    Insert or delete ByteMatchTuple objects (filters) in a ByteMatchSet.

    For example, you can add a ByteMatchSetUpdate object that matches web
    requests whose User-Agent header contains the string BadBot, and then
    configure AWS WAF to block those requests.

    Typical workflow: create a ByteMatchSet (CreateByteMatchSet), obtain a
    change token (GetChangeToken), then submit an UpdateByteMatchSet request
    describing the part of the request to inspect and the value to watch for.

    See also: AWS API Documentation.

    :example: response = client.update_byte_match_set(
        ByteMatchSetId='string',
        ChangeToken='string',
        Updates=[
            {
                'Action': 'INSERT'|'DELETE',
                'ByteMatchTuple': {
                    'FieldToMatch': {
                        'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
                        'Data': 'string'
                    },
                    'TargetString': b'bytes',
                    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
                    'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
                }
            },
        ]
    )

    :type ByteMatchSetId: string
    :param ByteMatchSetId: [REQUIRED] The ByteMatchSetId of the ByteMatchSet
        to update, as returned by CreateByteMatchSet and ListByteMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :type Updates: list
    :param Updates: [REQUIRED] ByteMatchSetUpdate objects to insert into or
        delete from the set. Each update carries:

        - Action (string, REQUIRED): 'INSERT' or 'DELETE'. For DELETE, the
          tuple values must exactly match an existing ByteMatchTuple.
        - ByteMatchTuple (dict, REQUIRED):
            - FieldToMatch (dict, REQUIRED): where to search. Type is one of
              HEADER, METHOD, QUERY_STRING, URI, BODY (only the first 8192
              bytes are inspected), SINGLE_QUERY_ARG (name up to 30 chars),
              ALL_QUERY_ARGS. Data names the header or query parameter when
              Type is HEADER or SINGLE_QUERY_ARG; omit it otherwise.
            - TargetString (bytes, REQUIRED): the value to search for, at
              most 50 bytes before base64 encoding. Case sensitive for
              alphabetic characters. The AWS WAF API expects a
              base64-encoded value; the CLI/SDKs encode it automatically.
            - TextTransformation (string, REQUIRED): transformation applied
              to FieldToMatch before matching — NONE, CMD_LINE,
              COMPRESS_WHITE_SPACE, HTML_ENTITY_DECODE, LOWERCASE, or
              URL_DECODE. Only a single transformation may be specified.
            - PositionalConstraint (string, REQUIRED): where TargetString
              must appear — EXACTLY, STARTS_WITH, ENDS_WITH, CONTAINS, or
              CONTAINS_WORD (alphanumeric/underscore word boundaries).

    :rtype: dict
    :return: {
        'ChangeToken': 'string'
    }

    ChangeToken in the response is the token you submitted; use it with
    GetChangeTokenStatus to query the request status.

    Raises (WAFRegional.Client.exceptions): WAFInternalErrorException,
    WAFInvalidAccountException, WAFInvalidOperationException,
    WAFInvalidParameterException, WAFNonexistentContainerException,
    WAFNonexistentItemException, WAFStaleDataException,
    WAFLimitsExceededException.
    """
    # NOTE(review): documentation stub — presumably replaced by botocore's
    # dynamically generated client method at runtime.
    pass
def update_geo_match_set(GeoMatchSetId=None, ChangeToken=None, Updates=None):
    """
    Insert or delete GeoMatchConstraint objects in a GeoMatchSet.

    When you update a GeoMatchSet you specify the country to add and/or the
    country to delete; to change a country, delete the existing constraint
    and add the new one. Typical workflow: CreateGeoMatchSet, then
    GetChangeToken, then submit UpdateGeoMatchSet with the countries AWS WAF
    should watch for.

    See also: AWS API Documentation.

    :example: response = client.update_geo_match_set(
        GeoMatchSetId='string',
        ChangeToken='string',
        Updates=[
            {
                'Action': 'INSERT'|'DELETE',
                'GeoMatchConstraint': {
                    'Type': 'Country',
                    'Value': '<two-letter ISO country code, e.g. US, GB, DE>'
                }
            },
        ]
    )

    :type GeoMatchSetId: string
    :param GeoMatchSetId: [REQUIRED] The GeoMatchSetId of the GeoMatchSet
        to update, as returned by CreateGeoMatchSet and ListGeoMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :type Updates: list
    :param Updates: [REQUIRED] GeoMatchSetUpdate objects to insert or
        delete. Each update carries:

        - Action (string, REQUIRED): 'INSERT' or 'DELETE'.
        - GeoMatchConstraint (dict, REQUIRED): Type (REQUIRED; 'Country' is
          currently the only valid value) and Value (REQUIRED; the country
          code to match). Only one Type/Value pair per constraint — include
          multiple update objects to add multiple countries.

    :rtype: dict
    :return: {
        'ChangeToken': 'string'
    }

    ChangeToken in the response is the token you submitted; use it with
    GetChangeTokenStatus to query the request status.

    Raises (WAFRegional.Client.exceptions): WAFStaleDataException,
    WAFInternalErrorException, WAFInvalidAccountException,
    WAFInvalidOperationException, WAFInvalidParameterException,
    WAFNonexistentContainerException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFLimitsExceededException.
    """
    # NOTE(review): documentation stub — presumably replaced by botocore's
    # dynamically generated client method at runtime.
    pass
def update_ip_set(IPSetId=None, ChangeToken=None, Updates=None):
    """
    Insert or delete IPSetDescriptor objects in an IPSet.

    An IPSet specifies which web requests to allow or block based on the
    originating IP addresses. AWS WAF supports IPv4 ranges /8 and /16
    through /32, and IPv6 ranges /24, /32, /48, /56, /64, and /128 (CIDR
    notation; see the Wikipedia entry on Classless Inter-Domain Routing).
    To change an address, delete the existing descriptor and add the new
    one. At most 1000 addresses may be inserted in a single request.

    IPv6 addresses can be written in any equivalent form, e.g.
    1111:0000:0000:0000:0000:0000:0000:0111/128, 1111:0:0:0:0:0:0:0111/128,
    1111::0111/128, or 1111::111/128.

    See also: AWS API Documentation.

    :example: response = client.update_ip_set(
        IPSetId='string',
        ChangeToken='string',
        Updates=[
            {
                'Action': 'INSERT'|'DELETE',
                'IPSetDescriptor': {
                    'Type': 'IPV4'|'IPV6',
                    'Value': 'string'
                }
            },
        ]
    )

    :type IPSetId: string
    :param IPSetId: [REQUIRED] The IPSetId of the IPSet to update, as
        returned by CreateIPSet and ListIPSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :type Updates: list
    :param Updates: [REQUIRED] IPSetUpdate objects to insert or delete.
        Each update carries:

        - Action (string, REQUIRED): 'INSERT' or 'DELETE'.
        - IPSetDescriptor (dict, REQUIRED): Type (REQUIRED; 'IPV4' or
          'IPV6') and Value (REQUIRED; a CIDR range, e.g. '192.0.2.44/32'
          for a single IPv4 address or '192.0.2.0/24' for 192.0.2.0
          through 192.0.2.255).

    :rtype: dict
    :return: {
        'ChangeToken': 'string'
    }

    ChangeToken in the response is the token you submitted; use it with
    GetChangeTokenStatus to query the request status.

    Raises (WAFRegional.Client.exceptions): WAFStaleDataException,
    WAFInternalErrorException, WAFInvalidAccountException,
    WAFInvalidOperationException, WAFInvalidParameterException,
    WAFNonexistentContainerException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFLimitsExceededException.
    """
    # NOTE(review): documentation stub — presumably replaced by botocore's
    # dynamically generated client method at runtime.
    pass
def update_rate_based_rule(RuleId=None, ChangeToken=None, Updates=None, RateLimit=None):
    """
    Insert or delete Predicate objects in a RateBasedRule and update its
    RateLimit.

    Each Predicate identifies a condition (such as a ByteMatchSet or an
    IPSet) describing the web requests to count or block; RateLimit is the
    number of matching requests per five-minute period that triggers the
    rule. With multiple predicates, a request must match all of them AND
    exceed the RateLimit to be counted or blocked — e.g. an IPSet matching
    192.0.2.44 plus a ByteMatchSet matching BadBot in the User-Agent header,
    with a RateLimit of 1,000, blocks only requests satisfying both
    conditions arriving faster than 1,000 per five minutes. Similarly, a
    ByteMatchSet with FieldToMatch of URI, PositionalConstraint
    STARTS_WITH, and TargetString login can rate-limit a login page without
    affecting the rest of a site.

    See also: AWS API Documentation.

    :example: response = client.update_rate_based_rule(
        RuleId='string',
        ChangeToken='string',
        Updates=[
            {
                'Action': 'INSERT'|'DELETE',
                'Predicate': {
                    'Negated': True|False,
                    'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
                    'DataId': 'string'
                }
            },
        ],
        RateLimit=123
    )

    :type RuleId: string
    :param RuleId: [REQUIRED] The RuleId of the RateBasedRule to update, as
        returned by CreateRateBasedRule and ListRateBasedRules.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent
        call to GetChangeToken.
    :type Updates: list
    :param Updates: [REQUIRED] RuleUpdate objects to insert into or delete
        from the rule. Each update carries:

        - Action (string, REQUIRED): 'INSERT' to add a Predicate, 'DELETE'
          to remove one.
        - Predicate (dict, REQUIRED): Negated (boolean, REQUIRED; True
          matches everything EXCEPT the referenced set's contents), Type
          (string, REQUIRED; the predicate kind, e.g. ByteMatch or
          IPMatch), and DataId (string, REQUIRED; the identifier of the
          referenced set, e.g. ByteMatchSetId or IPSetId, as returned by
          the corresponding Create or List call).

    :type RateLimit: integer
    :param RateLimit: [REQUIRED] The maximum number of requests with an
        identical value in the RateKey field allowed per five-minute
        period before the rule's action is triggered.

    :rtype: dict
    :return: {
        'ChangeToken': 'string'
    }

    ChangeToken in the response is the token you submitted; use it with
    GetChangeTokenStatus to query the request status.

    Raises (WAFRegional.Client.exceptions): WAFStaleDataException,
    WAFInternalErrorException, WAFInvalidAccountException,
    WAFInvalidOperationException, WAFInvalidParameterException,
    WAFNonexistentContainerException, WAFNonexistentItemException,
    WAFReferencedItemException, WAFLimitsExceededException.
    """
    # NOTE(review): documentation stub — presumably replaced by botocore's
    # dynamically generated client method at runtime.
    pass
def update_regex_match_set(RegexMatchSetId=None, Updates=None, ChangeToken=None):
    """
    Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet . For each RegexMatchSetUpdate object, you specify the following values:
    For example, you can create a RegexPatternSet that matches any requests with User-Agent headers that contain the string B[a@]dB[o0]t . You can then configure AWS WAF to reject those requests.
    To create and configure a RegexMatchSet , perform the following steps:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.update_regex_match_set(
    RegexMatchSetId='string',
    Updates=[
    {
    'Action': 'INSERT'|'DELETE',
    'RegexMatchTuple': {
    'FieldToMatch': {
    'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
    'Data': 'string'
    },
    'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
    'RegexPatternSetId': 'string'
    }
    },
    ],
    ChangeToken='string'
    )
    :type RegexMatchSetId: string
    :param RegexMatchSetId: [REQUIRED]\nThe RegexMatchSetId of the RegexMatchSet that you want to update. RegexMatchSetId is returned by CreateRegexMatchSet and by ListRegexMatchSets .\n
    :type Updates: list
    :param Updates: [REQUIRED]\nAn array of RegexMatchSetUpdate objects that you want to insert into or delete from a RegexMatchSet . For more information, see RegexMatchTuple .\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nIn an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple .\n\nAction (string) -- [REQUIRED]Specifies whether to insert or delete a RegexMatchTuple .\n\nRegexMatchTuple (dict) -- [REQUIRED]Information about the part of a web request that you want AWS WAF to inspect and the identifier of the regular expression (regex) pattern that you want AWS WAF to search for. If you specify DELETE for the value of Action , the RegexMatchTuple values must exactly match the values in the RegexMatchTuple that you want to delete from the RegexMatchSet .\n\nFieldToMatch (dict) -- [REQUIRED]Specifies where in a web request to look for the RegexPatternSet .\n\nType (string) -- [REQUIRED]The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:\n\nHEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data .\nMETHOD : The HTTP method, which indicates the type of operation that the request is asking the origin to perform. Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT .\nQUERY_STRING : A query string, which is the part of a URL that appears after a ? 
    character, if any.\nURI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\nBODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet .\nSINGLE_QUERY_ARG : The parameter in the query string that you will inspect, such as UserName or SalesRegion . The maximum length for SINGLE_QUERY_ARG is 30 characters.\nALL_QUERY_ARGS : Similar to SINGLE_QUERY_ARG , but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString .\n\n\nData (string) --When the value of Type is HEADER , enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer . The name of the header is not case sensitive.\nWhen the value of Type is SINGLE_QUERY_ARG , enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion . The parameter name is not case sensitive.\nIf the value of Type is any other value, omit Data .\n\n\n\nTextTransformation (string) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. 
    If you specify a transformation, AWS WAF performs the transformation on RegexPatternSet before inspecting a request for a match.\nYou can only specify a single type of TextTransformation.\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system commandline command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want to perform any text transformations.\n\nRegexPatternSetId (string) -- [REQUIRED]The RegexPatternSetId for a RegexPatternSet . 
    You use RegexPatternSetId to get information about a RegexPatternSet (see GetRegexPatternSet ), update a RegexPatternSet (see UpdateRegexPatternSet ), insert a RegexPatternSet into a RegexMatchSet or delete one from a RegexMatchSet (see UpdateRegexMatchSet ), and delete a RegexPatternSet from AWS WAF (see DeleteRegexPatternSet ).\n\nRegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets .\n\n\n\n\n\n\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    ChangeToken (string) --
    The ChangeToken that you used to submit the UpdateRegexMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFDisallowedNameException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFNonexistentContainerException
    WAFRegional.Client.exceptions.WAFInvalidOperationException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    :return: {
    'ChangeToken': 'string'
    }
    :returns:
    Create a RegexMatchSet. For more information, see CreateRegexMatchSet .
    Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.
    Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the identifier of the RegexPatternSet that contain the regular expression patterns you want AWS WAF to watch for.
    """
    pass
def update_regex_pattern_set(RegexPatternSetId=None, Updates=None, ChangeToken=None):
    """
    Inserts or deletes RegexPatternString objects in a RegexPatternSet . For each RegexPatternString object, you specify the following values:
    For example, you can create a RegexPatternString such as B[a@]dB[o0]t . AWS WAF will match this RegexPatternString to:
    To create and configure a RegexPatternSet , perform the following steps:
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.update_regex_pattern_set(
    RegexPatternSetId='string',
    Updates=[
    {
    'Action': 'INSERT'|'DELETE',
    'RegexPatternString': 'string'
    },
    ],
    ChangeToken='string'
    )
    :type RegexPatternSetId: string
    :param RegexPatternSetId: [REQUIRED]\nThe RegexPatternSetId of the RegexPatternSet that you want to update. RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets .\n
    :type Updates: list
    :param Updates: [REQUIRED]\nAn array of RegexPatternSetUpdate objects that you want to insert into or delete from a RegexPatternSet .\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nIn an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether to insert or delete a RegexPatternString and includes the settings for the RegexPatternString .\n\nAction (string) -- [REQUIRED]Specifies whether to insert or delete a RegexPatternString .\n\nRegexPatternString (string) -- [REQUIRED]Specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t .\n\n\n\n\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    ChangeToken (string) --
    The ChangeToken that you used to submit the UpdateRegexPatternSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFNonexistentContainerException
    WAFRegional.Client.exceptions.WAFInvalidOperationException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    WAFRegional.Client.exceptions.WAFInvalidRegexPatternException
    :return: {
    'ChangeToken': 'string'
    }
    :returns:
    Example strings that the RegexPatternString B[a@]dB[o0]t above would match:
    BadBot
    BadB0t
    B@dBot
    B@dB0t
    """
    pass
def update_rule(RuleId=None, ChangeToken=None, Updates=None):
    """
    Inserts or deletes Predicate objects in a Rule . Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet , that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule , a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose that you add the following to a Rule :
    You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule . For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.
    To create and configure a Rule , perform the following steps:
    If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    Examples
    The following example deletes a Predicate object in a rule with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5.
    Expected Output:
    :example: response = client.update_rule(
    RuleId='string',
    ChangeToken='string',
    Updates=[
    {
    'Action': 'INSERT'|'DELETE',
    'Predicate': {
    'Negated': True|False,
    'Type': 'IPMatch'|'ByteMatch'|'SqlInjectionMatch'|'GeoMatch'|'SizeConstraint'|'XssMatch'|'RegexMatch',
    'DataId': 'string'
    }
    },
    ]
    )
    :type RuleId: string
    :param RuleId: [REQUIRED]\nThe RuleId of the Rule that you want to update. RuleId is returned by CreateRule and by ListRules .\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :type Updates: list
    :param Updates: [REQUIRED]\nAn array of RuleUpdate objects that you want to insert into or delete from a Rule . For more information, see the applicable data types:\n\nRuleUpdate : Contains Action and Predicate\nPredicate : Contains DataId , Negated , and Type\nFieldToMatch : Contains Data and Type\n\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nSpecifies a Predicate (such as an IPSet ) and indicates whether you want to add it to a Rule or delete it from a Rule .\n\nAction (string) -- [REQUIRED]Specify INSERT to add a Predicate to a Rule . Use DELETE to remove a Predicate from a Rule .\n\nPredicate (dict) -- [REQUIRED]The ID of the Predicate (such as an IPSet ) that you want to add to a Rule .\n\nNegated (boolean) -- [REQUIRED]Set Negated to False if you want AWS WAF to allow, block, or count requests based on the settings in the specified ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , or SizeConstraintSet . For example, if an IPSet includes the IP address 192.0.2.44 , AWS WAF will allow or block requests based on that IP address.\nSet Negated to True if you want AWS WAF to allow or block a request based on the negation of the settings in the ByteMatchSet , IPSet , SqlInjectionMatchSet , XssMatchSet , RegexMatchSet , GeoMatchSet , or SizeConstraintSet . For example, if an IPSet includes the IP address 192.0.2.44 , AWS WAF will allow, block, or count requests based on all IP addresses except 192.0.2.44 .\n\nType (string) -- [REQUIRED]The type of predicate in a Rule , such as ByteMatch or IPSet .\n\nDataId (string) -- [REQUIRED]A unique identifier for a predicate in a Rule , such as ByteMatchSetId or IPSetId . 
    The ID is returned by the corresponding Create or List command.\n\n\n\n\n\n\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    ChangeToken (string) --
    The ChangeToken that you used to submit the UpdateRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFInvalidAccountException
    WAFRegional.Client.exceptions.WAFInvalidOperationException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    WAFRegional.Client.exceptions.WAFNonexistentContainerException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFReferencedItemException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    Examples
    The following example deletes a Predicate object in a rule with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5.
    response = client.update_rule(
    ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
    RuleId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
    Updates=[
    {
    'Action': 'DELETE',
    'Predicate': {
    'DataId': 'MyByteMatchSetID',
    'Negated': False,
    'Type': 'ByteMatch',
    },
    },
    ],
    )
    print(response)
    Expected Output:
    {
    'ChangeToken': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
    'ResponseMetadata': {
    '...': '...',
    },
    }
    :return: {
    'ChangeToken': 'string'
    }
    :returns:
    To create and configure a Rule , perform the following steps:
    Create and update the predicates that you want to include in the Rule .
    Create the Rule . See CreateRule .
    Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.
    Submit an UpdateRule request to add predicates to the Rule .
    Create and update a WebACL that contains the Rule . See CreateWebACL .
    """
    pass
def update_rule_group(RuleGroupId=None, Updates=None, ChangeToken=None):
    """
    Inserts or deletes ActivatedRule objects in a RuleGroup .
    You can only insert REGULAR rules into a rule group.
    You can have a maximum of ten rules per rule group.
    To create and configure a RuleGroup , perform the following steps:
    If you want to replace one Rule with another, you delete the existing one and add the new one.
    For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
    See also: AWS API Documentation
    Exceptions
    :example: response = client.update_rule_group(
    RuleGroupId='string',
    Updates=[
    {
    'Action': 'INSERT'|'DELETE',
    'ActivatedRule': {
    'Priority': 123,
    'RuleId': 'string',
    'Action': {
    'Type': 'BLOCK'|'ALLOW'|'COUNT'
    },
    'OverrideAction': {
    'Type': 'NONE'|'COUNT'
    },
    'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
    'ExcludedRules': [
    {
    'RuleId': 'string'
    },
    ]
    }
    },
    ],
    ChangeToken='string'
    )
    :type RuleGroupId: string
    :param RuleGroupId: [REQUIRED]\nThe RuleGroupId of the RuleGroup that you want to update. RuleGroupId is returned by CreateRuleGroup and by ListRuleGroups .\n
    :type Updates: list
    :param Updates: [REQUIRED]\nAn array of RuleGroupUpdate objects that you want to insert into or delete from a RuleGroup .\nYou can only insert REGULAR rules into a rule group.\n\nActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nSpecifies an ActivatedRule and indicates whether you want to add it to a RuleGroup or delete it from a RuleGroup .\n\nAction (string) -- [REQUIRED]Specify INSERT to add an ActivatedRule to a RuleGroup . Use DELETE to remove an ActivatedRule from a RuleGroup .\n\nActivatedRule (dict) -- [REQUIRED]The ActivatedRule object specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL , and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW , BLOCK , or COUNT ).\n\nPriority (integer) -- [REQUIRED]Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL , the values don\'t need to be consecutive.\n\nRuleId (string) -- [REQUIRED]The RuleId for a Rule . 
    You use RuleId to get more information about a Rule (see GetRule ), update a Rule (see UpdateRule ), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL ), or delete a Rule from AWS WAF (see DeleteRule ).\n\nRuleId is returned by CreateRule and by ListRules .\n\nAction (dict) --Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule . Valid values for Action include the following:\n\nALLOW : CloudFront responds with the requested object.\nBLOCK : CloudFront responds with an HTTP 403 (Forbidden) status code.\nCOUNT : AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.\n\n\nActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case, you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .\n\nType (string) -- [REQUIRED]Specifies how you want AWS WAF to respond to requests that match the settings in a Rule . Valid settings include the following:\n\nALLOW : AWS WAF allows requests\nBLOCK : AWS WAF blocks requests\nCOUNT : AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can\'t specify COUNT for the default action for a WebACL .\n\n\n\n\nOverrideAction (dict) --Use the OverrideAction to test your RuleGroup .\nAny rule in a RuleGroup can potentially block a request. If you set the OverrideAction to None , the RuleGroup will block a request if any individual rule in the RuleGroup matches the request and is configured to block that request. However if you first want to test the RuleGroup , set the OverrideAction to Count . 
    The RuleGroup will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests .\n\nActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .\n\nType (string) -- [REQUIRED]\nCOUNT overrides the action specified by the individual rule within a RuleGroup . If set to NONE , the rule\'s action will take place.\n\n\n\nType (string) --The rule type, either REGULAR , as defined by Rule , RATE_BASED , as defined by RateBasedRule , or GROUP , as defined by RuleGroup . The default is REGULAR. Although this field is optional, be aware that if you try to add a RATE_BASED rule to a web ACL without setting the type, the UpdateWebACL request will fail because the request tries to add a REGULAR rule with the specified ID, which does not exist.\n\nExcludedRules (list) --An array of rules to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup .\nSometimes it is necessary to troubleshoot rule groups that are blocking traffic unexpectedly (false positives). One troubleshooting technique is to identify the specific rule within the rule group that is blocking the legitimate traffic and then disable (exclude) that particular rule. You can exclude rules from both your own rule groups and AWS Marketplace rule groups that have been associated with a web ACL.\nSpecifying ExcludedRules does not remove those rules from the rule group. Rather, it changes the action for the rules to COUNT . Therefore, requests that match an ExcludedRule are counted but not blocked. 
    The RuleGroup owner will receive COUNT metrics for each ExcludedRule .\nIf you want to exclude rules from a rule group that is already associated with a web ACL, perform the following steps:\n\nUse the AWS WAF logs to identify the IDs of the rules that you want to exclude. For more information about the logs, see Logging Web ACL Traffic Information .\nSubmit an UpdateWebACL request that has two actions:\nThe first action deletes the existing rule group from the web ACL. That is, in the UpdateWebACL request, the first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be the rule group that contains the rules that you want to exclude.\nThe second action inserts the same rule group back in, but specifying the rules to exclude. That is, the second Updates:Action should be INSERT , Updates:ActivatedRule:RuleId should be the rule group that you just removed, and ExcludedRules should contain the rules that you want to exclude.\n\n\n\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nThe rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup . The rule must belong to the RuleGroup that is specified by the ActivatedRule .\n\nRuleId (string) -- [REQUIRED]The unique identifier for the rule to exclude from the rule group.\n\n\n\n\n\n\n\n\n\n\n
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
    :rtype: dict
    ReturnsResponse Syntax
    {
    'ChangeToken': 'string'
    }
    Response Structure
    (dict) --
    ChangeToken (string) --
    The ChangeToken that you used to submit the UpdateRuleGroup request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
    Exceptions
    WAFRegional.Client.exceptions.WAFStaleDataException
    WAFRegional.Client.exceptions.WAFInternalErrorException
    WAFRegional.Client.exceptions.WAFNonexistentContainerException
    WAFRegional.Client.exceptions.WAFNonexistentItemException
    WAFRegional.Client.exceptions.WAFInvalidOperationException
    WAFRegional.Client.exceptions.WAFLimitsExceededException
    WAFRegional.Client.exceptions.WAFInvalidParameterException
    :return: {
    'ChangeToken': 'string'
    }
    :returns:
    RuleGroupId (string) -- [REQUIRED]
    The RuleGroupId of the RuleGroup that you want to update. RuleGroupId is returned by CreateRuleGroup and by ListRuleGroups .
    Updates (list) -- [REQUIRED]
    An array of RuleGroupUpdate objects that you want to insert into or delete from a RuleGroup .
    You can only insert REGULAR rules into a rule group.
    ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    Specifies an ActivatedRule and indicates whether you want to add it to a RuleGroup or delete it from a RuleGroup .
    Action (string) -- [REQUIRED]Specify INSERT to add an ActivatedRule to a RuleGroup . Use DELETE to remove an ActivatedRule from a RuleGroup .
    ActivatedRule (dict) -- [REQUIRED]The ActivatedRule object specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL , and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW , BLOCK , or COUNT ).
    Priority (integer) -- [REQUIRED]Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL , the values don\'t need to be consecutive.
    RuleId (string) -- [REQUIRED]The RuleId for a Rule . You use RuleId to get more information about a Rule (see GetRule ), update a Rule (see UpdateRule ), insert a Rule into a WebACL or delete one from a WebACL (see UpdateWebACL ), or delete a Rule from AWS WAF (see DeleteRule ).
    RuleId is returned by CreateRule and by ListRules .
    Action (dict) --Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule . Valid values for Action include the following:
    ALLOW : CloudFront responds with the requested object.
    BLOCK : CloudFront responds with an HTTP 403 (Forbidden) status code.
    COUNT : AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
    ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case, you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .
    Type (string) -- [REQUIRED]Specifies how you want AWS WAF to respond to requests that match the settings in a Rule . Valid settings include the following:
    ALLOW : AWS WAF allows requests
    BLOCK : AWS WAF blocks requests
    COUNT : AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can\'t specify COUNT for the default action for a WebACL .
    OverrideAction (dict) --Use the OverrideAction to test your RuleGroup .
    Any rule in a RuleGroup can potentially block a request. If you set the OverrideAction to None , the RuleGroup will block a request if any individual rule in the RuleGroup matches the request and is configured to block that request. However if you first want to test the RuleGroup , set the OverrideAction to Count . The RuleGroup will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests .
    ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .
    Type (string) -- [REQUIRED]
    COUNT overrides the action specified by the individual rule within a RuleGroup . If set to NONE , the rule\'s action will take place.
    Type (string) --The rule type, either REGULAR , as defined by Rule , RATE_BASED , as defined by RateBasedRule , or GROUP , as defined by RuleGroup . The default is REGULAR. Although this field is optional, be aware that if you try to add a RATE_BASED rule to a web ACL without setting the type, the UpdateWebACL request will fail because the request tries to add a REGULAR rule with the specified ID, which does not exist.
    ExcludedRules (list) --An array of rules to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup .
    Sometimes it is necessary to troubleshoot rule groups that are blocking traffic unexpectedly (false positives). One troubleshooting technique is to identify the specific rule within the rule group that is blocking the legitimate traffic and then disable (exclude) that particular rule. You can exclude rules from both your own rule groups and AWS Marketplace rule groups that have been associated with a web ACL.
    Specifying ExcludedRules does not remove those rules from the rule group. Rather, it changes the action for the rules to COUNT . Therefore, requests that match an ExcludedRule are counted but not blocked. The RuleGroup owner will receive COUNT metrics for each ExcludedRule .
    If you want to exclude rules from a rule group that is already associated with a web ACL, perform the following steps:
    Use the AWS WAF logs to identify the IDs of the rules that you want to exclude. For more information about the logs, see Logging Web ACL Traffic Information .
    Submit an UpdateWebACL request that has two actions:
    The first action deletes the existing rule group from the web ACL. That is, in the UpdateWebACL request, the first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be the rule group that contains the rules that you want to exclude.
    The second action inserts the same rule group back in, but specifying the rules to exclude. That is, the second Updates:Action should be INSERT , Updates:ActivatedRule:RuleId should be the rule group that you just removed, and ExcludedRules should contain the rules that you want to exclude.
    (dict) --
    Note
    This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.
    For the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.
    The rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup . The rule must belong to the RuleGroup that is specified by the ActivatedRule .
    RuleId (string) -- [REQUIRED]The unique identifier for the rule to exclude from the rule group.
    ChangeToken (string) -- [REQUIRED]
    The value returned by the most recent call to GetChangeToken .
    """
    pass
def update_size_constraint_set(SizeConstraintSetId=None, ChangeToken=None, Updates=None):
    # NOTE(review): appears to be an auto-generated boto3 documentation stub for the
    # WAFRegional (AWS WAF Classic) client. The docstring below mirrors the service API
    # reference; the local body is a placeholder and implicitly returns None — the real
    # client method is presumably generated at runtime by botocore (TODO confirm).
    """
Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet . For each SizeConstraint object, you specify the following values:
For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.
To create and configure a SizeConstraintSet , perform the following steps:
For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
See also: AWS API Documentation
Exceptions
Examples
The following example deletes a SizeConstraint object (filters) in a size constraint set with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5.
Expected Output:
:example: response = client.update_size_constraint_set(
SizeConstraintSetId='string',
ChangeToken='string',
Updates=[
{
'Action': 'INSERT'|'DELETE',
'SizeConstraint': {
'FieldToMatch': {
'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
'Data': 'string'
},
'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE',
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123
}
},
]
)
:type SizeConstraintSetId: string
:param SizeConstraintSetId: [REQUIRED]\nThe SizeConstraintSetId of the SizeConstraintSet that you want to update. SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets .\n
:type ChangeToken: string
:param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
:type Updates: list
:param Updates: [REQUIRED]\nAn array of SizeConstraintSetUpdate objects that you want to insert into or delete from a SizeConstraintSet . For more information, see the applicable data types:\n\nSizeConstraintSetUpdate : Contains Action and SizeConstraint\nSizeConstraint : Contains FieldToMatch , TextTransformation , ComparisonOperator , and Size\nFieldToMatch : Contains Data and Type\n\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nSpecifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet .\n\nAction (string) -- [REQUIRED]Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet . Use DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet .\n\nSizeConstraint (dict) -- [REQUIRED]Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size , ComparisonOperator , and FieldToMatch to build an expression in the form of 'Size ComparisonOperator size in bytes of FieldToMatch '. If that expression is true, the SizeConstraint is considered to match.\n\nFieldToMatch (dict) -- [REQUIRED]Specifies where in a web request to look for the size constraint.\n\nType (string) -- [REQUIRED]The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:\n\nHEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data .\nMETHOD : The HTTP method, which indicated the type of operation that the request is asking the origin to perform. 
Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT .\nQUERY_STRING : A query string, which is the part of a URL that appears after a ? character, if any.\nURI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\nBODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet .\nSINGLE_QUERY_ARG : The parameter in the query string that you will inspect, such as UserName or SalesRegion . The maximum length for SINGLE_QUERY_ARG is 30 characters.\nALL_QUERY_ARGS : Similar to SINGLE_QUERY_ARG , but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString .\n\n\nData (string) --When the value of Type is HEADER , enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer . The name of the header is not case sensitive.\nWhen the value of Type is SINGLE_QUERY_ARG , enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion . The parameter name is not case sensitive.\nIf the value of Type is any other value, omit Data .\n\n\n\nTextTransformation (string) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. 
If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.\nYou can only specify a single type of TextTransformation.\nNote that if you choose BODY for the value of Type , you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.\n\nNONE\nSpecify NONE if you don\'t want to perform any text transformations.\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nComparisonOperator (string) -- [REQUIRED]The type of comparison you want AWS WAF to perform. AWS WAF uses this in combination with the provided Size and FieldToMatch to build an expression in the form of 'Size ComparisonOperator size in bytes of FieldToMatch '. If that expression is true, the SizeConstraint is considered to match.\n\nEQ : Used to test if the Size is equal to the size of the FieldToMatchNE : Used to test if the Size is not equal to the size of the FieldToMatch\nLE : Used to test if the Size is less than or equal to the size of the FieldToMatch\nLT : Used to test if the Size is strictly less than the size of the FieldToMatch\nGE : Used to test if the Size is greater than or equal to the size of the FieldToMatch\nGT : Used to test if the Size is strictly greater than the size of the FieldToMatch\n\n\nSize (integer) -- [REQUIRED]The size in bytes that you want AWS WAF to compare against the size of the specified FieldToMatch . AWS WAF uses this in combination with ComparisonOperator and FieldToMatch to build an expression in the form of 'Size ComparisonOperator size in bytes of FieldToMatch '. If that expression is true, the SizeConstraint is considered to match.\nValid values for size are 0 - 21474836480 bytes (0 - 20 GB).\nIf you specify URI for the value of Type , the / in the URI counts as one character. 
For example, the URI /logo.jpg is nine characters long.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'ChangeToken': 'string'
}
Response Structure
(dict) --
ChangeToken (string) --
The ChangeToken that you used to submit the UpdateSizeConstraintSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
Exceptions
WAFRegional.Client.exceptions.WAFStaleDataException
WAFRegional.Client.exceptions.WAFInternalErrorException
WAFRegional.Client.exceptions.WAFInvalidAccountException
WAFRegional.Client.exceptions.WAFInvalidOperationException
WAFRegional.Client.exceptions.WAFInvalidParameterException
WAFRegional.Client.exceptions.WAFNonexistentContainerException
WAFRegional.Client.exceptions.WAFNonexistentItemException
WAFRegional.Client.exceptions.WAFReferencedItemException
WAFRegional.Client.exceptions.WAFLimitsExceededException
Examples
The following example deletes a SizeConstraint object (filters) in a size constraint set with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5.
response = client.update_size_constraint_set(
ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
SizeConstraintSetId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
Updates=[
{
'Action': 'DELETE',
'SizeConstraint': {
'ComparisonOperator': 'GT',
'FieldToMatch': {
'Type': 'QUERY_STRING',
},
'Size': 0,
'TextTransformation': 'NONE',
},
},
],
)
print(response)
Expected Output:
{
'ChangeToken': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'ChangeToken': 'string'
}
:returns:
Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet .
Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.
Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.
"""
    # Documentation-only stub: no implementation here.
    pass
def update_sql_injection_match_set(SqlInjectionMatchSetId=None, ChangeToken=None, Updates=None):
    # NOTE(review): appears to be an auto-generated boto3 documentation stub for the
    # WAFRegional (AWS WAF Classic) client. The docstring below mirrors the service API
    # reference; the local body is a placeholder and implicitly returns None.
    # Doc fix: the ':returns:' steps previously said "UpdateIPSet request" where the
    # surrounding text (this function, and the UpdateSqlInjectionMatchSet step below)
    # shows the intended operation is UpdateSqlInjectionMatchSet; corrected in place.
    """
Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet . For each SqlInjectionMatchTuple object, you specify the following values:
You use SqlInjectionMatchSet objects to specify which CloudFront requests that you want to allow, block, or count. For example, if you\'re receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.
To create and configure a SqlInjectionMatchSet , perform the following steps:
For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
See also: AWS API Documentation
Exceptions
Examples
The following example deletes a SqlInjectionMatchTuple object (filters) in a SQL injection match set with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5.
Expected Output:
:example: response = client.update_sql_injection_match_set(
SqlInjectionMatchSetId='string',
ChangeToken='string',
Updates=[
{
'Action': 'INSERT'|'DELETE',
'SqlInjectionMatchTuple': {
'FieldToMatch': {
'Type': 'URI'|'QUERY_STRING'|'HEADER'|'METHOD'|'BODY'|'SINGLE_QUERY_ARG'|'ALL_QUERY_ARGS',
'Data': 'string'
},
'TextTransformation': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
}
},
]
)
:type SqlInjectionMatchSetId: string
:param SqlInjectionMatchSetId: [REQUIRED]\nThe SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets .\n
:type ChangeToken: string
:param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
:type Updates: list
:param Updates: [REQUIRED]\nAn array of SqlInjectionMatchSetUpdate objects that you want to insert into or delete from a SqlInjectionMatchSet . For more information, see the applicable data types:\n\nSqlInjectionMatchSetUpdate : Contains Action and SqlInjectionMatchTuple\nSqlInjectionMatchTuple : Contains FieldToMatch and TextTransformation\nFieldToMatch : Contains Data and Type\n\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nSpecifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet .\n\nAction (string) -- [REQUIRED]Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet . Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet .\n\nSqlInjectionMatchTuple (dict) -- [REQUIRED]Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.\n\nFieldToMatch (dict) -- [REQUIRED]Specifies where in a web request to look for snippets of malicious SQL code.\n\nType (string) -- [REQUIRED]The part of the web request that you want AWS WAF to search for a specified string. Parts of a request that you can search include the following:\n\nHEADER : A specified request header, for example, the value of the User-Agent or Referer header. If you choose HEADER for the type, specify the name of the header in Data .\nMETHOD : The HTTP method, which indicated the type of operation that the request is asking the origin to perform. 
Amazon CloudFront supports the following methods: DELETE , GET , HEAD , OPTIONS , PATCH , POST , and PUT .\nQUERY_STRING : A query string, which is the part of a URL that appears after a ? character, if any.\nURI : The part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\nBODY : The part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form. The request body immediately follows the request headers. Note that only the first 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow or block requests based on the length of the body, you can create a size constraint set. For more information, see CreateSizeConstraintSet .\nSINGLE_QUERY_ARG : The parameter in the query string that you will inspect, such as UserName or SalesRegion . The maximum length for SINGLE_QUERY_ARG is 30 characters.\nALL_QUERY_ARGS : Similar to SINGLE_QUERY_ARG , but rather than inspecting a single parameter, AWS WAF will inspect all parameters within the query for the value or regex pattern that you specify in TargetString .\n\n\nData (string) --When the value of Type is HEADER , enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer . The name of the header is not case sensitive.\nWhen the value of Type is SINGLE_QUERY_ARG , enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion . The parameter name is not case sensitive.\nIf the value of Type is any other value, omit Data .\n\n\n\nTextTransformation (string) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. 
If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.\nYou can only specify a single type of TextTransformation.\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want to perform any text transformations.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'ChangeToken': 'string'
}
Response Structure
(dict) --
The response to an UpdateSqlInjectionMatchSets request.
ChangeToken (string) --
The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
Exceptions
WAFRegional.Client.exceptions.WAFInternalErrorException
WAFRegional.Client.exceptions.WAFInvalidAccountException
WAFRegional.Client.exceptions.WAFInvalidOperationException
WAFRegional.Client.exceptions.WAFInvalidParameterException
WAFRegional.Client.exceptions.WAFNonexistentContainerException
WAFRegional.Client.exceptions.WAFNonexistentItemException
WAFRegional.Client.exceptions.WAFStaleDataException
WAFRegional.Client.exceptions.WAFLimitsExceededException
Examples
The following example deletes a SqlInjectionMatchTuple object (filters) in a SQL injection match set with the ID example1ds3t-46da-4fdb-b8d5-abc321j569j5.
response = client.update_sql_injection_match_set(
ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
SqlInjectionMatchSetId='example1ds3t-46da-4fdb-b8d5-abc321j569j5',
Updates=[
{
'Action': 'DELETE',
'SqlInjectionMatchTuple': {
'FieldToMatch': {
'Type': 'QUERY_STRING',
},
'TextTransformation': 'URL_DECODE',
},
},
],
)
print(response)
Expected Output:
{
'ChangeToken': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'ChangeToken': 'string'
}
:returns:
Submit a CreateSqlInjectionMatchSet request.
Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.
Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.
"""
    # Documentation-only stub: no implementation here.
    pass
def update_web_acl(WebACLId=None, ChangeToken=None, Updates=None, DefaultAction=None):
    # NOTE(review): appears to be an auto-generated boto3 documentation stub for the
    # WAFRegional (AWS WAF Classic) client. The docstring below mirrors the service API
    # reference; the local body is a placeholder and implicitly returns None.
    # Doc fix: the final ':returns:' step previously read "you must first remove the rule
    # group from the web ACL, the re-insert it again" — corrected the typo to
    # "then re-insert it again".
    """
Inserts or deletes ActivatedRule objects in a WebACL . Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL , you specify the following values:
To create and configure a WebACL , perform the following steps:
Be aware that if you try to add a RATE_BASED rule to a web ACL without setting the rule type when first creating the rule, the UpdateWebACL request will fail because the request tries to add a REGULAR rule (the default rule type) with the specified ID, which does not exist.
For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide .
See also: AWS API Documentation
Exceptions
Examples
The following example deletes an ActivatedRule object in a WebACL with the ID webacl-1472061481310.
Expected Output:
:example: response = client.update_web_acl(
WebACLId='string',
ChangeToken='string',
Updates=[
{
'Action': 'INSERT'|'DELETE',
'ActivatedRule': {
'Priority': 123,
'RuleId': 'string',
'Action': {
'Type': 'BLOCK'|'ALLOW'|'COUNT'
},
'OverrideAction': {
'Type': 'NONE'|'COUNT'
},
'Type': 'REGULAR'|'RATE_BASED'|'GROUP',
'ExcludedRules': [
{
'RuleId': 'string'
},
]
}
},
],
DefaultAction={
'Type': 'BLOCK'|'ALLOW'|'COUNT'
}
)
:type WebACLId: string
:param WebACLId: [REQUIRED]\nThe WebACLId of the WebACL that you want to update. WebACLId is returned by CreateWebACL and by ListWebACLs .\n
:type ChangeToken: string
:param ChangeToken: [REQUIRED]\nThe value returned by the most recent call to GetChangeToken .\n
:type Updates: list
:param Updates: An array of updates to make to the WebACL .\nAn array of WebACLUpdate objects that you want to insert into or delete from a WebACL . For more information, see the applicable data types:\n\nWebACLUpdate : Contains Action and ActivatedRule\nActivatedRule : Contains Action , OverrideAction , Priority , RuleId , and Type . ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case, you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .\nWafAction : Contains Type\n\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nSpecifies whether to insert a Rule into or delete a Rule from a WebACL .\n\nAction (string) -- [REQUIRED]Specifies whether to insert a Rule into or delete a Rule from a WebACL .\n\nActivatedRule (dict) -- [REQUIRED]The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL , and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW , BLOCK , or COUNT ).\n\nPriority (integer) -- [REQUIRED]Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL , the values don\'t need to be consecutive.\n\nRuleId (string) -- [REQUIRED]The RuleId for a Rule . 
You use RuleId to get more information about a Rule (see GetRule ), update a Rule (see UpdateRule ), insert a Rule into a WebACL or delete a one from a WebACL (see UpdateWebACL ), or delete a Rule from AWS WAF (see DeleteRule ).\n\nRuleId is returned by CreateRule and by ListRules .\n\nAction (dict) --Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule . Valid values for Action include the following:\n\nALLOW : CloudFront responds with the requested object.\nBLOCK : CloudFront responds with an HTTP 403 (Forbidden) status code.\nCOUNT : AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.\n\n\nActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case, you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .\n\nType (string) -- [REQUIRED]Specifies how you want AWS WAF to respond to requests that match the settings in a Rule . Valid settings include the following:\n\nALLOW : AWS WAF allows requests\nBLOCK : AWS WAF blocks requests\nCOUNT : AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can\'t specify COUNT for the default action for a WebACL .\n\n\n\n\nOverrideAction (dict) --Use the OverrideAction to test your RuleGroup .\nAny rule in a RuleGroup can potentially block a request. If you set the OverrideAction to None , the RuleGroup will block a request if any individual rule in the RuleGroup matches the request and is configured to block that request. However if you first want to test the RuleGroup , set the OverrideAction to Count . 
The RuleGroup will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests .\n\nActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a WebACL . In this case you do not use ActivatedRule|Action . For all other update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction .\n\nType (string) -- [REQUIRED]\nCOUNT overrides the action specified by the individual rule within a RuleGroup . If set to NONE , the rule\'s action will take place.\n\n\n\nType (string) --The rule type, either REGULAR , as defined by Rule , RATE_BASED , as defined by RateBasedRule , or GROUP , as defined by RuleGroup . The default is REGULAR. Although this field is optional, be aware that if you try to add a RATE_BASED rule to a web ACL without setting the type, the UpdateWebACL request will fail because the request tries to add a REGULAR rule with the specified ID, which does not exist.\n\nExcludedRules (list) --An array of rules to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup .\nSometimes it is necessary to troubleshoot rule groups that are blocking traffic unexpectedly (false positives). One troubleshooting technique is to identify the specific rule within the rule group that is blocking the legitimate traffic and then disable (exclude) that particular rule. You can exclude rules from both your own rule groups and AWS Marketplace rule groups that have been associated with a web ACL.\nSpecifying ExcludedRules does not remove those rules from the rule group. Rather, it changes the action for the rules to COUNT . Therefore, requests that match an ExcludedRule are counted but not blocked. 
The RuleGroup owner will receive COUNT metrics for each ExcludedRule .\nIf you want to exclude rules from a rule group that is already associated with a web ACL, perform the following steps:\n\nUse the AWS WAF logs to identify the IDs of the rules that you want to exclude. For more information about the logs, see Logging Web ACL Traffic Information .\nSubmit an UpdateWebACL request that has two actions:\nThe first action deletes the existing rule group from the web ACL. That is, in the UpdateWebACL request, the first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be the rule group that contains the rules that you want to exclude.\nThe second action inserts the same rule group back in, but specifying the rules to exclude. That is, the second Updates:Action should be INSERT , Updates:ActivatedRule:RuleId should be the rule group that you just removed, and ExcludedRules should contain the rules that you want to exclude.\n\n\n\n\n(dict) --\nNote\nThis is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.\n\nFor the latest version of AWS WAF , use the AWS WAFV2 API and see the AWS WAF Developer Guide . With the latest version, AWS WAF has a single set of endpoints for regional and global use.\n\nThe rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup . The rule must belong to the RuleGroup that is specified by the ActivatedRule .\n\nRuleId (string) -- [REQUIRED]The unique identifier for the rule to exclude from the rule group.\n\n\n\n\n\n\n\n\n\n\n
:type DefaultAction: dict
:param DefaultAction: A default action for the web ACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn\'t match the criteria in any of the rules in a web ACL.\n\nType (string) -- [REQUIRED]Specifies how you want AWS WAF to respond to requests that match the settings in a Rule . Valid settings include the following:\n\nALLOW : AWS WAF allows requests\nBLOCK : AWS WAF blocks requests\nCOUNT : AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can\'t specify COUNT for the default action for a WebACL .\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'ChangeToken': 'string'
}
Response Structure
(dict) --
ChangeToken (string) --
The ChangeToken that you used to submit the UpdateWebACL request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus .
Exceptions
WAFRegional.Client.exceptions.WAFStaleDataException
WAFRegional.Client.exceptions.WAFInternalErrorException
WAFRegional.Client.exceptions.WAFInvalidAccountException
WAFRegional.Client.exceptions.WAFInvalidOperationException
WAFRegional.Client.exceptions.WAFInvalidParameterException
WAFRegional.Client.exceptions.WAFNonexistentContainerException
WAFRegional.Client.exceptions.WAFNonexistentItemException
WAFRegional.Client.exceptions.WAFReferencedItemException
WAFRegional.Client.exceptions.WAFLimitsExceededException
WAFRegional.Client.exceptions.WAFSubscriptionNotFoundException
Examples
The following example deletes an ActivatedRule object in a WebACL with the ID webacl-1472061481310.
response = client.update_web_acl(
ChangeToken='abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
DefaultAction={
'Type': 'ALLOW',
},
Updates=[
{
'Action': 'DELETE',
'ActivatedRule': {
'Action': {
'Type': 'ALLOW',
},
'Priority': 1,
'RuleId': 'WAFRule-1-Example',
},
},
],
WebACLId='webacl-1472061481310',
)
print(response)
Expected Output:
{
'ChangeToken': 'abcd12f2-46da-4fdb-b8d5-fbd4c466928f',
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'ChangeToken': 'string'
}
:returns:
Create and update the predicates that you want to include in Rules . For more information, see CreateByteMatchSet , UpdateByteMatchSet , CreateIPSet , UpdateIPSet , CreateSqlInjectionMatchSet , and UpdateSqlInjectionMatchSet .
Create and update the Rules that you want to include in the WebACL . For more information, see CreateRule and UpdateRule .
Create a WebACL . See CreateWebACL .
Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.
Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL , to specify the default action, and to associate the WebACL with a CloudFront distribution. The ActivatedRule can be a rule group. If you specify a rule group as your ActivatedRule , you can exclude specific rules from that rule group. If you already have a rule group associated with a web ACL and want to submit an UpdateWebACL request to exclude certain rules from that rule group, you must first remove the rule group from the web ACL, then re-insert it again, specifying the excluded rules. For details, see ActivatedRule$ExcludedRules .
"""
    # Documentation-only stub: no implementation here.
    pass
def update_xss_match_set(XssMatchSetId=None, ChangeToken=None, Updates=None):
    """
    Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet.

    Documentation-only stub: the real request is dispatched by the botocore
    client machinery, so this body is intentionally empty and returns None.

    :type XssMatchSetId: string
    :param XssMatchSetId: [REQUIRED] The XssMatchSetId of the XssMatchSet that
        you want to update. XssMatchSetId is returned by CreateXssMatchSet and
        by ListXssMatchSets.
    :type ChangeToken: string
    :param ChangeToken: [REQUIRED] The value returned by the most recent call
        to GetChangeToken.
    :type Updates: list
    :param Updates: [REQUIRED] An array of XssMatchSetUpdate objects that you
        want to insert into or delete from an XssMatchSet. Each update carries
        an Action ('INSERT'|'DELETE') and an XssMatchTuple (FieldToMatch plus
        TextTransformation).
    :rtype: dict
    :return: {'ChangeToken': 'string'}
    """
    pass
| 45.708312
| 9,656
| 0.722908
| 61,270
| 452,558
| 5.317088
| 0.024465
| 0.017128
| 0.03713
| 0.007861
| 0.92544
| 0.911151
| 0.899481
| 0.883691
| 0.871201
| 0.858327
| 0
| 0.011014
| 0.207757
| 452,558
| 9,900
| 9,657
| 45.712929
| 0.89762
| 0.981454
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
636c1e9663543fc6003ed2347eb172f9015791c9
| 46,568
|
py
|
Python
|
budget_inputs.py
|
CPOMUCL/Budget_tool
|
eca9b1064a4d11d07028ef631b5780c6aeebe0b6
|
[
"MIT"
] | null | null | null |
budget_inputs.py
|
CPOMUCL/Budget_tool
|
eca9b1064a4d11d07028ef631b5780c6aeebe0b6
|
[
"MIT"
] | null | null | null |
budget_inputs.py
|
CPOMUCL/Budget_tool
|
eca9b1064a4d11d07028ef631b5780c6aeebe0b6
|
[
"MIT"
] | null | null | null |
import numpy as np
import datetime as dt
import struct
from dateutil.relativedelta import relativedelta
from netCDF4 import Dataset
from os.path import exists
from scipy.interpolate import griddata
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
from scipy.spatial import cKDTree
class Pathfinder():
    """
    Forcing class for the budget.

    Loads NSIDC Pathfinder v4.1 daily ice-motion NetCDF files efficiently,
    buffering one calendar year of u/v at a time.
    """
    def __init__(self, ppath, grid=False):
        # ppath: directory containing icemotion_daily_nh_25km_*_v4.1.nc files
        # grid : optional grid object with a .lats array; when supplied,
        #        points poleward of 88N (the pole hole) are filled with the
        #        slice mean in get_vels
        self.name = 'Pathfinder'
        self.path = ppath
        self.vyear_load = 0          # year currently held in self.u/self.v
        self.vels_loaded = False     # True once a year has been buffered
        if type(grid) == bool:
            self.check_grid = False
        else:
            self.check_grid = True
            self.grid = grid

    def get_dates(self, time_start, time_end):
        """
        returns the all encompassing date list for use with the forcing object

        Scans one yearly file per year in [time_start, time_end] and records
        every time entry found (entries are days since 1970-01-01).
        """
        dates = []
        d0 = dt.datetime(1970, 1, 1)
        n_yrs = (time_end.year - time_start.year)+1
        for y in range(n_yrs):
            yu = time_start.year + y
            f_name = 'icemotion_daily_nh_25km_'+str(yu)+'0101_'+str(yu)+'1231_v4.1.nc'
            if exists(self.path+f_name):
                f_nc = Dataset(self.path+f_name)
                [dates.append(d0 + relativedelta(days = d))
                 for d in f_nc['time'][:]]
                f_nc.close()
        self.dates = dates
        print(self.name+' Found '+str(np.shape(dates)[0])+' dates')

    # next function will take a list of dates and return an appropriately
    # orientated pair of velocity arrays
    def get_vels(self, dates_u, verbos=False):
        """
        Return (u, v) daily ice velocity in m/s for the dates in dates_u.

        All dates must fall within a single calendar year (one file); the
        year is (re)loaded only when it differs from the buffered one.
        Output arrays are (n_days, x, y); file units are cm/s.
        """
        d0 = dt.datetime(1970, 1, 1)
        # does dates_u cover one year or more
        if (dates_u[-1].year - dates_u[0].year) == 0:
            # one year, one file
            yu = dates_u[0].year
            if ((self.vyear_load != yu) or (not self.vels_loaded)):
                print('loading new year of data: '+str(yu))
                f_name = 'icemotion_daily_nh_25km_'+str(yu)+'0101_'+str(yu)+'1231_v4.1.nc'
                f_nc = Dataset(self.path+f_name)
                self.u = f_nc['u'][:]
                self.v = f_nc['v'][:]
                self.u[self.u.mask] = np.nan
                self.v[self.v.mask] = np.nan
                f_nc.close()
                self.vyear_load = yu
                self.vels_loaded = True
            p0 = dates_u[ 0].timetuple().tm_yday - 1
            p1 = dates_u[-1].timetuple().tm_yday
            # transpose to (time, x, y) and convert cm/s -> m/s
            datau = self.u[p0:p1, :, :].transpose((0, 2, 1))/100
            datav = self.v[p0:p1, :, :].transpose((0, 2, 1))/100
            if self.check_grid:
                for n in range(np.shape(datau)[0]):
                    # BUG FIX: the original assigned the function object
                    # np.nanmean instead of calling it. Fill the pole hole
                    # (>88N) with the slice's nanmean — TODO confirm the
                    # intended fill value with the original author.
                    datau[n][self.grid.lats > 88] = np.nanmean(datau[n])
                    datav[n][self.grid.lats > 88] = np.nanmean(datav[n])
            return datau, datav
class PIOMAS():
    """
    Forcing class for the budget.

    Loads PIOMAS daily model output from flat binary files (365 float32
    records per year, grid 120x360): velocity ('uiday.H<year>'), thickness
    ('hiday.H<year>') and concentration ('aiday.H<year>'). One year per
    variable is buffered at a time. time_smooth = t makes a single-date
    request return the mean of the (up to) 2*t+1-day window ending t days
    after the date.
    """
    def __init__(self, ppath, time_smooth=0):
        self.name = 'PIOMAS'
        self.path = ppath
        # year currently buffered, per variable
        self.vyear_load = 0
        self.hyear_load = 0
        self.ayear_load = 0
        self.vels_loaded = False
        self.hi_loaded = False
        self.aice_loaded = False
        self.tsmth = time_smooth

    def get_dates(self, time_start, time_end):
        """
        returns the all encompassing date list for use with the forcing object
        PIOMAS is a standardised list so we can just build a list

        Only days whose yearly file exists on disk are included.
        """
        dates = []
        n_yrs = (time_end.year - time_start.year)-1
        if n_yrs > -1:
            # spans more than one calendar year: partial first year,
            # n_yrs whole years, partial last year
            y0 = dt.datetime(time_start.year, 1, 1)
            ye = dt.datetime(time_start.year, 12, 31)
            data_f = self.path+'uiday.H'+y0.strftime('%Y')
            if exists(data_f):
                for d in range(time_start.timetuple().tm_yday-1,
                               ye.timetuple().tm_yday):
                    dates.append(y0 + relativedelta(days = d))
            for y in range(n_yrs):
                y0 += relativedelta(years=1)
                ye += relativedelta(years=1)
                data_f = self.path+'uiday.H'+y0.strftime('%Y')
                if exists(data_f):
                    for d in range(ye.timetuple().tm_yday):
                        dates.append(y0 + relativedelta(days = d))
            y0 += relativedelta(years=1)
            ye = time_end
            data_f = self.path+'uiday.H'+y0.strftime('%Y')
            if exists(data_f):
                for d in range(ye.timetuple().tm_yday):
                    dates.append(y0 + relativedelta(days = d))
        else:
            # single calendar year
            y0 = dt.datetime(time_start.year, 1, 1)
            data_f = self.path+'uiday.H'+y0.strftime('%Y')
            if exists(data_f):
                for d in range(time_start.timetuple().tm_yday-1,
                               time_end.timetuple().tm_yday):
                    dates.append(y0 + relativedelta(days = d))
        self.dates = dates
        print(self.name+' Found '+str(np.shape(dates)[0])+' dates')

    # next function will take a list of dates and return an appropriately
    # orientated pair of velocity arrays
    def get_vels(self, dates_u, verbos=False):
        """
        Return (u, v) daily ice velocity, each (n_days, 360, 120), for
        dates_u (all within one calendar year). With tsmth > 0 and a single
        date, returns the window mean instead.
        """
        # does dates_u cover one year or more
        if (dates_u[-1].year - dates_u[0].year) == 0:
            # one year, one file
            yu = dates_u[0].year
            if ((self.vyear_load != yu) or (not self.vels_loaded)):
                print('loading new year of data: '+str(yu))
                data_f = self.path+'uiday.H'+str(yu)
                with open(data_f, mode='rb') as file:
                    filecontent = file.read()
                    data = struct.unpack("f" * (len(filecontent)// 4), filecontent)
                # record layout: (day, component[u,v], y, x)
                self.vels = np.asarray(data).reshape(365, 2, 120, 360)
                self.vyear_load = yu
                self.vels_loaded = True
            p0 = dates_u[ 0].timetuple().tm_yday - 1
            p1 = dates_u[-1].timetuple().tm_yday
            if self.tsmth < 1:
                datau = self.vels[p0:p1, 0, :, :].transpose((0, 2, 1))
                datav = self.vels[p0:p1, 1, :, :].transpose((0, 2, 1))
                return datau, datav
            elif np.shape(dates_u)[0] > 1:
                print('time smoothing not compatible with multiple dates')
                datau = self.vels[p0:p1, 0, :, :].transpose((0, 2, 1))
                datav = self.vels[p0:p1, 1, :, :].transpose((0, 2, 1))
                return datau, datav
            else:
                #### each time slice is the mean of 2*tsmth+1
                p0 = np.maximum(p0-self.tsmth, 0)
                datau = self.vels[p0:p1+self.tsmth, 0, :, :].transpose((0, 2, 1))
                datav = self.vels[p0:p1+self.tsmth, 1, :, :].transpose((0, 2, 1))
                datau2 = np.expand_dims(np.nanmean(datau, axis=0), 0)
                datav2 = np.expand_dims(np.nanmean(datav, axis=0), 0)
                return datau2, datav2

    def get_hi(self, dates_u, verbos=False):
        """
        Return daily ice thickness, (n_days, 360, 120), for dates_u (all
        within one calendar year). With tsmth > 0 and a single date, returns
        the window mean instead.
        """
        # does dates_u cover one year or more
        if (dates_u[-1].year - dates_u[0].year) == 0:
            # one year, one file
            yu = dates_u[0].year
            if ((self.hyear_load != yu) or (not self.hi_loaded)):
                print('loading new year of data: '+str(yu))
                data_f = self.path+'hiday.H'+str(yu)
                with open(data_f, mode='rb') as file:
                    fileContent = file.read()
                    data = struct.unpack("f" * (len(fileContent)// 4), fileContent)
                self.hi = np.asarray(data).reshape(365, 120, 360)
                self.hyear_load = yu
                self.hi_loaded = True
            p0 = dates_u[ 0].timetuple().tm_yday - 1
            p1 = dates_u[-1].timetuple().tm_yday
            if self.tsmth < 1:
                data = self.hi[p0:p1, :, :].transpose((0, 2, 1))
                return data
            elif np.shape(dates_u)[0] > 1:
                print('Time smoothing not compatible with multiple dates')
                data = self.hi[p0:p1, :, :].transpose((0, 2, 1))
                return data
            else:
                #### each time slice is the mean of 2*tsmth+1
                p0 = np.maximum(p0-self.tsmth, 0)
                # BUG FIX: self.hi is 3-D (day, y, x); the original indexed it
                # with four indices [p0:p1+tsmth, 0, :, :] (copied from the
                # 4-D velocity array) and raised IndexError whenever tsmth > 0.
                data = self.hi[p0:p1+self.tsmth, :, :].transpose((0, 2, 1))
                data2 = np.expand_dims(np.nanmean(data, axis=0), 0)
                return data2

    def get_aice(self, dates_u, verbos=False):
        """
        Return daily ice concentration, (n_days, 360, 120), for dates_u (all
        within one calendar year). With tsmth > 0 and a single date, returns
        the window mean instead.
        """
        # does dates_u cover one year or more
        if (dates_u[-1].year - dates_u[0].year) == 0:
            # one year, one file
            yu = dates_u[0].year
            if ((self.ayear_load != yu) or (not self.aice_loaded)):
                print('loading new year of data: '+str(yu))
                data_f = self.path+'aiday.H'+str(yu)
                with open(data_f, mode='rb') as file:
                    fileContent = file.read()
                    data = struct.unpack("f" * (len(fileContent)// 4), fileContent)
                self.aice = np.asarray(data).reshape(365, 120, 360)
                self.ayear_load = yu
                self.aice_loaded = True
            p0 = dates_u[ 0].timetuple().tm_yday - 1
            p1 = dates_u[-1].timetuple().tm_yday
            if self.tsmth < 1:
                data = self.aice[p0:p1, :, :].transpose((0, 2, 1))
                return data
            elif np.shape(dates_u)[0] > 1:
                print('Time smoothing not compatible with multiple dates')
                data = self.aice[p0:p1, :, :].transpose((0, 2, 1))
                return data
            else:
                #### each time slice is the mean of 2*tsmth+1
                p0 = np.maximum(p0-self.tsmth, 0)
                # BUG FIX: same 4-index copy-paste bug as get_hi — self.aice
                # is 3-D, so the stray '0' index is removed.
                data = self.aice[p0:p1+self.tsmth, :, :].transpose((0, 2, 1))
                data2 = np.expand_dims(np.nanmean(data, axis=0), 0)
                return data2
class NSIDC_nt():
    """
    Forcing class for the budget.

    Loads NSIDC NASA-Team (nt) daily sea-ice concentration from flat binary
    files so the budget code can read concentration efficiently.
    """
    def __init__(self,ppath):
        # ppath: root directory holding /YYYY/nt_YYYYMMDD_*.bin files
        self.name = 'NSIDC_n'
        self.path = ppath
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_aice(self,dates_u,verbos=False):
        """
        Return daily ice concentration for each date in dates_u.

        Output is (n_dates, 448, 304); values are raw byte / 250 (fraction),
        with values > 1.0 (land / coast / flag codes) set to NaN.
        """
        # does dates_u cover one year or more
        #daily files
        dimY = 304   # polar-stereographic grid columns
        dimX = 448   # polar-stereographic grid rows
        d_no = np.shape(dates_u)[0]
        data = np.empty([d_no, dimX, dimY])
        for n,d in enumerate(dates_u):
            # sensor/version switch: f17 final files before 2020,
            # f18 near-real-time files from 2020 onward
            # infile = self.path+d.strftime('/%Y/')+"nt_"+d.strftime('%Y%m%d')+"_f17_v1.1_n.bin"
            if d.year<2020:
                # infile = self.path+"/nt_"+d.strftime('%Y%m%d')+"_f17_v1.1_n.bin"
                infile = self.path+d.strftime('/%Y/')+"nt_"+d.strftime('%Y%m%d')+"_f17_v1.1_n.bin"
            if d.year>2019:
                # infile = self.path+"/nt_"+d.strftime('%Y%m%d')+"_f18_nrt_n.bin"
                infile = self.path+d.strftime('/%Y/')+"nt_"+d.strftime('%Y%m%d')+"_f18_nrt_n.bin"
            # if d.year<2019:
            #     infile = self.path+"nt_"+d.strftime('%Y%m%d')+"_f17_v1.1_n.bin"
            # if d.year>2018:
            #     infile = self.path+"nt_"+d.strftime('%Y%m%d')+"_f18_nrt_n.bin"
            with open(infile, 'rb') as fr:
                hdr = fr.read(300)   # skip the 300-byte NSIDC file header
                ice = np.fromfile(fr, dtype=np.uint8)
            ice = ice.reshape(dimX,dimY)
            ice = np.flipud(ice)     # flip rows to match the budget grid orientation
            data[n] = ice / 250.
        data[data>1.0] = np.nan      # mask land / missing flag values
        return data
    def get_dates(self,time_start,time_end):
        """
        Build self.dates: every day from time_start-1 to time_end+1 for which
        a concentration file exists on disk.
        """
        # does dates_u cover one year or more
        #daily files
        dates_u = []
        d_no = (time_end-time_start).days +3
        # make sure we get the bracket points
        for dn in range(d_no):
            d = time_start+ relativedelta(days = dn - 1)
            if d.year<2020:
                infile = self.path+d.strftime('/%Y/')+"nt_"+d.strftime('%Y%m%d')+"_f17_v1.1_n.bin"
            if d.year>2019:
                infile = self.path+d.strftime('/%Y/')+"nt_"+d.strftime('%Y%m%d')+"_f18_nrt_n.bin"
            # check infile exists
            if exists(infile):
                dates_u.append(d)
            #if it does append dates_u
        self.dates= dates_u
        print(self.name+' Found '+str(np.shape(dates_u)[0])+' dates')
class NSIDC_bt():
    """
    Forcing class for the budget.

    Loads NSIDC Bootstrap (bt) daily sea-ice concentration from flat binary
    files so the budget code can read concentration efficiently.
    """
    def __init__(self, ppath):
        # ppath: root directory holding /YYYY/bt_YYYYMMDD_*.bin files
        self.name = 'NSIDC_b'
        self.path = ppath

    # next function takes a list of dates and returns an appropriately
    # orientated concentration array
    def get_aice(self, dates_u, verbos=False):
        """
        Return daily ice concentration for each date in dates_u.

        Output is (n_dates, 448, 304); raw int16 / 1000 (fraction), with
        values > 1.0 (land / flag codes) set to NaN.
        """
        n_cols = 304
        n_rows = 448
        n_days = np.shape(dates_u)[0]
        out = np.empty([n_days, n_rows, n_cols])
        for idx, day in enumerate(dates_u):
            fname = (self.path + day.strftime('/%Y/') + "bt_"
                     + day.strftime('%Y%m%d') + "_f17_v3.1_n.bin")
            with open(fname, 'rb') as fh:
                fh.read(0)  # no header on Bootstrap files
                grid = np.fromfile(fh, dtype="<i2")
            grid = np.flipud(grid.reshape(n_rows, n_cols))
            out[idx] = grid / 1000.
        out[out > 1.0] = np.nan
        return out

    def get_dates(self, time_start, time_end):
        """
        Build self.dates: every day from time_start-1 to time_end+1 for which
        a concentration file exists on disk.
        """
        found = []
        # +3 so the bracket points either side of the range are included
        for offset in range((time_end - time_start).days + 3):
            day = time_start + relativedelta(days = offset - 1)
            fname = (self.path + day.strftime('/%Y/') + "bt_"
                     + day.strftime('%Y%m%d') + "_f17_v3.1_n.bin")
            if exists(fname):
                found.append(day)
        self.dates = found
        print(self.name+' Found '+str(np.shape(found)[0])+' dates')
class AWI_SMOS_daily():
    """
    Forcing class for the budget.

    Loads merged CryoSat-2/SMOS (AWI) daily sea-ice thickness and
    concentration. Each daily value comes from a NetCDF file covering a
    7-day window centred on the requested date (date-3 .. date+3).
    """
    def __init__(self,ppath):
        # ppath: root directory holding YYYY/MM/ subdirectories of l4sit files
        self.name = 'AWI_SMOS_daily'
        self.path = ppath
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_hi(self,dates_u,verbos=False):
        """
        Return analysed sea-ice thickness, shape (n_dates, dx, dy), with
        masked points set to NaN. dates_u should contain only dates for
        which files exist (see get_dates).
        """
        blurb = 'W_XX-ESA,SMOS_CS2,NH_25KM_EASE2_'
        dn = np.shape(dates_u)[0]
        d = dates_u[0]
        # product version switch: v202_02 after 2019-06-01, v202_01 before
        if d > dt.datetime(2019,6,1):
            blurb2 = '_r_v202_02_l4sit.nc'
        else:
            blurb2 = '_r_v202_01_l4sit.nc'
        t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
        t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
        file = self.path+d.strftime('%Y/%m/')+blurb+t0+t1+blurb2
        f_nc = Dataset(file)
        # first date sizes the output array
        hi = f_nc['analysis_sea_ice_thickness'][0]
        hi[hi.mask] = np.nan
        dx,dy = hi.shape
        data = np.empty([dn,dx,dy])
        data[0] = hi
        f_nc.close()
        for n,d in enumerate(dates_u[1:]):
            if d > dt.datetime(2019,6,1):
                blurb2 = '_r_v202_02_l4sit.nc'
            else:
                blurb2 = '_r_v202_01_l4sit.nc'
            t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
            t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
            file = self.path+d.strftime('%Y/%m/')+blurb+t0+t1+blurb2
            f_nc = Dataset(file)
            hi = f_nc['analysis_sea_ice_thickness'][0]
            hi[hi.mask] = np.nan
            data[n+1] = hi
            f_nc.close()
        return data
    def get_aice(self,dates_u,verbos=False):
        """
        Return sea-ice concentration as a fraction (file stores percent, so
        values are divided by 100), shape (n_dates, dx, dy), masked points
        NaN.
        """
        blurb = 'W_XX-ESA,SMOS_CS2,NH_25KM_EASE2_'
        dn = np.shape(dates_u)[0]
        d = dates_u[0]
        if d > dt.datetime(2019,6,1):
            blurb2 = '_r_v202_02_l4sit.nc'
        else:
            blurb2 = '_r_v202_01_l4sit.nc'
        t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
        t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
        # NOTE(review): leading-slash pattern '/%Y/%m/' differs from the
        # '%Y/%m/' used in get_hi/get_dates; presumably self.path ends with
        # '/' so the double slash is harmless on POSIX — confirm.
        file = self.path+d.strftime('/%Y/%m/')+blurb+t0+t1+blurb2
        f_nc = Dataset(file)
        aice = f_nc['sea_ice_concentration'][0]
        aice[aice.mask] = np.nan
        dx,dy = aice.shape
        data = np.empty([dn,dx,dy])
        data[0] = aice/100
        f_nc.close()
        for n,d in enumerate(dates_u[1:]):
            if d > dt.datetime(2019,6,1):
                blurb2 = '_r_v202_02_l4sit.nc'
            else:
                blurb2 = '_r_v202_01_l4sit.nc'
            t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
            t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
            file = self.path+d.strftime('%Y/%m/')+blurb+t0+t1+blurb2
            f_nc = Dataset(file)
            aice = f_nc['sea_ice_concentration'][0]
            aice[aice.mask] = np.nan
            data[n+1] = aice/100
            f_nc.close()
        return data
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_dates(self,time_start,time_end):
        """
        Build self.dates: every day in [time_start, time_end] whose 7-day
        window file exists under self.path.
        """
        #daily files
        blurb = 'W_XX-ESA,SMOS_CS2,NH_25KM_EASE2_'
        dates_u = []
        dd = (time_end-time_start).days+1
        # make sure we get the bracket points
        for dn in range(dd):
            d = time_start+ relativedelta(days = dn)
            if d > dt.datetime(2019,6,1):
                blurb2 = '_r_v202_02_l4sit.nc'
            else:
                blurb2 = '_r_v202_01_l4sit.nc'
            t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
            t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
            file = self.path+d.strftime('%Y/%m/')+blurb+t0+t1+blurb2
            if exists(file):
                dates_u.append(d)
            #if it does append dates_u
        self.dates= dates_u
        print(self.name+' Found '+str(np.shape(dates_u)[0])+' dates')
class AWI_weekly():
    """
    Forcing class for the budget.

    Loads AWI CryoSat-2 weekly sea-ice thickness/concentration NetCDF files;
    each file covers a 7-day window centred on the nominal date.
    """
    def __init__(self,ppath):
        # ppath: root directory holding /YYYY/ subdirectories of weekly files
        self.name = 'AWI_weekly'
        self.path = ppath
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_hi(self,dates_u,verbos=False):
        """
        Return analysed sea-ice thickness, shape (n_dates, dx, dy), with
        masked points set to NaN. dates_u should be the weekly midpoints
        found by get_dates.
        """
        blurb = 'awi-siral-l3c-sithick-cryosat2-rep-nh_25km_ease2-'
        blurb2 = '-fv2p2.nc'
        dn = np.shape(dates_u)[0]
        d = dates_u[0]
        # file name spans midpoint-3 .. midpoint+3
        t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
        t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
        file = self.path+d.strftime('%Y/')+blurb+t0+t1+blurb2
        f_nc = Dataset(file)
        # first date sizes the output array
        hi = f_nc['analysis_sea_ice_thickness'][0]
        hi[hi.mask] = np.nan
        dx,dy = hi.shape
        data = np.empty([dn,dx,dy])
        data[0] = hi
        f_nc.close()
        for n,d in enumerate(dates_u[1:]):
            t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
            t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
            file = self.path+d.strftime('%Y/')+blurb+t0+t1+blurb2
            f_nc = Dataset(file)
            hi = f_nc['analysis_sea_ice_thickness'][0]
            hi[hi.mask] = np.nan
            data[n+1] = hi
            f_nc.close()
        return data
    def get_aice(self,dates_u,verbos=False):
        """
        Return sea-ice concentration as a fraction (file stores percent),
        shape (n_dates, dx, dy), masked points NaN.

        NOTE(review): the concentration field is read from the 'sithick'
        product file — presumably it ships a concentration variable; confirm
        against the product documentation.
        """
        blurb = 'awi-siral-l3c-sithick-cryosat2-rep-nh_25km_ease2-'
        blurb2 = '-fv2p2.nc'
        dn = np.shape(dates_u)[0]
        d = dates_u[0]
        t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
        t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
        # NOTE(review): '/%Y/' here vs '%Y/' in get_hi — harmless double
        # slash if self.path ends with '/'; confirm.
        file = self.path+d.strftime('/%Y/')+blurb+t0+t1+blurb2
        f_nc = Dataset(file)
        aice = f_nc['sea_ice_concentration'][0]
        aice[aice.mask] = np.nan
        dx,dy = aice.shape
        data = np.empty([dn,dx,dy])
        data[0] = aice/100
        f_nc.close()
        for n,d in enumerate(dates_u[1:]):
            t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
            t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
            file = self.path+d.strftime('%Y/')+blurb+t0+t1+blurb2
            f_nc = Dataset(file)
            aice = f_nc['sea_ice_concentration'][0]
            aice[aice.mask] = np.nan
            data[n+1] = aice/100
            f_nc.close()
        return data
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_dates(self,time_start,time_end):
        """
        Build self.dates: the weekly midpoints between time_start and
        time_end whose files exist. The first ever week is 2010-11-01 to
        2010-11-07 (midpoint 2010-11-04); weeks step in 7-day increments
        from there.
        """
        w0 = dt.datetime(2010,11,4)
        # first week index at/before time_start
        w1 = int((time_start-w0).days/7)
        # first week index after time_end
        w2 = int((time_end-w0).days/7)+1
        print(w1,w2)   # debug: week-index range scanned
        blurb = 'awi-siral-l3c-sithick-cryosat2-rep-nh_25km_ease2-'
        blurb2 = '-fv2p2.nc'
        dates_u = []
        # make sure we get the bracket points
        for w in range(w1,w2):
            d = w0+ relativedelta(days = w*7)
            t0 = (d - relativedelta(days = 3)).strftime('%Y%m%d_')
            t1 = (d + relativedelta(days = 3)).strftime('%Y%m%d')
            file = self.path+d.strftime('%Y/')+blurb+t0+t1+blurb2
            if exists(file):
                dates_u.append(d)
            #if it does append dates_u
        self.dates= dates_u
        print(self.name+' Found '+str(np.shape(dates_u)[0])+' dates')
class AWI_monthly():
    """
    Forcing class for the budget.

    Loads AWI CryoSat-2 monthly sea-ice thickness/concentration NetCDF files
    (one file per month, keyed YYYYMM) and builds a winter-only date list.
    """
    def __init__(self, ppath):
        # ppath: root directory holding /YYYY/ subdirectories of monthly files
        self.name = 'AWI_monthly'
        self.path = ppath

    # next function will take a list of dates and return an appropriately
    # orientated array
    def get_hi(self, dates_u, verbos=False):
        """
        Return monthly sea-ice thickness for each date in dates_u, shape
        (n_dates, dx, dy); each date maps to its month's file.
        """
        blurb = 'awi-siral-l3c-sithick-cryosat2-rep-nh_25km_ease2-'
        dn = np.shape(dates_u)[0]
        d = dates_u[0]
        dload = d.replace(day=1)     # files are keyed by YYYYMM
        file = self.path+dload.strftime('/%Y/')+blurb+dload.strftime('%Y%m')+'-fv2p2.nc'
        f_nc = Dataset(file)
        # first date sizes the output array
        hi = f_nc['sea_ice_thickness'][0]
        dx, dy = hi.shape
        data = np.empty([dn, dx, dy])
        data[0] = hi
        f_nc.close()
        for n, d in enumerate(dates_u[1:]):
            dload = d.replace(day=1)
            file = self.path+dload.strftime('/%Y/')+blurb+dload.strftime('%Y%m')+'-fv2p2.nc'
            f_nc = Dataset(file)
            hi = f_nc['sea_ice_thickness'][0]
            data[n+1] = hi
            f_nc.close()
        return data

    def get_aice(self, dates_u, verbos=False):
        """
        Return monthly sea-ice concentration as a fraction (file stores
        percent), shape (n_dates, dx, dy).
        """
        blurb = 'awi-siral-l3c-sithick-cryosat2-rep-nh_25km_ease2-'
        dn = np.shape(dates_u)[0]
        d = dates_u[0]
        dload = d.replace(day=1)
        file = self.path+dload.strftime('/%Y/')+blurb+dload.strftime('%Y%m')+'-fv2p2.nc'
        f_nc = Dataset(file)
        aice = f_nc['sea_ice_concentration'][0]
        dx, dy = aice.shape
        data = np.empty([dn, dx, dy])
        data[0] = aice/100
        f_nc.close()
        for n, d in enumerate(dates_u[1:]):
            dload = d.replace(day=1)
            file = self.path+dload.strftime('/%Y/')+blurb+dload.strftime('%Y%m')+'-fv2p2.nc'
            f_nc = Dataset(file)
            aice = f_nc['sea_ice_concentration'][0]
            data[n+1] = aice/100
            f_nc.close()
        return data

    def get_dates(self, time_start, time_end, fill_end_months=False):
        """
        Build self.dates: a mid-month date for every month in
        [time_start, time_end] (plus a bracket month) whose file exists,
        keeping only winter months (Oct-Apr). With fill_end_months=True,
        April additionally gets a month-end date and October a month-start
        date so the summer hole is bracketed by end points.
        """
        blurb = 'awi-siral-l3c-sithick-cryosat2-rep-nh_25km_ease2-'
        dates_u = []
        dy = time_end.year-time_start.year
        dm = time_end.month-time_start.month
        m_no = dy*12 + dm + 2        # +2 brackets the end points
        ts_m = dt.datetime(time_start.year, time_start.month, 1)
        for mn in range(m_no):
            d = ts_m + relativedelta(months = mn)
            file = self.path+d.strftime('/%Y/')+blurb+d.strftime('%Y%m')+'-fv2p2.nc'
            if exists(file):
                # stamp mid-month: the 15th (14th for February)
                if d.month == 2:
                    mid_day = 13
                else:
                    mid_day = 14
                dates_u.append(d + relativedelta(days=mid_day))
        ### now work over the dates and adjust for summer:
        ### remove all months in [5,6,7,8,9]; hole end points sit at the
        ### month end/start rather than mid-month when requested
        self.dates = []
        month_keep = [1, 2, 3, 4, 10, 11, 12]
        for d in dates_u:
            if d.month in month_keep:
                if fill_end_months and d.month == 4:
                    self.dates.append(d)
                    self.dates.append(d.replace(day=30))
                elif fill_end_months and d.month == 10:
                    self.dates.append(d.replace(day=1))
                    self.dates.append(d)
                else:
                    self.dates.append(d)
        # BUG FIX: report the number of dates actually kept (self.dates);
        # the original printed the pre-filter count of dates_u, which is
        # misleading once summer months are dropped.
        print(self.name+' Found '+str(len(self.dates))+' dates')
class OSISAF():
    """
    Forcing class for the budget.

    Loads OSI-SAF 62.5 km ice-drift NetCDF files (48-hour drift, one file
    per day) and converts drift to velocity in m/s.
    """
    def __init__(self, ppath):
        # ppath: root directory holding YYYY/MM/ drift files
        self.path = ppath
        self.name = 'osisaf'

    def _month_folder(self, d):
        """
        Return the 'YYYY/MM/' folder holding date d's drift file; files for
        the LAST day of a month live in the NEXT month's folder.

        BUG FIX: the original computed d.replace(day=28) + 4 days and then
        discarded the result of .replace(day=1) (datetime.replace returns a
        new object), so the last day of 28/29/30-day months was put in the
        wrong folder. We now compare against the first day of the following
        month.
        """
        nxt = (d.replace(day=28) + dt.timedelta(days=4)).replace(day=1)
        if (nxt - d).days > 1:
            return d.strftime('%Y/%m/')
        return nxt.strftime('%Y/%m/')

    def _file_name(self, d):
        """Relative path of the drift file for date d (48-h window centred
        on d: d-12h to d+36h)."""
        t1 = (d - dt.timedelta(hours=12)).strftime('%Y%m%d%H00')
        t2 = (d + dt.timedelta(hours=36)).strftime('%Y%m%d%H00')
        return self._month_folder(d)+'ice_drift_nh_polstere-625_multi-oi_'+t1+'-'+t2+'.nc'

    # next function will take a list of dates and return an appropriately
    # orientated pair of velocity arrays
    def get_dates(self, time_start, time_end):
        """
        Build self.dates: every day from time_start-1 to time_end+1 whose
        drift file exists under self.path.
        """
        dates_u = []
        d_no = (time_end-time_start).days + 3
        # make sure we get the bracket points
        for dn in range(d_no):
            d = time_start + dt.timedelta(days=dn - 1)
            if exists(self.path+self._file_name(d)):
                dates_u.append(d)
        self.dates = dates_u
        print(self.name+' Found '+str(np.shape(dates_u)[0])+' dates')

    def get_vels(self, dates_u, verbos=False):
        """
        Return (u, v) drift velocity arrays, each (n_dates, dx, dy), in m/s.

        dX/dY in the files are km per 48 h; masked cells become NaN. The v
        component is sign-flipped for files on or before 2015-09-12.
        """
        d_no = np.shape(dates_u)[0]
        data_u = None
        data_v = None
        for n, d in enumerate(dates_u):
            # BUG FIX: the sign of v is decided per date rather than once
            # from dates_u[0], so date lists spanning 2015-09-12 stay
            # consistent. NOTE(review): the flip date reflects a product
            # orientation change — confirm against the product manual.
            vmult = 1.0 if d > dt.datetime(2015, 9, 12) else -1.0
            f_nc = Dataset(self.path+self._file_name(d))
            unow = np.fliplr(f_nc['dX'][0].T)
            vnow = np.fliplr(f_nc['dY'][0].T)
            unow[unow.mask] = np.nan
            vnow[vnow.mask] = np.nan
            if data_u is None:
                # first file sizes the output arrays
                dx, dy = np.shape(unow)
                data_u = np.zeros([d_no, dx, dy])
                data_v = np.zeros([d_no, dx, dy])
            # convert km/48hrs to m/s
            data_u[n] = unow*1e3/60/60/48
            data_v[n] = vmult*vnow*1e3/60/60/48
            f_nc.close()
        return data_u, data_v
class CPOM_hi:
    """
    forcing class for the budget
    lets the forcing load efficiently

    Monthly CPOM thickness maps ('<YYYYmm>_Thick.map', text columns
    lat, lon, thickness) are regridded onto the grid object G on demand;
    the two most recently regridded months are buffered on the instance.
    """
    def __init__(self,ppath,G):
        # ppath: directory containing the monthly Thick.map files
        # G: grid object supplying xpts/ypts, m/n and the mplot projection
        self.name = 'CPOM_monthly'
        self.path = ppath
        ### we need to regrid as we load
        ### for efficiency we will buffer two months
        self.hi1 = 0.0
        self.dl1 = None
        self.hi2 = 0.0
        self.dl2 = None
        self.G = G
        # half a typical grid spacing: regridded points further than this
        # from any source datum are treated as empty (masked to NaN)
        self.THRESH = np.hypot(np.mean(np.diff(self.G.ypts)),
                      np.mean(np.diff(self.G.xpts)))/2
        # self.edges_x = self.G.xpts[:,int(G.n/2)] - self.G.dxRes/2
        # self.edges_y = self.G.ypts[int(G.m/2),:] - self.G.dyRes/2
        # self.edges_x = np.append(self.edges_x,2*self.G.xpts[-1,int(G.n/2)] - self.G.xpts[-2,int(G.n/2)])
        # self.edges_y = np.append(self.edges_y,2*self.G.ypts[int(G.m/2),-1] - self.G.ypts[int(G.m/2),-2])
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_hi(self,dates_u,verbos=False):
        """
        Return thickness fields for dates_u on grid G.

        dates_u: at most two dates (one per monthly file).
        Returns array [n_dates, G.m, G.n] with np.nan where no data,
        or None when more than two dates are requested.
        """
        # does dates_u cover one year or more
        #dmonthly
        reload = [False,False]  # NOTE(review): written below but never read — confirm intent
        d_no = np.shape(dates_u)[0]
        if d_no>2:
            print('Warning CPOM hi not compatible with tw over 1 month')
            return None
        ### first check if any of the dates in dates_u
        ### match the preloaded, ddL
        ### if the first date matches the second
        data_u = np.zeros([d_no,self.G.m,self.G.n])
        for n,d in enumerate(dates_u):
            if d == self.dl1:
                ### return buffered
                data_u[n] = self.hi1
            elif d == self.dl2:
                ### return buffered
                data_u[n] = self.hi2
            else:
                ### load and bin
                ### check dates
                ### normal month, data on the first
                dload = d.replace(day=1)  # NOTE(review): unused — confirm intent
                file = self.path+d.strftime('%Y%m_')+'Thick.map'
                if verbos: print(file)
                f = np.genfromtxt(file)
                # columns are lat, lon, thickness
                hi = f[:,2]
                lon = f[:,1]
                lat = f[:,0]
                xpts,ypts = self.G.mplot(lon,lat)
                # print('Binning awkward CPOM data')
                # #
                # ret = binned_statistic_2d( xpts, ypts, hi,
                #          statistic='mean', bins=[self.edges_x,self.edges_y])
                # data_u[n] = ret.statistic
                # ret = binned_statistic_2d( xpts, ypts, [],
                #          statistic='count', bins=[self.edges_x,self.edges_y])
                # data_u[n][ret.statistic<4] = np.nan
                print('Regridding awkward CPOM data')
                xpts,ypts = self.G.mplot(lon,lat)
                xy = np.vstack([xpts,ypts]).T
                # nearest-neighbour regrid of the scattered points onto G
                data_u[n] = griddata(xy, hi, (self.G.xpts, self.G.ypts),method='nearest')
                # Construct kd-tree, functionality copied from scipy.interpolate
                ### this finds empty values on the grid
                tree = cKDTree(xy)
                xi = _ndim_coords_from_arrays((self.G.xpts, self.G.ypts))
                dists, indexes = tree.query(xi)
                # Copy original result but mask missing values with NaNs
                data_u[n][dists > self.THRESH] = np.nan
                reload[n] = True
        ### update buffered thickness
        if d_no == 2:
            self.dl1 = dates_u[0]
            self.hi1 = data_u[0]
            self.dl2 = dates_u[1]
            self.hi2 = data_u[1]
        else:
            # single date requested: only the second buffer slot is refreshed
            self.dl2 = dates_u[0]
            self.hi2 = data_u[0]
        return data_u
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_dates(self,time_start,time_end,fill_end_months=False):
        """
        Build self.dates: a mid-month stamp for each existing monthly file,
        with summer months (May-Sep) removed; with fill_end_months the
        April/October edges of the summer gap gain extra bracketing dates.
        """
        # does dates_u cover one year or more
        #daily files
        dates_u = []
        dy = time_end.year-time_start.year
        dm = time_end.month-time_start.month
        m_no = dy*12 + dm +2
        # make sure we get the bracket points
        ts_m = dt.datetime(time_start.year,time_start.month,1)
        for mn in range(m_no):
            d = ts_m+ relativedelta(months = mn )
            file = self.path+d.strftime('%Y%m_')+'Thick.map'
            if exists(file):
                # mid-month stamp (February is shorter)
                if d.month==2:
                    mid_day = 13
                else:
                    mid_day = 14
                dates_u.append(d + relativedelta(days=mid_day))
            #if it does append dates_u
        ### now work over the date and adjust for summer
        ### remove all months = [5,6,8,7,9]
        ### also need hole end points to be at month end not mid
        self.dates= []
        month_keep = [1,2,3,4,10,11,12]
        for d in dates_u:
            if d.month in month_keep:
                if fill_end_months and d.month == 4:
                    self.dates.append(d)
                    d_end = d.replace(day=30)
                    self.dates.append(d_end)
                elif fill_end_months and d.month == 10:
                    d_start = d.replace(day=1)
                    self.dates.append(d_start)
                    self.dates.append(d)
                else:
                    self.dates.append(d)
        # NOTE(review): reports the pre-filter count, not len(self.dates)
        print(self.name+' Found '+str(np.shape(dates_u)[0])+' dates')
class Kimura():
    """
    Forcing class for the budget: daily Kimura AMSR drift files,
    loaded on demand so the forcing stays efficient.
    """
    def __init__(self, ppath):
        # ppath: directory holding the daily .amsr36i / .amsr18i files
        self.name = 'Kimura_drift'
        self.path = ppath
    def get_dates(self, time_start, time_end):
        """
        returns the all encompassing date list for use with the forcing object
        """
        found = []
        # pad the window by one day either side so bracketing files are kept
        span = (time_end - time_start).days + 3
        for offset in range(span):
            day = time_start + relativedelta(days=offset - 1)
            stamp = self.path + day.strftime('%y%m%d')
            # prefer the 36 GHz product, fall back to 18 GHz
            for suffix in (".amsr36i", ".amsr18i"):
                if exists(stamp + suffix):
                    found.append(day)
                    break
        self.dates = found
        print(self.name+' Found '+str(np.shape(found)[0])+' dates')
    # daily points in yearly files
    # next function will take a list of dates and return an appropriately orientated arrays
    # give a
    def get_vels(self, dates_u, verbos=False):
        """Read one binary drift file per date onto the fixed 145x145 grid."""
        grid = 145
        n_dates = np.shape(dates_u)[0]
        data_u = np.zeros([n_dates, grid, grid])
        data_v = np.zeros([n_dates, grid, grid])
        for n, day in enumerate(dates_u):
            infile = self.path + day.strftime('%y%m%d') + ".amsr36i"
            if not exists(infile):
                infile = self.path + day.strftime('%y%m%d') + ".amsr18i"
            if verbos:
                print(infile)
            with open(infile, 'rb') as fr:
                fr.read(0)  # no header bytes to skip
                raw = np.fromfile(fr, dtype=np.float32)
            # values are stored x100; anything >9 (after scaling) is missing
            raw = raw/100
            raw[raw > 9] = np.nan
            pair = raw.reshape(grid, grid, 2)
            data_u[n] = -pair[:, :, 1]
            data_v[n] = pair[:, :, 0]
        return data_u, data_v
class CICE_jas:
    """
    Forcing class for CICE 1-degree daily output.

    One netCDF file per year ('cice_daily_<year>.nc'). The most recently
    read year of each variable (velocity, concentration, thickness) is
    cached on the instance so repeated requests within a year do not
    re-read the file.
    """
    def __init__(self,ppath):
        # ppath: directory containing the yearly cice_daily_<year>.nc files
        self.name = 'CICE_1deg'
        self.path = ppath
        # per-variable cache state: which year is held, and whether loaded
        self.vyear_load = 0
        self.hyear_load = 0
        self.ayear_load = 0
        self.vels_loaded = False
        self.hi_loaded = False
        self.aice_loaded = False
    def get_dates(self,time_start,time_end):
        """
        returns the all encompassing date list for use with the forcing object
        """
        dates =[]
        n_yrs = (time_end.year - time_start.year)+1
        for y in range(n_yrs):
            yu = time_start.year + y
            d0 = dt.datetime(yu,1,1)
            f_name = 'cice_daily_'+str(yu)+'.nc'
            if exists(self.path+f_name):
                f_nc = Dataset(self.path+f_name)
                # one date per time record, counted from 1 Jan of that year
                for d in range(f_nc['time'].shape[0]):
                    dates.append(d0 + relativedelta(days = d))
                f_nc.close()
        self.dates = dates
        print(self.name+' Found '+str(np.shape(dates)[0])+' dates')
    @staticmethod
    def _year_slice(dates):
        # start/stop record indices (day-of-year based) for dates,
        # which must all lie within one calendar year
        p0 = dates[ 0].timetuple().tm_yday -1
        p1 = dates[-1].timetuple().tm_yday
        return p0, p1
    def _load_vels(self, yu):
        # read u/v for year yu into the cache unless already held
        if ((self.vyear_load != yu) or (not self.vels_loaded)):
            print('loading new year of data: '+str(yu))
            f_nc = Dataset(self.path+'cice_daily_'+str(yu)+'.nc')
            self.u = f_nc['uvel_d'][:]
            self.v = f_nc['vvel_d'][:]
            self.u[self.u.mask] = np.nan
            self.v[self.v.mask] = np.nan
            f_nc.close()
            self.vyear_load = yu
            self.vels_loaded = True
    def _load_aice(self, yu):
        # read concentration for year yu into the cache unless already held
        if ((self.ayear_load != yu) or (not self.aice_loaded)):
            print('loading new year of data: '+str(yu))
            f_nc = Dataset(self.path+'cice_daily_'+str(yu)+'.nc')
            self.a = f_nc['aice_d'][:]
            self.a[self.a.mask] = np.nan
            f_nc.close()
            self.ayear_load = yu
            self.aice_loaded = True
    def _load_hi(self, yu):
        # read thickness for year yu into the cache unless already held
        if ((self.hyear_load != yu) or (not self.hi_loaded)):
            print('loading new year of data: '+str(yu))
            f_nc = Dataset(self.path+'cice_daily_'+str(yu)+'.nc')
            self.h = f_nc['hi_d'][:]
            self.h[self.h.mask] = np.nan
            f_nc.close()
            self.hyear_load = yu
            self.hi_loaded = True
    def get_vels(self,dates_u,verbos=False):
        """
        Return (u, v) daily velocities covering dates_u, transposed to
        [time, x, y]. dates_u may span at most two consecutive years.
        """
        y0 = dates_u[0].year
        y1 = dates_u[-1].year
        dates = [d for d in dates_u if d.year == y0]
        self._load_vels(y0)
        p0, p1 = self._year_slice(dates)
        datau = self.u[p0:p1,:,:].transpose((0,2,1))
        datav = self.v[p0:p1,:,:].transpose((0,2,1))
        if y0 == y1:
            return datau,datav
        # spans two calendar years: append the second year's slice.
        # (Bug fix: the original updated self.hyear_load/self.hi_loaded here,
        # leaving the velocity cache wrongly marked as holding the first year.)
        dates = [d for d in dates_u if d.year == y1]
        self._load_vels(y1)
        p0, p1 = self._year_slice(dates)
        datau = np.vstack([datau, self.u[p0:p1,:,:].transpose((0,2,1))])
        datav = np.vstack([datav, self.v[p0:p1,:,:].transpose((0,2,1))])
        return datau,datav
    def get_aice(self,dates_u,verbos=False):
        """
        Return daily ice concentration covering dates_u ([time, x, y]).
        dates_u may span at most two consecutive years.
        """
        y0 = dates_u[0].year
        y1 = dates_u[-1].year
        dates = [d for d in dates_u if d.year == y0]
        self._load_aice(y0)
        p0, p1 = self._year_slice(dates)
        data = self.a[p0:p1,:,:].transpose((0,2,1))
        if y0 == y1:
            return data
        # spans two calendar years: append the second year's slice
        dates = [d for d in dates_u if d.year == y1]
        self._load_aice(y1)
        p0, p1 = self._year_slice(dates)
        return np.vstack([data, self.a[p0:p1,:,:].transpose((0,2,1))])
    def get_hi(self,dates_u,verbos=False):
        """
        Return daily ice thickness covering dates_u ([time, x, y]).
        dates_u may span at most two consecutive years.
        """
        y0 = dates_u[0].year
        y1 = dates_u[-1].year
        dates = [d for d in dates_u if d.year == y0]
        self._load_hi(y0)
        p0, p1 = self._year_slice(dates)
        data = self.h[p0:p1,:,:].transpose((0,2,1))
        if y0 == y1:
            return data
        # spans two calendar years: append the second year's slice
        dates = [d for d in dates_u if d.year == y1]
        self._load_hi(y1)
        p0, p1 = self._year_slice(dates)
        return np.vstack([data, self.h[p0:p1,:,:].transpose((0,2,1))])
class Bristol_thickness:
    def __init__(self,ppath,var='Sea_Ice_Thickness_incSMOS'):
        """
        Monthly Bristol CryoSat-2 thickness/volume forcing.

        var can be any of
        'Sea_Ice_Thickness'
        'Sea_Ice_Thickness_W99'
        'Sea_Ice_Thickness_incSMOS'
        'Sea_Ice_Volume'
        'Sea_Ice_Volume_W99'
        'Sea_Ice_Volume_incSMOS'
        """
        self.name = 'Bristol_hi'
        self.path = ppath
        # two-slot buffer kept for interface parity with CPOM_hi
        # (not consulted by get_hi here)
        self.hi1 = 0.0
        self.dl1 = None
        self.hi2 = 0.0
        self.dl2 = None
        self.var = var
    def get_dates(self,time_start,time_end,fill_end_months=False):
        """
        returns the all encompassing date list for use with the forcing object

        Each existing monthly file contributes a mid-month date; summer
        months (May-Sep) are dropped, and with fill_end_months the
        April/October edges of the summer gap gain extra bracketing dates.
        """
        blurb ='ubristol_cryosat2_seaicethickness_nh25km_'
        dy = time_end.year-time_start.year
        dm = time_end.month-time_start.month
        m_no = dy*12 + dm +2
        # make sure we get the bracket points
        ts_m = dt.datetime(time_start.year,time_start.month,1)
        dates_u = []
        for mn in range(m_no):
            d = ts_m + relativedelta(months = mn)
            file = self.path+blurb+d.strftime('%Y_%m_')+'v1.nc'
            if exists(file):
                # mid-month stamp (February is shorter)
                mid_day = 13 if d.month == 2 else 14
                dates_u.append(d + relativedelta(days=mid_day))
        ### now work over the date and adjust for summer
        ### remove all months = [5,6,8,7,9]
        ### also need hole end points to be at month end not mid
        self.dates= []
        month_keep = [1,2,3,4,10,11,12]
        for d in dates_u:
            if d.month in month_keep:
                if fill_end_months and d.month == 4:
                    self.dates.append(d)
                    self.dates.append(d.replace(day=30))
                elif fill_end_months and d.month == 10:
                    self.dates.append(d.replace(day=1))
                    self.dates.append(d)
                else:
                    self.dates.append(d)
        print(self.name+' Found '+str(np.shape(dates_u)[0])+' dates')
    def get_hi(self,dates_u,verbos=False):
        """
        Load self.var for each date in dates_u (one monthly file each).

        Returns an array [n_dates, nx, ny], sized from the first field
        read; returns None for an empty date list.
        """
        blurb ='ubristol_cryosat2_seaicethickness_nh25km_'
        d_no = np.shape(dates_u)[0]
        data = None
        for n,d in enumerate(dates_u):
            file = self.path+blurb+d.strftime('%Y_%m_')+'v1.nc'
            if verbos: print(file)
            f_nc = Dataset(file)
            hi = f_nc[self.var][:]
            if data is None:
                # size the output from the first field read
                dx,dy = hi.shape
                data = np.empty([d_no,dx,dy])
            data[n] = hi
            f_nc.close()
        return data
| 38.709892
| 106
| 0.526155
| 6,621
| 46,568
| 3.558375
| 0.064794
| 0.043294
| 0.028014
| 0.02101
| 0.887691
| 0.873302
| 0.847284
| 0.836375
| 0.821689
| 0.812649
| 0
| 0.039273
| 0.341672
| 46,568
| 1,202
| 107
| 38.742097
| 0.72923
| 0.169709
| 0
| 0.818807
| 0
| 0
| 0.074144
| 0.02317
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049312
| false
| 0
| 0.010321
| 0
| 0.106651
| 0.034404
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63ba39886af2db11585f7e6587789767bd4c4e60
| 23,141
|
py
|
Python
|
tests/http/test_wrappers.py
|
Daanvdk/jackie
|
c8529ea7a585e1c53288a154e147a55f4c790531
|
[
"MIT"
] | null | null | null |
tests/http/test_wrappers.py
|
Daanvdk/jackie
|
c8529ea7a585e1c53288a154e147a55f4c790531
|
[
"MIT"
] | 2
|
2021-02-22T01:56:44.000Z
|
2021-02-22T02:15:22.000Z
|
tests/http/test_wrappers.py
|
daanvdk/jackie
|
c8529ea7a585e1c53288a154e147a55f4c790531
|
[
"MIT"
] | null | null | null |
import asyncio
import urllib.parse
import tempfile
import pytest
from jackie.http import (
asgi_to_jackie, jackie_to_asgi, Request, Response, Socket,
Disconnect,
)
from jackie.http.stream import SendFile
@pytest.mark.asyncio
async def test_jackie_to_asgi():
    """A wrapped jackie view serves HTTP through the ASGI interface."""
    @jackie_to_asgi
    async def app(request):
        name = request.query.get('name', 'World')
        return Response(text=f'Hello, {name}!')
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'http',
        'method': 'GET',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(app(scope, input_queue.get, output_queue.put))
    message = await output_queue.get()
    assert message['type'] == 'http.response.start'
    assert message['status'] == 200
    assert message['headers'] == [
        (b'content-type', b'text/plain; charset=UTF-8'),
    ]
    body = b''
    while True:
        message = await output_queue.get()
        body += message['body']
        if not message['more_body']:
            break
    assert body == b'Hello, World!'
    await task
    # same app again, now with a query parameter
    scope = {
        'type': 'http',
        'method': 'GET',
        'path': '/',
        'query_string': b'name=Jack',
        'headers': [],
    }
    task = asyncio.ensure_future(app(scope, input_queue.get, output_queue.put))
    message = await output_queue.get()
    assert message['type'] == 'http.response.start'
    assert message['status'] == 200
    assert message['headers'] == [
        (b'content-type', b'text/plain; charset=UTF-8'),
    ]
    body = b''
    while True:
        message = await output_queue.get()
        body += message['body']
        if not message['more_body']:
            break
    assert body == b'Hello, Jack!'
    await task
@pytest.mark.asyncio
async def test_asgi_to_jackie():
    """A wrapped ASGI app can be called like a jackie view."""
    @asgi_to_jackie
    async def view(scope, receive, send):
        query = urllib.parse.parse_qs(scope['query_string'].decode())
        try:
            name = query['name'][-1]
        except KeyError:
            name = 'World'
        body = f'Hello, {name}!'.encode()
        await send({'type': 'http.response.start', 'status': 200, 'headers': [
            (b'content-type', b'text/plain; charset=UTF-8'),
        ]})
        await send({'type': 'http.response.body', 'body': body})
    response = await view(Request())
    assert response.status == 200
    assert list(response.headers.allitems()) == [
        ('content-type', 'text/plain; charset=UTF-8'),
    ]
    assert await response.body() == b'Hello, World!'
    response = await view(Request(query={'name': 'Jack'}))
    assert response.status == 200
    assert list(response.headers.allitems()) == [
        ('content-type', 'text/plain; charset=UTF-8'),
    ]
    assert await response.body() == b'Hello, Jack!'
def test_double_wrap():
    """Wrapping twice returns the original callable unchanged."""
    async def app(scope, receive, send):
        pass
    async def view(request):
        pass
    assert jackie_to_asgi(asgi_to_jackie(app)) is app
    assert asgi_to_jackie(jackie_to_asgi(view)) is view
@pytest.mark.asyncio
async def test_jackie_to_asgi_request_body():
    """Request chunks stream through to the response; disconnects end the
    exchange cleanly and unknown message types raise ValueError."""
    @jackie_to_asgi
    async def app(request):
        return Response(body=request.chunks())
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'http',
        'method': 'GET',
        'path': '/',
        'query_string': b'name=Jack',
        'headers': [],
    }
    task = asyncio.ensure_future(app(scope, input_queue.get, output_queue.put))
    message = await output_queue.get()
    assert message['type'] == 'http.response.start'
    assert message['status'] == 200
    assert message['headers'] == []
    await input_queue.put({
        'type': 'http.request',
        'body': b'foo',
        'more_body': True,
    })
    assert await output_queue.get() == {
        'type': 'http.response.body',
        'body': b'foo',
        'more_body': True,
    }
    await input_queue.put({
        'type': 'http.request',
        'body': b'bar',
        'more_body': False,
    })
    assert await output_queue.get() == {
        'type': 'http.response.body',
        'body': b'bar',
        'more_body': True,
    }
    assert await output_queue.get() == {
        'type': 'http.response.body',
        'body': b'',
        'more_body': False,
    }
    await task
    # client disconnect: response task finishes without error
    task = asyncio.ensure_future(app(scope, input_queue.get, output_queue.put))
    message = await output_queue.get()
    assert message['type'] == 'http.response.start'
    assert message['status'] == 200
    assert message['headers'] == []
    await input_queue.put({'type': 'http.disconnect'})
    await task
    # unknown message type: the task raises
    task = asyncio.ensure_future(app(scope, input_queue.get, output_queue.put))
    message = await output_queue.get()
    assert message['type'] == 'http.response.start'
    assert message['status'] == 200
    assert message['headers'] == []
    await input_queue.put({'type': 'invalid'})
    with pytest.raises(ValueError):
        await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_unusual_scope():
    """Scopes that are neither http nor websocket are rejected."""
    @jackie_to_asgi
    def app(request):
        pass
    with pytest.raises(ValueError):
        await app({'type': 'unusual'}, None, None)
@pytest.mark.asyncio
async def test_asgi_to_jackie_disconnect():
    """A Disconnect raised by the request body surfaces as http.disconnect."""
    @asgi_to_jackie
    async def view(scope, receive, send):
        assert scope['type'] == 'http'
        assert await receive() == {
            'type': 'http.request',
            'body': b'foo',
            'more_body': True,
        }
        assert await receive() == {
            'type': 'http.disconnect',
        }
        await send({
            'type': 'http.response.start',
            'status': 200,
        })
        await send({
            'type': 'http.response.body',
            'body': b'foo',
        })
    async def get_request_body():
        yield b'foo'
        raise Disconnect
    await view(Request(body=get_request_body()))
@pytest.mark.asyncio
async def test_asgi_to_jackie_no_more_messages():
    """receive() raises once the request body is exhausted."""
    @asgi_to_jackie
    async def view(scope, receive, send):
        assert await receive() == {
            'type': 'http.request',
            'body': b'',
            'more_body': False,
        }
        with pytest.raises(ValueError):
            await receive()
    with pytest.raises(ValueError):
        await view(Request(body=[]))
@pytest.mark.asyncio
async def test_asgi_to_jackie_forward_exception():
    """Exceptions raised inside the ASGI app propagate to the caller."""
    @asgi_to_jackie
    async def view(scope, receive, send):
        raise ValueError('test exception')
    with pytest.raises(ValueError) as exc_info:
        await view(Request(body=[]))
    assert str(exc_info.value) == 'test exception'
@pytest.mark.asyncio
async def test_asgi_to_jackie_unexpected_message():
    """Unknown message types raise, both before and after response start."""
    @asgi_to_jackie
    async def view(scope, receive, send):
        assert scope['type'] == 'http'
        await send({'type': 'invalid'})
    with pytest.raises(ValueError):
        await view(Request())
    # after a valid start, the error surfaces when the body is read
    @asgi_to_jackie
    async def view(scope, receive, send):
        assert scope['type'] == 'http'
        await send({'type': 'http.response.start', 'status': 200})
        await send({'type': 'invalid'})
    response = await view(Request())
    with pytest.raises(ValueError):
        await response.body()
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket():
    """A jackie socket handler echoes frames over the ASGI websocket protocol."""
    @jackie_to_asgi
    async def echo(socket):
        await socket.accept()
        while True:
            try:
                message = await socket.receive_bytes()
            except Disconnect:
                break
            try:
                await socket.send_text(message.decode())
            except ValueError:
                await socket.send_bytes(message)
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({
        'type': 'websocket.connect',
    })
    assert await output_queue.get() == {
        'type': 'websocket.accept',
        'headers': [],
    }
    await input_queue.put({
        'type': 'websocket.receive',
        'text': 'foo',
    })
    assert await output_queue.get() == {
        'type': 'websocket.send',
        'bytes': None,
        'text': 'foo',
    }
    # NOTE(review): 'bytes' carries a str here and a text frame is expected
    # back — relies on the socket wrapper's coercion; confirm intended
    await input_queue.put({
        'type': 'websocket.receive',
        'bytes': 'bar',
    })
    assert await output_queue.get() == {
        'type': 'websocket.send',
        'bytes': None,
        'text': 'bar',
    }
    await input_queue.put({
        'type': 'websocket.disconnect',
    })
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket_message_types():
    """receive_text/receive_bytes/receive_json coerce between frame types,
    while send_text/send_bytes enforce their argument types."""
    @jackie_to_asgi
    async def echo(socket):
        await socket.accept()
        assert await socket.receive_text() == 'foo'
        assert await socket.receive_text() == 'bar'
        assert await socket.receive_bytes() == b'baz'
        assert await socket.receive_bytes() == b'qux'
        assert await socket.receive_json() == {'foo': 'bar'}
        await socket.send_text('foo')
        with pytest.raises(TypeError):
            await socket.send_text(b'bar')
        await socket.send_bytes(b'baz')
        with pytest.raises(TypeError):
            await socket.send_bytes('qux')
        await socket.send_json({'foo': 'bar'})
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({
        'type': 'websocket.connect',
    })
    assert await output_queue.get() == {
        'type': 'websocket.accept',
        'headers': [],
    }
    # mixed text/bytes frames: the receive_* calls above coerce as needed
    await input_queue.put({'type': 'websocket.receive', 'text': 'foo'})
    await input_queue.put({'type': 'websocket.receive', 'bytes': b'bar'})
    await input_queue.put({'type': 'websocket.receive', 'bytes': b'baz'})
    await input_queue.put({'type': 'websocket.receive', 'text': 'qux'})
    await input_queue.put({
        'type': 'websocket.receive',
        'text': '{"foo": "bar"}',
    })
    assert await output_queue.get() == {
        'type': 'websocket.send',
        'text': 'foo',
        'bytes': None,
    }
    assert await output_queue.get() == {
        'type': 'websocket.send',
        'text': None,
        'bytes': b'baz',
    }
    assert await output_queue.get() == {
        'type': 'websocket.send',
        'text': '{"foo": "bar"}',
        'bytes': None,
    }
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket_double_accept():
    """Calling accept() twice on a socket raises ValueError."""
    @jackie_to_asgi
    async def echo(socket):
        await socket.accept()
        with pytest.raises(ValueError):
            await socket.accept()
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({'type': 'websocket.connect'})
    assert await output_queue.get() == {
        'type': 'websocket.accept',
        'headers': [],
    }
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket_double_close():
    """Calling close() twice on a socket raises ValueError."""
    @jackie_to_asgi
    async def echo(socket):
        await socket.close()
        with pytest.raises(ValueError):
            await socket.close()
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({'type': 'websocket.connect'})
    assert await output_queue.get() == {
        'type': 'websocket.close',
        'code': 1000,
    }
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket_receive_before_accept():
    """Receiving before the socket is accepted raises ValueError."""
    @jackie_to_asgi
    async def echo(socket):
        with pytest.raises(ValueError):
            await socket.receive_bytes()
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({'type': 'websocket.connect'})
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket_send_before_accept():
    """Sending before the socket is accepted raises ValueError."""
    @jackie_to_asgi
    async def echo(socket):
        with pytest.raises(ValueError):
            await socket.send_bytes(b'foo')
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({'type': 'websocket.connect'})
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_websocket_unexpected_message():
    """An unknown ASGI message while receiving raises ValueError."""
    @jackie_to_asgi
    async def echo(socket):
        await socket.accept()
        with pytest.raises(ValueError):
            await socket.receive_bytes()
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({'type': 'websocket.connect'})
    await input_queue.put({'type': 'unexpected'})
    await task
@pytest.mark.asyncio
async def test_jackie_to_asgi_send_invalid_type():
    """The low-level _send rejects payloads that are not str or bytes."""
    @jackie_to_asgi
    async def echo(socket):
        await socket.accept()
        with pytest.raises(TypeError):
            await socket._send(123)
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()
    scope = {
        'type': 'websocket',
        'path': '/',
        'query_string': b'',
        'headers': [],
    }
    task = asyncio.ensure_future(
        echo(scope, input_queue.get, output_queue.put)
    )
    await input_queue.put({'type': 'websocket.connect'})
    await task
@pytest.mark.asyncio
async def test_asgi_to_jackie_websocket():
    """An ASGI websocket app is driven through a jackie Socket; a message
    that is neither str nor bytes raises ValueError."""
    @asgi_to_jackie
    async def echo(scope, receive, send):
        assert scope['type'] == 'websocket'
        assert (await receive())['type'] == 'websocket.connect'
        await send({'type': 'websocket.accept'})
        while True:
            message = await receive()
            if message['type'] == 'websocket.close':
                break
            assert message['type'] == 'websocket.receive'
            await send({**message, 'type': 'websocket.send'})
    async def get_messages():
        yield 'foo'
        yield b'bar'
        raise Disconnect()
    connect_queue = asyncio.Queue()
    message_queue = asyncio.Queue()
    socket = Socket(
        accept=lambda *args, **kwargs: connect_queue.put(True),
        close=lambda *args, **kwargs: connect_queue.put(False),
        receive=get_messages().__anext__,
        send=message_queue.put,
    )
    task = asyncio.ensure_future(echo(socket))
    assert await connect_queue.get() is True
    assert await message_queue.get() == 'foo'
    assert await message_queue.get() == b'bar'
    await task
    # a non-str/bytes message from the socket raises
    async def get_wrong_messages():
        yield True
    connect_queue = asyncio.Queue()
    message_queue = asyncio.Queue()
    socket = Socket(
        accept=lambda *args, **kwargs: connect_queue.put(True),
        close=lambda *args, **kwargs: connect_queue.put(False),
        receive=get_wrong_messages().__anext__,
        send=message_queue.put,
    )
    task = asyncio.ensure_future(echo(socket))
    assert await connect_queue.get() is True
    with pytest.raises(ValueError):
        await task
@pytest.mark.asyncio
async def test_asgi_to_jackie_websocket_close():
    """websocket.close from the app invokes the socket's close callback."""
    @asgi_to_jackie
    async def close_it(scope, receive, send):
        assert scope['type'] == 'websocket'
        assert (await receive())['type'] == 'websocket.connect'
        await send({'type': 'websocket.close'})
    async def get_messages():
        for value in []:
            yield value
    connect_queue = asyncio.Queue()
    message_queue = asyncio.Queue()
    socket = Socket(
        accept=lambda *args, **kwargs: connect_queue.put(True),
        close=lambda *args, **kwargs: connect_queue.put(False),
        receive=get_messages().__anext__,
        send=message_queue.put,
    )
    task = asyncio.ensure_future(close_it(socket))
    assert await connect_queue.get() is False
    await task
@pytest.mark.asyncio
async def test_asgi_to_jackie_websocket_send_validation():
    """Sending a frame with no payload, or an unknown type, raises."""
    @asgi_to_jackie
    async def close_it(scope, receive, send):
        assert scope['type'] == 'websocket'
        assert (await receive())['type'] == 'websocket.connect'
        with pytest.raises(ValueError):
            await send({'type': 'websocket.send'})
        with pytest.raises(ValueError):
            await send({'type': 'unexpected'})
    async def get_messages():
        for value in []:
            yield value
    connect_queue = asyncio.Queue()
    message_queue = asyncio.Queue()
    socket = Socket(
        accept=lambda: connect_queue.put(True),
        close=lambda: connect_queue.put(False),
        receive=get_messages().__anext__,
        send=message_queue.put,
    )
    task = asyncio.ensure_future(close_it(socket))
    await task
@pytest.mark.asyncio
async def test_asgi_to_jackie_invalid_request():
    """Calling a wrapped view with something that is not a Request raises."""
    @asgi_to_jackie
    async def view(scope, receive, send):
        pass
    with pytest.raises(TypeError):
        await view(None)
@pytest.mark.asyncio
async def test_jackie_to_asgi_send_file():
    """SendFile bodies become http.response.zerocopysend messages, with
    offset/count forwarded from the query string.

    The original repeated the same ~35-line request driver three times;
    it is extracted into a local helper.
    """
    with tempfile.NamedTemporaryFile(suffix='.txt') as f:
        f.write(b'foobar')
        f.flush()

        @jackie_to_asgi
        async def app(request):
            offset = int(request.query.get('offset', '0'))
            size = int(request.query.get('size', '-1'))
            return Response(body=[SendFile(f.name, offset=offset, size=size)])

        async def run(query_string):
            # drive one request through the ASGI interface and return the
            # single non-empty body message, after the shared assertions
            input_queue = asyncio.Queue()
            output_queue = asyncio.Queue()
            scope = {
                'type': 'http',
                'method': 'GET',
                'path': '/',
                'query_string': query_string,
                'headers': [],
                'extensions': {'http.response.zerocopysend': {}},
            }
            task = asyncio.ensure_future(
                app(scope, input_queue.get, output_queue.put)
            )
            message = await output_queue.get()
            assert message['type'] == 'http.response.start'
            assert message['status'] == 200
            assert message['headers'] == []
            chunks = []
            while True:
                message = await output_queue.get()
                # skip the empty terminating body message
                if not (
                    message['type'] == 'http.response.body' and
                    message['body'] == b''
                ):
                    chunks.append(message)
                if not message.pop('more_body'):
                    break
            await task
            assert len(chunks) == 1
            assert chunks[0]['type'] == 'http.response.zerocopysend'
            assert chunks[0]['file'].name == f.name
            return chunks[0]

        # no query: whole file, no offset/count keys
        chunk = await run(b'')
        assert set(chunk) == {'type', 'file'}
        # offset forwarded as 'offset'
        chunk = await run(b'offset=3')
        assert set(chunk) == {'type', 'file', 'offset'}
        assert chunk['offset'] == 3
        # size forwarded as 'count'
        chunk = await run(b'size=3')
        assert set(chunk) == {'type', 'file', 'count'}
        assert chunk['count'] == 3
@pytest.mark.asyncio
async def test_asgi_to_jackie_send_file():
    """zerocopysend messages from an ASGI app become file content in the
    jackie response body, honouring offset and count."""
    with tempfile.NamedTemporaryFile(suffix='.txt') as f:
        f.write(b'foobar')
        f.flush()
        @asgi_to_jackie
        async def view(scope, receive, send):
            await send({
                'type': 'http.response.start',
                'status': 200,
                'headers': [],
            })
            query = urllib.parse.parse_qs(scope['query_string'].decode())
            message = {
                'type': 'http.response.zerocopysend',
                'file': open(f.name, 'rb'),
            }
            if 'offset' in query:
                message['offset'] = int(query['offset'][-1])
            if 'size' in query:
                message['count'] = int(query['size'][-1])
            await send(message)
        response = await view(Request())
        assert await response.body() == b'foobar'
        response = await view(Request(query={'offset': 3}))
        assert await response.body() == b'bar'
        response = await view(Request(query={'size': 3}))
        assert await response.body() == b'foo'
| 27.353428
| 79
| 0.572015
| 2,571
| 23,141
| 4.990665
| 0.057176
| 0.046294
| 0.045047
| 0.03702
| 0.858546
| 0.815057
| 0.796586
| 0.758164
| 0.750526
| 0.697529
| 0
| 0.004769
| 0.284128
| 23,141
| 845
| 80
| 27.385799
| 0.769769
| 0
| 0
| 0.727666
| 0
| 0
| 0.138499
| 0.007865
| 0
| 0
| 0
| 0
| 0.12536
| 1
| 0.002882
| false
| 0.005764
| 0.008646
| 0
| 0.01585
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
986df3e9e42537ae2b8e87f144880ed715e30604
| 7,272
|
py
|
Python
|
2018/day_13/python/day13.py
|
josephroquedev/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | null | null | null |
2018/day_13/python/day13.py
|
josephroquedev/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | 2
|
2021-06-02T00:41:38.000Z
|
2021-11-30T10:05:29.000Z
|
2018/day_13/python/day13.py
|
autoreleasefool/advent-of-code
|
bb217deb7a5f5ed5c8c04cb726ddadb5b042ee4d
|
[
"MIT"
] | null | null | null |
from aoc import AOC

aoc = AOC(year=2018, day=13)
data = aoc.load()

# Cart glyph -> initial (dx, dy) velocity.  y grows downward, so "^" is (0, -1).
CART_VELOCITIES = {"<": (-1, 0), ">": (1, 0), "^": (0, -1), "v": (0, 1)}


def _parse_tracks(lines):
    """Build the track grid and the initial carts from the puzzle input.

    Returns (paths, carts) where ``paths`` maps (x, y) -> track character
    (None for empty space) and ``carts`` maps (x, y) -> (dx, dy, cart_id).
    A cell holding a cart glyph is recorded as the straight track piece
    underneath it ("-" for horizontal carts, "|" for vertical ones).
    """
    paths = {}
    carts = {}
    next_cart_id = 0
    for y, line in enumerate(lines):
        for x, ch in enumerate(line):
            cell = (x, y)
            if ch in CART_VELOCITIES:
                next_cart_id += 1
                dx, dy = CART_VELOCITIES[ch]
                paths[cell] = "-" if dy == 0 else "|"
                carts[cell] = (dx, dy, next_cart_id)
            elif ch == " ":
                paths[cell] = None
            else:
                # "|", "-", "\\", "/" and "+" are stored verbatim.
                paths[cell] = ch
    return paths, carts


def _steer(track, dx, dy, turns_taken):
    """Return ((dx, dy), turns_taken) after a cart enters *track*.

    Curves mirror the velocity ("/" maps (dx, dy) -> (-dy, -dx); "\\" maps
    it to (dy, dx)); intersections cycle left / straight / right based on
    how many intersections this cart has visited so far.
    """
    if track == "/":
        return (-dy, -dx), turns_taken
    if track == "\\":
        return (dy, dx), turns_taken
    if track == "+":
        turns_taken += 1
        choice = turns_taken % 3
        if choice == 0:  # turn left
            return ((dy, 0) if dx == 0 else (0, -dx)), turns_taken
        if choice == 1:  # go straight
            return (dx, dy), turns_taken
        return ((-dy, 0) if dx == 0 else (0, dx)), turns_taken  # turn right
    # Straight track ("-" or "|"): velocity unchanged.
    return (dx, dy), turns_taken


def _simulate(paths, carts, stop_on_first_crash):
    """Run the cart simulation.

    Carts move one cell per tick, in reading order (top-to-bottom, then
    left-to-right).  Returns (first_crash_position, surviving_carts).
    When *stop_on_first_crash* is true the function returns as soon as
    the first collision happens; otherwise both crashed carts are removed
    and the run continues until at most one cart survives.
    """
    carts = dict(carts)
    # Per-cart intersection counter; -1 so the first intersection is a left turn.
    turns_taken = {cart_id: -1 for _, _, cart_id in carts.values()}
    first_crash = None
    while len(carts) > 1:
        for pos in sorted(carts, key=lambda p: (p[1], p[0])):
            if pos not in carts:
                continue  # removed by a crash earlier this tick
            dx, dy, cart_id = carts.pop(pos)
            nxt = (pos[0] + dx, pos[1] + dy)
            if nxt in carts:
                if first_crash is None:
                    first_crash = nxt
                if stop_on_first_crash:
                    return first_crash, carts
                del carts[nxt]  # both carts are destroyed
                continue
            (dx, dy), turns_taken[cart_id] = _steer(
                paths[nxt], dx, dy, turns_taken[cart_id]
            )
            carts[nxt] = (dx, dy, cart_id)
    return first_crash, carts


lines = list(data.lines())

# Part 1: position of the first crash.
paths, carts = _parse_tracks(lines)
crash_position, _ = _simulate(paths, carts, stop_on_first_crash=True)
aoc.p1(crash_position)

# Part 2: position of the last cart standing once crashes are cleaned up.
paths, carts = _parse_tracks(lines)
_, survivors = _simulate(paths, carts, stop_on_first_crash=False)
aoc.p2(next(iter(survivors)))
| 34.628571
| 85
| 0.515677
| 972
| 7,272
| 3.578189
| 0.063786
| 0.082806
| 0.086256
| 0.120759
| 0.951696
| 0.951696
| 0.951696
| 0.951696
| 0.951696
| 0.951696
| 0
| 0.033228
| 0.350248
| 7,272
| 209
| 86
| 34.794258
| 0.702857
| 0.000825
| 0
| 0.923913
| 0
| 0
| 0.005783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005435
| 0
| 0.005435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9891595b8cb437ae7e4c0b8ac7a9a8a2fef65863
| 6,708
|
py
|
Python
|
common/tests/document_parser/test_document_parser.py
|
Wildertrek/gamechanger-data
|
d087044594c722bd373cce1a48293d1a6da5d24e
|
[
"MIT"
] | 18
|
2021-04-20T20:34:01.000Z
|
2021-11-08T10:28:17.000Z
|
common/tests/document_parser/test_document_parser.py
|
Wildertrek/gamechanger-data
|
d087044594c722bd373cce1a48293d1a6da5d24e
|
[
"MIT"
] | 15
|
2021-04-20T20:31:33.000Z
|
2022-03-18T16:00:44.000Z
|
common/tests/document_parser/test_document_parser.py
|
dod-advana/gamechanger-data
|
1cdba2a3dbc1072f5991dcfe1daea6310c8ae42b
|
[
"MIT"
] | 8
|
2021-04-23T11:38:26.000Z
|
2021-11-17T22:42:38.000Z
|
import tempfile
import os
from pathlib import Path
from common.document_parser.cli import pdf_to_json
from common.tests import PACKAGE_OCR_PDF_PATH
import json
import pytest
import shutil
from dev_tools import REPO_PATH
# Raw crawler-output fixtures checked into the repo's universal test harness.
ORIGINAL_TEST_FILES = {
    "ocr_pdf_file": os.path.join(
        REPO_PATH,
        "dev_tools/universal_test_harness/data/crawler_output/2021-01-01T110000/Title 1.pdf"),
    "ocr_pdf_metadata_file": os.path.join(
        REPO_PATH,
        "dev_tools/universal_test_harness/data/crawler_output/2021-01-01T110000/Title 1.pdf.metadata"),
    "non_ocr_pdf_file": os.path.join(
        REPO_PATH,
        "dev_tools/universal_test_harness/data/crawler_output/2021-01-01T110000/Title 2.pdf"),
    "non_ocr_pdf_metadata_file": os.path.join(
        REPO_PATH,
        "dev_tools/universal_test_harness/data/crawler_output/2021-01-01T110000/Title 2.pdf.metadata"),
}

# JSON file names the parser is expected to write for each input PDF.
EXPECTED_OUTPUT_FILES = {
    "ocr_json_file": "Title 1.json",
    "non_ocr_json_file": "Title 2.json",
}
@pytest.fixture(scope='function')
def parsed_doc_output_dir(tmpdir) -> str:
    """Fresh, empty temp directory used as the parser's output destination."""
    out_dir = str(tmpdir)
    yield out_dir
@pytest.fixture(scope='function')
def input_dir_with_one_ocr_raw_doc(tmpdir) -> str:
    """Temp directory seeded with one OCR'd PDF plus its metadata file."""
    for key in ('ocr_pdf_file', 'ocr_pdf_metadata_file'):
        shutil.copy(ORIGINAL_TEST_FILES[key], tmpdir)
    yield str(tmpdir)
@pytest.fixture(scope='function')
def input_dir_with_one_non_ocr_raw_doc(tmpdir) -> str:
    """Temp directory seeded with one non-OCR'd PDF plus its metadata file."""
    for key in ('non_ocr_pdf_file', 'non_ocr_pdf_metadata_file'):
        shutil.copy(ORIGINAL_TEST_FILES[key], tmpdir)
    yield str(tmpdir)
@pytest.fixture(scope='function')
def input_dir_with_ocr_and_non_ocr_raw_doc(tmpdir) -> str:
    """Temp directory seeded with both fixture PDFs and their metadata files."""
    for key in ('ocr_pdf_file', 'ocr_pdf_metadata_file',
                'non_ocr_pdf_file', 'non_ocr_pdf_metadata_file'):
        shutil.copy(ORIGINAL_TEST_FILES[key], tmpdir)
    yield str(tmpdir)
def test_single_process_ocr_doc(input_dir_with_one_ocr_raw_doc,
                                parsed_doc_output_dir):
    """Parse a single OCR'd PDF and check a JSON document is produced."""
    pdf_to_json(
        parser_path="common.document_parser.parsers.policy_analytics.parse::parse",
        source=ORIGINAL_TEST_FILES["ocr_pdf_file"],
        metadata=ORIGINAL_TEST_FILES["ocr_pdf_metadata_file"],
        destination=parsed_doc_output_dir,
        verify=True,
        ocr_missing_doc=True,
        num_ocr_threads=2,
    )
    f_name = EXPECTED_OUTPUT_FILES["ocr_json_file"]
    with open(f"{parsed_doc_output_dir}/{f_name}") as fh:
        out_dict = json.load(fh)
    assert out_dict is not None
def test_single_process_non_ocr_doc(input_dir_with_one_non_ocr_raw_doc,
                                    parsed_doc_output_dir):
    """Parse a single non-OCR'd PDF and check a JSON document is produced.

    The original routed the check through a nested
    ``_assert_parsed_dir_has_what_i_expect`` helper that just returned a
    bool; flattened here to match every sibling test in this file.
    """
    pdf_to_json(
        parser_path="common.document_parser.parsers.policy_analytics.parse::parse",
        source=ORIGINAL_TEST_FILES["non_ocr_pdf_file"],
        metadata=ORIGINAL_TEST_FILES["non_ocr_pdf_metadata_file"],
        destination=parsed_doc_output_dir,
        verify=True,
        ocr_missing_doc=True,
        num_ocr_threads=2,
    )
    f_name = EXPECTED_OUTPUT_FILES["non_ocr_json_file"]
    with open(f"{parsed_doc_output_dir}/{f_name}") as fh:
        out_dict = json.load(fh)
    assert out_dict is not None
def test_single_process_non_ocr_doc_no_metadata(input_dir_with_one_non_ocr_raw_doc,
                                                parsed_doc_output_dir):
    """Parse a non-OCR'd PDF without a metadata file; JSON must still appear."""
    pdf_to_json(
        parser_path="common.document_parser.parsers.policy_analytics.parse::parse",
        source=ORIGINAL_TEST_FILES["non_ocr_pdf_file"],
        destination=parsed_doc_output_dir,
        verify=True,
        ocr_missing_doc=True,
        num_ocr_threads=2,
    )
    f_name = EXPECTED_OUTPUT_FILES["non_ocr_json_file"]
    with open(f"{parsed_doc_output_dir}/{f_name}") as fh:
        out_dict = json.load(fh)
    assert out_dict is not None
def test_single_process_mixed_dir(input_dir_with_ocr_and_non_ocr_raw_doc,
                                  parsed_doc_output_dir):
    """Parse a directory holding both PDFs; both JSON outputs must load.

    Removed the dead ``f_name``/``json_fp`` assignments that preceded the
    loop in the original — they were overwritten on the first iteration.
    """
    pdf_to_json(
        parser_path="common.document_parser.parsers.policy_analytics.parse::parse",
        source=input_dir_with_ocr_and_non_ocr_raw_doc,
        metadata=input_dir_with_ocr_and_non_ocr_raw_doc,
        destination=parsed_doc_output_dir,
        verify=True,
        ocr_missing_doc=True,
        num_ocr_threads=2,
    )
    out_dicts = []
    for f_name in (EXPECTED_OUTPUT_FILES["non_ocr_json_file"],
                   EXPECTED_OUTPUT_FILES["ocr_json_file"]):
        with open(f"{parsed_doc_output_dir}/{f_name}") as fh:
            out_dicts.append(json.load(fh))
    assert None not in out_dicts
def test_multiprocess_mixed_dir(input_dir_with_ocr_and_non_ocr_raw_doc,
                                parsed_doc_output_dir):
    """Parse a mixed directory with the multiprocess path; both JSONs must load.

    Removed the dead ``f_name``/``json_fp`` assignments that preceded the
    loop in the original — they were overwritten on the first iteration.
    """
    pdf_to_json(
        parser_path="common.document_parser.parsers.policy_analytics.parse::parse",
        source=input_dir_with_ocr_and_non_ocr_raw_doc,
        metadata=input_dir_with_ocr_and_non_ocr_raw_doc,
        destination=parsed_doc_output_dir,
        verify=True,
        ocr_missing_doc=True,
        num_ocr_threads=2,
        # NOTE(review): 0 despite the "multiprocess" test name — presumably
        # pdf_to_json treats 0 as "use all cores" or similar; confirm against
        # its signature before changing.
        multiprocess=0,
    )
    out_dicts = []
    for f_name in (EXPECTED_OUTPUT_FILES["non_ocr_json_file"],
                   EXPECTED_OUTPUT_FILES["ocr_json_file"]):
        with open(f"{parsed_doc_output_dir}/{f_name}") as fh:
            out_dicts.append(json.load(fh))
    assert None not in out_dicts
| 34.4
| 137
| 0.70483
| 977
| 6,708
| 4.350051
| 0.096213
| 0.039529
| 0.063529
| 0.076235
| 0.907059
| 0.895765
| 0.894353
| 0.858118
| 0.853412
| 0.852
| 0
| 0.012896
| 0.213924
| 6,708
| 194
| 138
| 34.57732
| 0.793097
| 0
| 0
| 0.708861
| 0
| 0
| 0.194544
| 0.144007
| 0
| 0
| 0
| 0
| 0.037975
| 1
| 0.063291
| false
| 0
| 0.056962
| 0
| 0.126582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f410743ff955f321358463531dd10766e86ff8d
| 42,098
|
py
|
Python
|
sdk/python/pulumi_gcp/monitoring/alert_policy.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/monitoring/alert_policy.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/monitoring/alert_policy.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['AlertPolicyArgs', 'AlertPolicy']
@pulumi.input_type
class AlertPolicyArgs:
    # NOTE(review): generated by the Pulumi Terraform Bridge (see file header);
    # keep hand edits to comments only — regeneration will overwrite them.
    def __init__(__self__, *,
                 combiner: pulumi.Input[str],
                 conditions: pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]],
                 display_name: pulumi.Input[str],
                 documentation: Optional[pulumi.Input['AlertPolicyDocumentationArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a AlertPolicy resource.
        :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
               determine if an incident should be opened.
               Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
        :param pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by
               AND or OR according to the combiner field. If the combined conditions
               evaluate to true, then an incident is created. A policy can have from
               one to six conditions.
               Structure is documented below.
        :param pulumi.Input[str] display_name: A short name or phrase used to identify the
               condition in dashboards, notifications, and
               incidents. To avoid confusion, don't use the same
               display name for multiple conditions in the same
               policy.
        :param pulumi.Input['AlertPolicyDocumentationArgs'] documentation: Documentation that is included with notifications and incidents related
               to this policy. Best practice is for the documentation to include information
               to help responders understand, mitigate, escalate, and correct the underlying
               problems detected by the alerting policy. Notification channels that have
               limited capacity might not show this documentation.
               Structure is documented below.
        :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
               sent when incidents are opened or closed or when new violations occur
               on an already opened incident. Each element of this array corresponds
               to the name field in each of the NotificationChannel objects that are
               returned from the notificationChannels.list method. The syntax of the
               entries in this field is
               `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
               objects.The field can contain up to 64 entries. Each key and value is limited
               to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
               can contain only lowercase letters, numerals, underscores, and dashes. Keys
               must begin with a letter.
        """
        pulumi.set(__self__, "combiner", combiner)
        pulumi.set(__self__, "conditions", conditions)
        pulumi.set(__self__, "display_name", display_name)
        # Optional arguments are only set when provided so that unset fields
        # stay absent from the resource inputs.
        if documentation is not None:
            pulumi.set(__self__, "documentation", documentation)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if notification_channels is not None:
            pulumi.set(__self__, "notification_channels", notification_channels)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if user_labels is not None:
            pulumi.set(__self__, "user_labels", user_labels)

    @property
    @pulumi.getter
    def combiner(self) -> pulumi.Input[str]:
        """
        How to combine the results of multiple conditions to
        determine if an incident should be opened.
        Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
        """
        return pulumi.get(self, "combiner")

    @combiner.setter
    def combiner(self, value: pulumi.Input[str]):
        pulumi.set(self, "combiner", value)

    @property
    @pulumi.getter
    def conditions(self) -> pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]:
        """
        A list of conditions for the policy. The conditions are combined by
        AND or OR according to the combiner field. If the combined conditions
        evaluate to true, then an incident is created. A policy can have from
        one to six conditions.
        Structure is documented below.
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]):
        pulumi.set(self, "conditions", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Input[str]:
        """
        A short name or phrase used to identify the
        condition in dashboards, notifications, and
        incidents. To avoid confusion, don't use the same
        display name for multiple conditions in the same
        policy.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def documentation(self) -> Optional[pulumi.Input['AlertPolicyDocumentationArgs']]:
        """
        Documentation that is included with notifications and incidents related
        to this policy. Best practice is for the documentation to include information
        to help responders understand, mitigate, escalate, and correct the underlying
        problems detected by the alerting policy. Notification channels that have
        limited capacity might not show this documentation.
        Structure is documented below.
        """
        return pulumi.get(self, "documentation")

    @documentation.setter
    def documentation(self, value: Optional[pulumi.Input['AlertPolicyDocumentationArgs']]):
        pulumi.set(self, "documentation", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not the policy is enabled. The default is true.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="notificationChannels")
    def notification_channels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Identifies the notification channels to which notifications should be
        sent when incidents are opened or closed or when new violations occur
        on an already opened incident. Each element of this array corresponds
        to the name field in each of the NotificationChannel objects that are
        returned from the notificationChannels.list method. The syntax of the
        entries in this field is
        `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
        """
        return pulumi.get(self, "notification_channels")

    @notification_channels.setter
    def notification_channels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_channels", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="userLabels")
    def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        This field is intended to be used for organizing and identifying the AlertPolicy
        objects.The field can contain up to 64 entries. Each key and value is limited
        to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
        can contain only lowercase letters, numerals, underscores, and dashes. Keys
        must begin with a letter.
        """
        return pulumi.get(self, "user_labels")

    @user_labels.setter
    def user_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "user_labels", value)
@pulumi.input_type
class _AlertPolicyState:
    # NOTE(review): generated by the Pulumi Terraform Bridge (see file header);
    # unlike AlertPolicyArgs every field here is optional, and it adds the
    # output-only `creation_records` and `name` fields used for lookups.
    def __init__(__self__, *,
                 combiner: Optional[pulumi.Input[str]] = None,
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]] = None,
                 creation_records: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 documentation: Optional[pulumi.Input['AlertPolicyDocumentationArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering AlertPolicy resources.
        :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
               determine if an incident should be opened.
               Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
        :param pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by
               AND or OR according to the combiner field. If the combined conditions
               evaluate to true, then an incident is created. A policy can have from
               one to six conditions.
               Structure is documented below.
        :param pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]] creation_records: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
               ignored.
        :param pulumi.Input[str] display_name: A short name or phrase used to identify the
               condition in dashboards, notifications, and
               incidents. To avoid confusion, don't use the same
               display name for multiple conditions in the same
               policy.
        :param pulumi.Input['AlertPolicyDocumentationArgs'] documentation: Documentation that is included with notifications and incidents related
               to this policy. Best practice is for the documentation to include information
               to help responders understand, mitigate, escalate, and correct the underlying
               problems detected by the alerting policy. Notification channels that have
               limited capacity might not show this documentation.
               Structure is documented below.
        :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
        :param pulumi.Input[str] name: -
               The unique resource name for this condition.
               Its syntax is:
               projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
               [CONDITION_ID] is assigned by Stackdriver Monitoring when
               the condition is created as part of a new or updated alerting
               policy.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
               sent when incidents are opened or closed or when new violations occur
               on an already opened incident. Each element of this array corresponds
               to the name field in each of the NotificationChannel objects that are
               returned from the notificationChannels.list method. The syntax of the
               entries in this field is
               `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
               objects.The field can contain up to 64 entries. Each key and value is limited
               to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
               can contain only lowercase letters, numerals, underscores, and dashes. Keys
               must begin with a letter.
        """
        # Every field is optional for state lookups; set only what was given.
        if combiner is not None:
            pulumi.set(__self__, "combiner", combiner)
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
        if creation_records is not None:
            pulumi.set(__self__, "creation_records", creation_records)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if documentation is not None:
            pulumi.set(__self__, "documentation", documentation)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if notification_channels is not None:
            pulumi.set(__self__, "notification_channels", notification_channels)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if user_labels is not None:
            pulumi.set(__self__, "user_labels", user_labels)

    @property
    @pulumi.getter
    def combiner(self) -> Optional[pulumi.Input[str]]:
        """
        How to combine the results of multiple conditions to
        determine if an incident should be opened.
        Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
        """
        return pulumi.get(self, "combiner")

    @combiner.setter
    def combiner(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "combiner", value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]:
        """
        A list of conditions for the policy. The conditions are combined by
        AND or OR according to the combiner field. If the combined conditions
        evaluate to true, then an incident is created. A policy can have from
        one to six conditions.
        Structure is documented below.
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]):
        pulumi.set(self, "conditions", value)

    @property
    @pulumi.getter(name="creationRecords")
    def creation_records(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]:
        """
        A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
        ignored.
        """
        return pulumi.get(self, "creation_records")

    @creation_records.setter
    def creation_records(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]):
        pulumi.set(self, "creation_records", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        A short name or phrase used to identify the
        condition in dashboards, notifications, and
        incidents. To avoid confusion, don't use the same
        display name for multiple conditions in the same
        policy.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def documentation(self) -> Optional[pulumi.Input['AlertPolicyDocumentationArgs']]:
        """
        Documentation that is included with notifications and incidents related
        to this policy. Best practice is for the documentation to include information
        to help responders understand, mitigate, escalate, and correct the underlying
        problems detected by the alerting policy. Notification channels that have
        limited capacity might not show this documentation.
        Structure is documented below.
        """
        return pulumi.get(self, "documentation")

    @documentation.setter
    def documentation(self, value: Optional[pulumi.Input['AlertPolicyDocumentationArgs']]):
        pulumi.set(self, "documentation", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not the policy is enabled. The default is true.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        -
        The unique resource name for this condition.
        Its syntax is:
        projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
        [CONDITION_ID] is assigned by Stackdriver Monitoring when
        the condition is created as part of a new or updated alerting
        policy.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="notificationChannels")
    def notification_channels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Identifies the notification channels to which notifications should be
        sent when incidents are opened or closed or when new violations occur
        on an already opened incident. Each element of this array corresponds
        to the name field in each of the NotificationChannel objects that are
        returned from the notificationChannels.list method. The syntax of the
        entries in this field is
        `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
        """
        return pulumi.get(self, "notification_channels")

    @notification_channels.setter
    def notification_channels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_channels", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="userLabels")
    def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        This field is intended to be used for organizing and identifying the AlertPolicy
        objects.The field can contain up to 64 entries. Each key and value is limited
        to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
        can contain only lowercase letters, numerals, underscores, and dashes. Keys
        must begin with a letter.
        """
        return pulumi.get(self, "user_labels")

    @user_labels.setter
    def user_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "user_labels", value)
class AlertPolicy(pulumi.CustomResource):
    # Generated Pulumi resource wrapping GCP Cloud Monitoring alert policies.
    # Two typed ``__init__`` overloads are declared for IDE/type-checker use;
    # the real constructor dispatches to ``_internal_init``.

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 combiner: Optional[pulumi.Input[str]] = None,
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 documentation: Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        A description of the conditions under which some aspect of your system is
        considered to be "unhealthy" and the ways to notify people or services
        about this state.

        To get more information about AlertPolicy, see:

        * [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/monitoring/alerts/)

        ## Example Usage
        ### Monitoring Alert Policy Basic

        ```python
        import pulumi
        import pulumi_gcp as gcp

        alert_policy = gcp.monitoring.AlertPolicy("alertPolicy",
            combiner="OR",
            conditions=[gcp.monitoring.AlertPolicyConditionArgs(
                condition_threshold=gcp.monitoring.AlertPolicyConditionConditionThresholdArgs(
                    aggregations=[gcp.monitoring.AlertPolicyConditionConditionThresholdAggregationArgs(
                        alignment_period="60s",
                        per_series_aligner="ALIGN_RATE",
                    )],
                    comparison="COMPARISON_GT",
                    duration="60s",
                    filter="metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
                ),
                display_name="test condition",
            )],
            display_name="My Alert Policy",
            user_labels={
                "foo": "bar",
            })
        ```

        ## Import

        AlertPolicy can be imported using any of these accepted formats

        ```sh
         $ pulumi import gcp:monitoring/alertPolicy:AlertPolicy default {{name}}
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
               determine if an incident should be opened.
               Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by
               AND or OR according to the combiner field. If the combined conditions
               evaluate to true, then an incident is created. A policy can have from
               one to six conditions.
               Structure is documented below.
        :param pulumi.Input[str] display_name: A short name or phrase used to identify the
               condition in dashboards, notifications, and
               incidents. To avoid confusion, don't use the same
               display name for multiple conditions in the same
               policy.
        :param pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']] documentation: Documentation that is included with notifications and incidents related
               to this policy. Best practice is for the documentation to include information
               to help responders understand, mitigate, escalate, and correct the underlying
               problems detected by the alerting policy. Notification channels that have
               limited capacity might not show this documentation.
               Structure is documented below.
        :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
               sent when incidents are opened or closed or when new violations occur
               on an already opened incident. Each element of this array corresponds
               to the name field in each of the NotificationChannel objects that are
               returned from the notificationChannels.list method. The syntax of the
               entries in this field is
               `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
               objects.The field can contain up to 64 entries. Each key and value is limited
               to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
               can contain only lowercase letters, numerals, underscores, and dashes. Keys
               must begin with a letter.
        """
        ...
@overload
def __init__(__self__,
             resource_name: str,
             args: AlertPolicyArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    A description of the conditions under which some aspect of your system is
    considered to be "unhealthy" and the ways to notify people or services
    about this state.

    To get more information about AlertPolicy, see:

    * [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies)
    * How-to Guides
        * [Official Documentation](https://cloud.google.com/monitoring/alerts/)

    ## Example Usage
    ### Monitoring Alert Policy Basic

    ```python
    import pulumi
    import pulumi_gcp as gcp

    alert_policy = gcp.monitoring.AlertPolicy("alertPolicy",
        combiner="OR",
        conditions=[gcp.monitoring.AlertPolicyConditionArgs(
            condition_threshold=gcp.monitoring.AlertPolicyConditionConditionThresholdArgs(
                aggregations=[gcp.monitoring.AlertPolicyConditionConditionThresholdAggregationArgs(
                    alignment_period="60s",
                    per_series_aligner="ALIGN_RATE",
                )],
                comparison="COMPARISON_GT",
                duration="60s",
                filter="metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
            ),
            display_name="test condition",
        )],
        display_name="My Alert Policy",
        user_labels={
            "foo": "bar",
        })
    ```

    ## Import

    AlertPolicy can be imported using any of these accepted formats

    ```sh
     $ pulumi import gcp:monitoring/alertPolicy:AlertPolicy default {{name}}
    ```

    :param str resource_name: The name of the resource.
    :param AlertPolicyArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch to ``_internal_init`` from either ``__init__`` overload.

    ``get_resource_args_opts`` inspects the positional/keyword arguments:
    it returns an ``AlertPolicyArgs`` instance when the caller used the
    args-object form, and ``None`` when the keyword form was used.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        AlertPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Keyword form: forward the original call unchanged.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: expand the args object into keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   combiner: Optional[pulumi.Input[str]] = None,
                   conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]] = None,
                   display_name: Optional[pulumi.Input[str]] = None,
                   documentation: Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]] = None,
                   enabled: Optional[pulumi.Input[bool]] = None,
                   notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   __props__=None):
    # Shared implementation behind both __init__ overloads: normalize the
    # resource options, validate required inputs, then register the resource
    # with the Pulumi engine.  Statement order is significant.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Pin the provider plugin version when the caller did not.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No physical id -> we are creating a new resource; a caller-supplied
        # __props__ bag is only legal when adopting an existing resource.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = AlertPolicyArgs.__new__(AlertPolicyArgs)

        # combiner, conditions and display_name are required unless the
        # resource is being rehydrated from a URN.
        if combiner is None and not opts.urn:
            raise TypeError("Missing required property 'combiner'")
        __props__.__dict__["combiner"] = combiner
        if conditions is None and not opts.urn:
            raise TypeError("Missing required property 'conditions'")
        __props__.__dict__["conditions"] = conditions
        if display_name is None and not opts.urn:
            raise TypeError("Missing required property 'display_name'")
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["documentation"] = documentation
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["notification_channels"] = notification_channels
        __props__.__dict__["project"] = project
        __props__.__dict__["user_labels"] = user_labels
        # Server-computed outputs start out unknown.
        __props__.__dict__["creation_records"] = None
        __props__.__dict__["name"] = None
    super(AlertPolicy, __self__).__init__(
        'gcp:monitoring/alertPolicy:AlertPolicy',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        combiner: Optional[pulumi.Input[str]] = None,
        conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]] = None,
        creation_records: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyCreationRecordArgs']]]]] = None,
        display_name: Optional[pulumi.Input[str]] = None,
        documentation: Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]] = None,
        enabled: Optional[pulumi.Input[bool]] = None,
        name: Optional[pulumi.Input[str]] = None,
        notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        project: Optional[pulumi.Input[str]] = None,
        user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'AlertPolicy':
    """
    Get an existing AlertPolicy resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
           determine if an incident should be opened.
           Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by
           AND or OR according to the combiner field. If the combined conditions
           evaluate to true, then an incident is created. A policy can have from
           one to six conditions.
           Structure is documented below.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyCreationRecordArgs']]]] creation_records: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
           ignored.
    :param pulumi.Input[str] display_name: A short name or phrase used to identify the
           condition in dashboards, notifications, and
           incidents. To avoid confusion, don't use the same
           display name for multiple conditions in the same
           policy.
    :param pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']] documentation: Documentation that is included with notifications and incidents related
           to this policy. Best practice is for the documentation to include information
           to help responders understand, mitigate, escalate, and correct the underlying
           problems detected by the alerting policy. Notification channels that have
           limited capacity might not show this documentation.
           Structure is documented below.
    :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
    :param pulumi.Input[str] name: -
           The unique resource name for this condition.
           Its syntax is:
           projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
           [CONDITION_ID] is assigned by Stackdriver Monitoring when
           the condition is created as part of a new or updated alerting
           policy.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
           sent when incidents are opened or closed or when new violations occur
           on an already opened incident. Each element of this array corresponds
           to the name field in each of the NotificationChannel objects that are
           returned from the notificationChannels.list method. The syntax of the
           entries in this field is
           `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
    :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
           If it is not provided, the provider project is used.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
           objects.The field can contain up to 64 entries. Each key and value is limited
           to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
           can contain only lowercase letters, numerals, underscores, and dashes. Keys
           must begin with a letter.
    """
    # Bind the supplied physical id into the options, then build a state bag
    # from the (possibly None) property values for the engine to reconcile.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    __props__ = _AlertPolicyState.__new__(_AlertPolicyState)

    __props__.__dict__["combiner"] = combiner
    __props__.__dict__["conditions"] = conditions
    __props__.__dict__["creation_records"] = creation_records
    __props__.__dict__["display_name"] = display_name
    __props__.__dict__["documentation"] = documentation
    __props__.__dict__["enabled"] = enabled
    __props__.__dict__["name"] = name
    __props__.__dict__["notification_channels"] = notification_channels
    __props__.__dict__["project"] = project
    __props__.__dict__["user_labels"] = user_labels
    return AlertPolicy(resource_name, opts=opts, __props__=__props__)
# --- Resolved (output) properties of the live resource ---------------------

@property
@pulumi.getter
def combiner(self) -> pulumi.Output[str]:
    """
    How to combine the results of multiple conditions to
    determine if an incident should be opened.
    Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
    """
    return pulumi.get(self, "combiner")

@property
@pulumi.getter
def conditions(self) -> pulumi.Output[Sequence['outputs.AlertPolicyCondition']]:
    """
    A list of conditions for the policy. The conditions are combined by
    AND or OR according to the combiner field. If the combined conditions
    evaluate to true, then an incident is created. A policy can have from
    one to six conditions.
    Structure is documented below.
    """
    return pulumi.get(self, "conditions")

@property
@pulumi.getter(name="creationRecords")
def creation_records(self) -> pulumi.Output[Sequence['outputs.AlertPolicyCreationRecord']]:
    """
    A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
    ignored.
    """
    return pulumi.get(self, "creation_records")

@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
    """
    A short name or phrase used to identify the
    condition in dashboards, notifications, and
    incidents. To avoid confusion, don't use the same
    display name for multiple conditions in the same
    policy.
    """
    return pulumi.get(self, "display_name")

@property
@pulumi.getter
def documentation(self) -> pulumi.Output[Optional['outputs.AlertPolicyDocumentation']]:
    """
    Documentation that is included with notifications and incidents related
    to this policy. Best practice is for the documentation to include information
    to help responders understand, mitigate, escalate, and correct the underlying
    problems detected by the alerting policy. Notification channels that have
    limited capacity might not show this documentation.
    Structure is documented below.
    """
    return pulumi.get(self, "documentation")

@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether or not the policy is enabled. The default is true.
    """
    return pulumi.get(self, "enabled")

@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    -
    The unique resource name for this condition.
    Its syntax is:
    projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
    [CONDITION_ID] is assigned by Stackdriver Monitoring when
    the condition is created as part of a new or updated alerting
    policy.
    """
    return pulumi.get(self, "name")

@property
@pulumi.getter(name="notificationChannels")
def notification_channels(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    Identifies the notification channels to which notifications should be
    sent when incidents are opened or closed or when new violations occur
    on an already opened incident. Each element of this array corresponds
    to the name field in each of the NotificationChannel objects that are
    returned from the notificationChannels.list method. The syntax of the
    entries in this field is
    `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
    """
    return pulumi.get(self, "notification_channels")

@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    return pulumi.get(self, "project")

@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    This field is intended to be used for organizing and identifying the AlertPolicy
    objects.The field can contain up to 64 entries. Each key and value is limited
    to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
    can contain only lowercase letters, numerals, underscores, and dashes. Keys
    must begin with a letter.
    """
    return pulumi.get(self, "user_labels")
| 50.477218
| 241
| 0.658962
| 4,839
| 42,098
| 5.615003
| 0.069436
| 0.066799
| 0.037614
| 0.029443
| 0.920872
| 0.904678
| 0.893747
| 0.882485
| 0.87748
| 0.869088
| 0
| 0.001997
| 0.26234
| 42,098
| 833
| 242
| 50.537815
| 0.872963
| 0.520215
| 0
| 0.710366
| 1
| 0
| 0.124621
| 0.053723
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161585
| false
| 0.003049
| 0.021341
| 0
| 0.280488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7f9da29f035236a61918b7e5b829a8bcdf6568d9
| 28,344
|
py
|
Python
|
tests/test_packages/test_skills_integration/test_tac.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_packages/test_skills_integration/test_tac.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_packages/test_skills_integration/test_tac.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the integration test for the tac skills."""
import datetime
import json
import uuid
from random import uniform
import pytest
from aea.test_tools.test_cases import AEATestCaseMany
from packages.fetchai.connections.p2p_libp2p.connection import LIBP2P_SUCCESS_MESSAGE
from tests.conftest import (
ETHEREUM,
ETHEREUM_PRIVATE_KEY_FILE,
FETCHAI,
FETCHAI_PRIVATE_KEY_FILE,
FETCHAI_PRIVATE_KEY_FILE_CONNECTION,
FUNDED_ETH_PRIVATE_KEY_1,
FUNDED_ETH_PRIVATE_KEY_2,
FUNDED_ETH_PRIVATE_KEY_3,
MAX_FLAKY_RERUNS_ETH,
MAX_FLAKY_RERUNS_INTEGRATION,
NON_FUNDED_FETCHAI_PRIVATE_KEY_1,
NON_GENESIS_CONFIG,
NON_GENESIS_CONFIG_TWO,
UseGanache,
)
# Shrink the ETH flaky-rerun budget by one for this module's ledger-backed
# test (used by TestTacSkillsContract's @pytest.mark.flaky below).
# NOTE(review): presumably tuned to keep total CI time bounded — confirm.
MAX_FLAKY_RERUNS_ETH -= 1
class TestTacSkills(AEATestCaseMany):
    """Test that tac skills work."""

    capture_log = True

    # Log lines proving an agent's libp2p connection came up; checked
    # identically for the controller and both participants (previously this
    # tuple was copy-pasted three times inside test_tac).
    _LIBP2P_CHECK_STRINGS = (
        "Starting libp2p node...",
        "Connecting to libp2p node...",
        "Successfully connected to libp2p node!",
        LIBP2P_SUCCESS_MESSAGE,
    )

    def _assert_output_contains(self, process, check_strings, agent_label, timeout):
        """Assert every string in check_strings appears in the agent's output.

        The emitted assertion message is identical to the previous inline
        form: "Strings {missing} didn't appear in {agent_label} output."
        """
        missing_strings = self.missing_from_output(
            process, check_strings, timeout=timeout, is_terminating=False
        )
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in {} output.".format(missing_strings, agent_label)

    def _build_and_run(self):
        """Build the current agent, issue its certificates and launch it."""
        self.run_cli_command("build", cwd=self._get_cwd())
        self.run_cli_command("issue-certificates", cwd=self._get_cwd())
        return self.run_agent()

    @pytest.mark.integration
    @pytest.mark.flaky(
        reruns=MAX_FLAKY_RERUNS_INTEGRATION
    )  # cause possible network issues
    def test_tac(self):
        """Run the tac skills sequence."""
        tac_aea_one = "tac_participant_one"
        tac_aea_two = "tac_participant_two"
        tac_controller_name = "tac_controller"

        # create tac controller, agent one and agent two
        self.create_agents(
            tac_aea_one, tac_aea_two, tac_controller_name,
        )

        default_routing = {
            "fetchai/oef_search:0.12.0": "fetchai/soef:0.15.0",
        }

        # generate random location
        location = {
            "latitude": round(uniform(-90, 90), 2),  # nosec
            "longitude": round(uniform(-180, 180), 2),  # nosec
        }

        # tac name
        tac_id = uuid.uuid4().hex

        # prepare tac controller for test
        self.set_agent_context(tac_controller_name)
        self.add_item("connection", "fetchai/p2p_libp2p:0.14.0")
        self.set_config("agent.default_connection", "fetchai/p2p_libp2p:0.14.0")
        self.add_item("connection", "fetchai/soef:0.15.0")
        self.remove_item("connection", "fetchai/stub:0.14.0")
        self.add_item("skill", "fetchai/tac_control:0.14.0")
        self.set_config("agent.default_ledger", FETCHAI)
        setting_path = "agent.default_routing"
        self.nested_set_config(setting_path, default_routing)
        self.run_install()
        diff = self.difference_to_fetched_agent(
            "fetchai/tac_controller:0.17.0", tac_controller_name
        )
        assert (
            diff == []
        ), "Difference between created and fetched project for files={}".format(diff)

        # add keys
        self.generate_private_key(FETCHAI)
        self.generate_private_key(FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        self.add_private_key(FETCHAI, FETCHAI_PRIVATE_KEY_FILE)
        self.add_private_key(
            FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True
        )
        self.replace_private_key_in_file(
            NON_FUNDED_FETCHAI_PRIVATE_KEY_1, FETCHAI_PRIVATE_KEY_FILE_CONNECTION
        )
        setting_path = "vendor.fetchai.connections.p2p_libp2p.config.ledger_id"
        self.set_config(setting_path, FETCHAI)

        # replace location
        setting_path = (
            "vendor.fetchai.skills.tac_control.models.parameters.args.location"
        )
        self.nested_set_config(setting_path, location)

        # set tac id
        data = {"key": "tac", "value": tac_id}
        setting_path = (
            "vendor.fetchai.skills.tac_control.models.parameters.args.service_data"
        )
        self.nested_set_config(setting_path, data)

        default_routing = {
            "fetchai/ledger_api:0.9.0": "fetchai/ledger:0.12.0",
            "fetchai/oef_search:0.12.0": "fetchai/soef:0.15.0",
        }

        # prepare agents for test
        for agent_name, config in (
            (tac_aea_one, NON_GENESIS_CONFIG),
            (tac_aea_two, NON_GENESIS_CONFIG_TWO),
        ):
            self.set_agent_context(agent_name)
            self.add_item("connection", "fetchai/p2p_libp2p:0.14.0")
            self.set_config("agent.default_connection", "fetchai/p2p_libp2p:0.14.0")
            self.add_item("connection", "fetchai/soef:0.15.0")
            self.add_item("connection", "fetchai/ledger:0.12.0")
            self.remove_item("connection", "fetchai/stub:0.14.0")
            self.add_item("skill", "fetchai/tac_participation:0.15.0")
            self.add_item("skill", "fetchai/tac_negotiation:0.17.0")
            self.set_config("agent.default_ledger", FETCHAI)
            setting_path = "agent.default_routing"
            self.nested_set_config(setting_path, default_routing)
            self.run_install()
            diff = self.difference_to_fetched_agent(
                "fetchai/tac_participant:0.19.0", agent_name
            )
            assert (
                diff == []
            ), "Difference between created and fetched project for files={}".format(
                diff
            )

            # add keys
            self.generate_private_key(FETCHAI)
            self.generate_private_key(FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
            self.add_private_key(FETCHAI, FETCHAI_PRIVATE_KEY_FILE)
            self.add_private_key(
                FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True
            )

            # set p2p configs
            setting_path = "vendor.fetchai.connections.p2p_libp2p.config"
            self.nested_set_config(setting_path, config)

            # replace location
            setting_path = (
                "vendor.fetchai.skills.tac_participation.models.game.args.location"
            )
            self.nested_set_config(setting_path, location)

            # set tac id
            data = {
                "search_key": "tac",
                "search_value": tac_id,
                "constraint_type": "==",
            }
            setting_path = (
                "vendor.fetchai.skills.tac_participation.models.game.args.search_query"
            )
            self.nested_set_config(setting_path, data)

        # run tac controller; registration opens one minute from now
        # (seconds are deliberately dropped via the minute-resolution format)
        self.set_agent_context(tac_controller_name)
        now = datetime.datetime.now().strftime("%d %m %Y %H:%M")
        now_min = datetime.datetime.strptime(now, "%d %m %Y %H:%M")
        fut = now_min + datetime.timedelta(0, 60)
        start_time = fut.strftime("%d %m %Y %H:%M")
        setting_path = "vendor.fetchai.skills.tac_control.models.parameters.args.registration_start_time"
        self.set_config(setting_path, start_time)
        tac_controller_process = self._build_and_run()

        self._assert_output_contains(
            tac_controller_process, self._LIBP2P_CHECK_STRINGS, "tac_controller", 30
        )

        # run two agents (participants)
        self.set_agent_context(tac_aea_one)
        tac_aea_one_process = self._build_and_run()

        self.set_agent_context(tac_aea_two)
        tac_aea_two_process = self._build_and_run()

        self._assert_output_contains(
            tac_aea_one_process, self._LIBP2P_CHECK_STRINGS, "tac_aea_one", 30
        )
        self._assert_output_contains(
            tac_aea_two_process, self._LIBP2P_CHECK_STRINGS, "tac_aea_two", 30
        )

        # the controller should register both participants and run a round
        check_strings = (
            "registering agent on SOEF.",
            "registering TAC data model on SOEF.",
            "TAC open for registration until:",
            "agent registered: 'tac_participant_one'",
            "agent registered: 'tac_participant_two'",
            "started competition:",
            "unregistering TAC data model from SOEF.",
            "handling valid transaction:",
            "Current good & money allocation & score: ",
        )
        self._assert_output_contains(
            tac_controller_process, check_strings, "tac_controller", 240
        )

        # participant one should go through the full negotiation cycle
        check_strings = (
            "searching for TAC, search_id=",
            "found the TAC controller. Registering...",
            "received start event from the controller. Starting to compete...",
            "registering agent on SOEF.",
            "searching for sellers, search_id=",
            "searching for buyers, search_id=",
            "found potential sellers agents=",
            "received cfp from",
            "received decline from",
            "received propose from",
            "received accept from",
            "received match_accept_w_inform from",
            "sending CFP to agent=",
            "sending propose to",
            "sending accept to",
            "requesting signature, sending sign_message to decision_maker, message=",
            "received signed_message from decision_maker, message=",
            "sending transaction to controller, tx=",
            "received transaction confirmation from the controller:",
            "Applying state update!",
            "found potential buyers agents=",
            "sending CFP to agent=",
        )
        self._assert_output_contains(
            tac_aea_one_process, check_strings, "tac_aea_one", 240
        )

        # Note, we do not need to check std output of the other participant as it is implied
        self.terminate_agents(
            tac_controller_process, tac_aea_one_process, tac_aea_two_process
        )
        assert (
            self.is_successfully_terminated()
        ), "Agents weren't successfully terminated."
class TestTacSkillsContract(AEATestCaseMany, UseGanache):
    """End-to-end test of the contract-based TAC skills against a Ganache chain.

    Spins up one TAC controller (which deploys the ERC1155 contract, creates
    and mints tokens) and two participants, then checks the expected log
    output of each process at every stage of the competition.
    """

    capture_log = True

    @pytest.mark.integration
    @pytest.mark.ledger
    @pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS_ETH)  # cause possible network issues
    def test_tac(self):
        """Run the tac skills sequence."""
        tac_aea_one = "tac_participant_one"
        tac_aea_two = "tac_participant_two"
        tac_controller_name = "tac_controller_contract"

        # create tac controller, agent one and agent two
        self.create_agents(
            tac_aea_one, tac_aea_two, tac_controller_name,
        )

        # protocol-to-connection routing, shared by controller and participants
        default_routing = {
            "fetchai/contract_api:0.10.0": "fetchai/ledger:0.12.0",
            "fetchai/ledger_api:0.9.0": "fetchai/ledger:0.12.0",
            "fetchai/oef_search:0.12.0": "fetchai/soef:0.15.0",
        }

        # generate random location (shared so participants can find the controller)
        location = {
            "latitude": round(uniform(-90, 90), 2),  # nosec
            "longitude": round(uniform(-180, 180), 2),  # nosec
        }

        # tac name: unique per run so concurrent test runs do not collide on the SOEF
        tac_id = uuid.uuid4().hex

        # prepare tac controller for test
        self.set_agent_context(tac_controller_name)
        self.add_item("connection", "fetchai/p2p_libp2p:0.14.0")
        self.set_config("agent.default_connection", "fetchai/p2p_libp2p:0.14.0")
        self.add_item("connection", "fetchai/soef:0.15.0")
        self.add_item("connection", "fetchai/ledger:0.12.0")
        self.remove_item("connection", "fetchai/stub:0.14.0")
        self.add_item("skill", "fetchai/tac_control_contract:0.16.0")
        self.set_config("agent.default_ledger", ETHEREUM)
        setting_path = "agent.default_routing"
        self.nested_set_config(setting_path, default_routing)
        self.run_install()
        # the locally assembled agent must match the published packaged agent
        diff = self.difference_to_fetched_agent(
            "fetchai/tac_controller_contract:0.19.0", tac_controller_name
        )
        assert (
            diff == []
        ), "Difference between created and fetched project for files={}".format(diff)

        # add keys: ETHEREUM for transactions, FETCHAI for the p2p connection identity
        self.generate_private_key(ETHEREUM)
        self.generate_private_key(FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        self.add_private_key(ETHEREUM, ETHEREUM_PRIVATE_KEY_FILE)
        self.add_private_key(
            FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True
        )
        # controller needs a funded key to pay for contract deployment
        self.replace_private_key_in_file(
            FUNDED_ETH_PRIVATE_KEY_1, ETHEREUM_PRIVATE_KEY_FILE
        )
        self.replace_private_key_in_file(
            NON_FUNDED_FETCHAI_PRIVATE_KEY_1, FETCHAI_PRIVATE_KEY_FILE_CONNECTION
        )

        # certificate request so the p2p connection can prove key ownership (ACN)
        setting_path = "vendor.fetchai.connections.p2p_libp2p.cert_requests"
        settings = json.dumps(
            [
                {
                    "identifier": "acn",
                    "ledger_id": ETHEREUM,
                    "not_after": "2022-01-01",
                    "not_before": "2021-01-01",
                    "public_key": FETCHAI,
                    "save_path": ".certs/conn_cert.txt",
                }
            ]
        )
        self.set_config(setting_path, settings, type_="list")
        setting_path = "vendor.fetchai.connections.soef.config.chain_identifier"
        self.set_config(setting_path, ETHEREUM)
        # NOTE(review): this targets `tac_control`, not `tac_control_contract` —
        # presumably the non-contract skill is pulled in as a dependency and is
        # marked abstract here so only the contract variant runs; confirm.
        setting_path = "vendor.fetchai.skills.tac_control.is_abstract"
        self.set_config(setting_path, True, "bool")

        # replace location
        setting_path = (
            "vendor.fetchai.skills.tac_control_contract.models.parameters.args.location"
        )
        self.nested_set_config(setting_path, location)

        # set tac id in the controller's SOEF service data
        data = {"key": "tac", "value": tac_id}
        setting_path = "vendor.fetchai.skills.tac_control_contract.models.parameters.args.service_data"
        self.nested_set_config(setting_path, data)

        # prepare agents for test; `default_routing` is reused unchanged from above
        for agent_name, config, private_key in (
            (tac_aea_one, NON_GENESIS_CONFIG, FUNDED_ETH_PRIVATE_KEY_2),
            (tac_aea_two, NON_GENESIS_CONFIG_TWO, FUNDED_ETH_PRIVATE_KEY_3),
        ):
            self.set_agent_context(agent_name)
            self.add_item("connection", "fetchai/p2p_libp2p:0.14.0")
            self.set_config("agent.default_connection", "fetchai/p2p_libp2p:0.14.0")
            self.add_item("connection", "fetchai/soef:0.15.0")
            self.add_item("connection", "fetchai/ledger:0.12.0")
            self.remove_item("connection", "fetchai/stub:0.14.0")
            self.add_item("skill", "fetchai/tac_participation:0.15.0")
            self.add_item("skill", "fetchai/tac_negotiation:0.17.0")
            self.set_config("agent.default_ledger", ETHEREUM)
            setting_path = "agent.default_routing"
            self.nested_set_config(setting_path, default_routing)
            # switch both skills into on-chain (contract) mode
            self.set_config(
                "vendor.fetchai.skills.tac_participation.models.game.args.is_using_contract",
                True,
                "bool",
            )
            self.set_config(
                "vendor.fetchai.skills.tac_negotiation.models.strategy.args.is_contract_tx",
                True,
                "bool",
            )
            self.run_install()
            diff = self.difference_to_fetched_agent(
                "fetchai/tac_participant_contract:0.9.0", agent_name
            )
            assert (
                diff == []
            ), "Difference between created and fetched project for files={}".format(
                diff
            )

            # add keys; each participant gets its own funded ethereum key
            self.generate_private_key(ETHEREUM)
            self.generate_private_key(FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
            self.add_private_key(ETHEREUM, ETHEREUM_PRIVATE_KEY_FILE)
            self.add_private_key(
                FETCHAI, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True
            )
            self.replace_private_key_in_file(private_key, ETHEREUM_PRIVATE_KEY_FILE)

            # set p2p configs (non-genesis: participants connect to the controller's node)
            setting_path = "vendor.fetchai.connections.p2p_libp2p.config"
            self.nested_set_config(setting_path, config)
            setting_path = "vendor.fetchai.connections.p2p_libp2p.cert_requests"
            settings = json.dumps(
                [
                    {
                        "identifier": "acn",
                        "ledger_id": ETHEREUM,
                        "not_after": "2022-01-01",
                        "not_before": "2021-01-01",
                        "public_key": FETCHAI,
                        "save_path": ".certs/conn_cert.txt",
                    }
                ]
            )
            self.set_config(setting_path, settings, type_="list")

            # replace location
            setting_path = (
                "vendor.fetchai.skills.tac_participation.models.game.args.location"
            )
            self.nested_set_config(setting_path, location)

            # set tac id: participants search the SOEF for this run's controller only
            data = {
                "search_key": "tac",
                "search_value": tac_id,
                "constraint_type": "==",
            }
            setting_path = (
                "vendor.fetchai.skills.tac_participation.models.game.args.search_query"
            )
            self.nested_set_config(setting_path, data)
            setting_path = "vendor.fetchai.connections.soef.config.chain_identifier"
            self.set_config(setting_path, ETHEREUM)

        # run tac controller
        self.set_agent_context(tac_controller_name)
        self.run_cli_command("build", cwd=self._get_cwd())
        self.run_cli_command("issue-certificates", cwd=self._get_cwd())
        # registration opens 2 minutes from now, leaving time for contract deployment;
        # round-trip through strftime/strptime truncates to whole minutes
        now = datetime.datetime.now().strftime("%d %m %Y %H:%M")
        now_min = datetime.datetime.strptime(now, "%d %m %Y %H:%M")
        fut = now_min + datetime.timedelta(
            0, 120
        )  # we provide 2 minutes time for contract deployment
        start_time = fut.strftime("%d %m %Y %H:%M")
        setting_path = "vendor.fetchai.skills.tac_control_contract.models.parameters.args.registration_start_time"
        self.set_config(setting_path, start_time)
        tac_controller_process = self.run_agent()

        # controller: deploys the contract and opens registration
        check_strings = (
            "Starting libp2p node...",
            "Connecting to libp2p node...",
            "Successfully connected to libp2p node!",
            LIBP2P_SUCCESS_MESSAGE,
            "registering agent on SOEF.",
            "requesting contract deployment transaction...",
            "Start processing messages...",
            "received raw transaction=",
            "transaction signing was successful.",
            "sending transaction to ledger.",
            "transaction was successfully submitted. Transaction digest=",
            "requesting transaction receipt.",
            "transaction was successfully settled. Transaction receipt=",
            "contract deployed.",
            "registering TAC data model on SOEF.",
            "TAC open for registration until:",
        )
        missing_strings = self.missing_from_output(
            tac_controller_process, check_strings, timeout=180, is_terminating=False
        )  # we need to wait sufficiently long (at least 2 minutes - see above for deployment)
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_controller output.".format(missing_strings)

        # run two agents (participants)
        self.set_agent_context(tac_aea_one)
        self.run_cli_command("build", cwd=self._get_cwd())
        self.run_cli_command("issue-certificates", cwd=self._get_cwd())
        tac_aea_one_process = self.run_agent()
        self.set_agent_context(tac_aea_two)
        self.run_cli_command("build", cwd=self._get_cwd())
        self.run_cli_command("issue-certificates", cwd=self._get_cwd())
        tac_aea_two_process = self.run_agent()

        # participant one: startup and controller search
        check_strings = (
            "Starting libp2p node...",
            "Connecting to libp2p node...",
            "Successfully connected to libp2p node!",
            LIBP2P_SUCCESS_MESSAGE,
            "Start processing messages...",
            "searching for TAC, search_id=",
        )
        missing_strings = self.missing_from_output(
            tac_aea_one_process, check_strings, timeout=30, is_terminating=False
        )
        # FIX: this result was previously discarded without being asserted
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_aea_one output.".format(missing_strings)
        check_strings = ("found the TAC controller. Registering...",)
        missing_strings = self.missing_from_output(
            tac_aea_one_process, check_strings, timeout=60, is_terminating=False
        )  # we need to wait sufficiently long (at least 1 minutes - for registration)
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_aea_one output.".format(missing_strings)

        # participant two: startup, controller search and registration
        check_strings = (
            "Starting libp2p node...",
            "Connecting to libp2p node...",
            "Successfully connected to libp2p node!",
            LIBP2P_SUCCESS_MESSAGE,
            "Start processing messages...",
            "searching for TAC, search_id=",
            "found the TAC controller. Registering...",
        )
        missing_strings = self.missing_from_output(
            tac_aea_two_process, check_strings, timeout=30, is_terminating=False
        )
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_aea_two output.".format(missing_strings)

        # controller: token creation and minting for both participants, game start
        check_strings = (
            "agent registered:",
            "closing registration!",
            "unregistering TAC data model from SOEF.",
            "requesting create items transaction...",
            "received raw transaction=",
            "proposing the transaction to the decision maker. Waiting for confirmation ...",
            "transaction signing was successful.",
            "transaction was successfully submitted. Transaction digest=",
            "requesting transaction receipt.",
            "transaction was successfully settled. Transaction receipt=",
            "tokens created.",
            "requesting mint_items transactions for agent=",
            "tokens minted.",
            "requesting mint_items transactions for agent=",
            "tokens minted.",
            "all tokens minted.",
            "started competition:",
        )
        missing_strings = self.missing_from_output(
            tac_controller_process, check_strings, timeout=240, is_terminating=False
        )
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_controller output.".format(missing_strings)

        # participant one: full negotiation and on-chain atomic swap cycle
        check_strings = (
            "received start event from the controller. Starting to compete...",
            "received a contract address:",
            "registering agent on SOEF.",
            "searching for sellers, search_id=",
            "searching for buyers, search_id=",
            "found potential sellers agents=",
            "found potential buyers agents=",
            "sending CFP to agent=",
            "received cfp from",
            "received propose from",
            "received decline from",
            "received accept from",
            "received match_accept_w_inform from",
            "sending propose to",
            "sending accept to",
            "requesting batch transaction hash, sending get_raw_message to fetchai/erc1155:0.14.0, message=",
            "requesting batch atomic swap transaction, sending get_raw_transaction to fetchai/erc1155:0.14.0, message=",
            "received raw transaction=",
            "received raw message=",
            "proposing the transaction to the decision maker. Waiting for confirmation ...",
            "proposing the message to the decision maker. Waiting for confirmation ...",
            "received signed_message from decision_maker, message=",
            "received signed_transaction from decision_maker, message=",
            "sending send_signed_transaction to ledger ethereum, message=",
        )
        missing_strings = self.missing_from_output(
            tac_aea_one_process, check_strings, timeout=300, is_terminating=False
        )
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_aea_one output.".format(missing_strings)

        # participant two: same negotiation cycle (longer timeout, it may trade second)
        check_strings = (
            "received start event from the controller. Starting to compete...",
            "received a contract address:",
            "registering agent on SOEF.",
            "searching for sellers, search_id=",
            "searching for buyers, search_id=",
            "found potential sellers agents=",
            "found potential buyers agents=",
            "sending CFP to agent=",
            "received cfp from",
            "received propose from",
            "received decline from",
            "received accept from",
            "received match_accept_w_inform from",
            "sending propose to",
            "sending accept to",
            "requesting batch transaction hash, sending get_raw_message to fetchai/erc1155:0.14.0, message=",
            "requesting batch atomic swap transaction, sending get_raw_transaction to fetchai/erc1155:0.14.0, message=",
            "received raw transaction=",
            "received raw message=",
            "proposing the transaction to the decision maker. Waiting for confirmation ...",
            "proposing the message to the decision maker. Waiting for confirmation ...",
            "received signed_message from decision_maker, message=",
            "received signed_transaction from decision_maker, message=",
            "sending send_signed_transaction to ledger ethereum, message=",
        )
        missing_strings = self.missing_from_output(
            tac_aea_two_process, check_strings, timeout=360, is_terminating=False
        )
        assert (
            missing_strings == []
        ), "Strings {} didn't appear in tac_aea_two output.".format(missing_strings)

        # Note, we do not need to check std output of the other participant as it is implied
        self.terminate_agents(
            tac_controller_process, tac_aea_one_process, tac_aea_two_process
        )
        assert (
            self.is_successfully_terminated()
        ), "Agents weren't successfully terminated."
| 41.560117
| 120
| 0.6105
| 3,166
| 28,344
| 5.202464
| 0.111497
| 0.030356
| 0.023739
| 0.026714
| 0.88501
| 0.864125
| 0.858479
| 0.837836
| 0.825512
| 0.820533
| 0
| 0.019417
| 0.287715
| 28,344
| 681
| 121
| 41.621145
| 0.796424
| 0.068657
| 0
| 0.727758
| 0
| 0.003559
| 0.344999
| 0.099901
| 0
| 0
| 0
| 0
| 0.030249
| 1
| 0.003559
| false
| 0
| 0.014235
| 0
| 0.024911
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.