hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
135152c9dd9da9ede2d7290397493e2ab1259931
| 3,573
|
bzl
|
Python
|
tensorflow/core/platform/build_config.bzl
|
hugosjoberg/tensorflow
|
c91c02353d9c6c1b0c851b10e29beb9be23a7597
|
[
"Apache-2.0"
] | 1
|
2020-09-22T16:29:56.000Z
|
2020-09-22T16:29:56.000Z
|
tensorflow/core/platform/build_config.bzl
|
ramoslin/tensorflow
|
c91c02353d9c6c1b0c851b10e29beb9be23a7597
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/core/platform/build_config.bzl
|
ramoslin/tensorflow
|
c91c02353d9c6c1b0c851b10e29beb9be23a7597
|
[
"Apache-2.0"
] | null | null | null |
"""Provides a redirection point for platform specific implementations of starlark utilities."""
load(
"//tensorflow/core/platform:default/build_config.bzl",
_pyx_library = "pyx_library",
_tf_additional_all_protos = "tf_additional_all_protos",
_tf_additional_binary_deps = "tf_additional_binary_deps",
_tf_additional_core_deps = "tf_additional_core_deps",
_tf_additional_cupti_test_flags = "tf_additional_cupti_test_flags",
_tf_additional_cupti_utils_cuda_deps = "tf_additional_cupti_utils_cuda_deps",
_tf_additional_device_tracer_srcs = "tf_additional_device_tracer_srcs",
_tf_additional_env_hdrs = "tf_additional_env_hdrs",
_tf_additional_lib_deps = "tf_additional_lib_deps",
_tf_additional_lib_hdrs = "tf_additional_lib_hdrs",
_tf_additional_monitoring_hdrs = "tf_additional_monitoring_hdrs",
_tf_additional_proto_hdrs = "tf_additional_proto_hdrs",
_tf_additional_rpc_deps = "tf_additional_rpc_deps",
_tf_additional_tensor_coding_deps = "tf_additional_tensor_coding_deps",
_tf_additional_test_deps = "tf_additional_test_deps",
_tf_additional_test_srcs = "tf_additional_test_srcs",
_tf_fingerprint_deps = "tf_fingerprint_deps",
_tf_jspb_proto_library = "tf_jspb_proto_library",
_tf_kernel_tests_linkstatic = "tf_kernel_tests_linkstatic",
_tf_lib_proto_parsing_deps = "tf_lib_proto_parsing_deps",
_tf_proto_library = "tf_proto_library",
_tf_proto_library_cc = "tf_proto_library_cc",
_tf_proto_library_py = "tf_proto_library_py",
_tf_protobuf_compiler_deps = "tf_protobuf_compiler_deps",
_tf_protobuf_deps = "tf_protobuf_deps",
_tf_protos_all = "tf_protos_all",
_tf_protos_all_impl = "tf_protos_all_impl",
_tf_protos_grappler = "tf_protos_grappler",
_tf_protos_grappler_impl = "tf_protos_grappler_impl",
_tf_protos_profiler_impl = "tf_protos_profiler_impl",
_tf_py_clif_cc = "tf_py_clif_cc",
_tf_pyclif_proto_library = "tf_pyclif_proto_library",
)
pyx_library = _pyx_library
tf_additional_all_protos = _tf_additional_all_protos
tf_additional_binary_deps = _tf_additional_binary_deps
tf_additional_core_deps = _tf_additional_core_deps
tf_additional_cupti_test_flags = _tf_additional_cupti_test_flags
tf_additional_cupti_utils_cuda_deps = _tf_additional_cupti_utils_cuda_deps
tf_additional_device_tracer_srcs = _tf_additional_device_tracer_srcs
tf_additional_env_hdrs = _tf_additional_env_hdrs
tf_additional_lib_deps = _tf_additional_lib_deps
tf_additional_lib_hdrs = _tf_additional_lib_hdrs
tf_additional_monitoring_hdrs = _tf_additional_monitoring_hdrs
tf_additional_proto_hdrs = _tf_additional_proto_hdrs
tf_additional_rpc_deps = _tf_additional_rpc_deps
tf_additional_tensor_coding_deps = _tf_additional_tensor_coding_deps
tf_additional_test_deps = _tf_additional_test_deps
tf_additional_test_srcs = _tf_additional_test_srcs
tf_fingerprint_deps = _tf_fingerprint_deps
tf_jspb_proto_library = _tf_jspb_proto_library
tf_kernel_tests_linkstatic = _tf_kernel_tests_linkstatic
tf_lib_proto_parsing_deps = _tf_lib_proto_parsing_deps
tf_proto_library = _tf_proto_library
tf_proto_library_cc = _tf_proto_library_cc
tf_proto_library_py = _tf_proto_library_py
tf_protobuf_compiler_deps = _tf_protobuf_compiler_deps
tf_protobuf_deps = _tf_protobuf_deps
tf_protos_all = _tf_protos_all
tf_protos_all_impl = _tf_protos_all_impl
tf_protos_grappler = _tf_protos_grappler
tf_protos_grappler_impl = _tf_protos_grappler_impl
tf_protos_profiler_impl = _tf_protos_profiler_impl
tf_py_clif_cc = _tf_py_clif_cc
tf_pyclif_proto_library = _tf_pyclif_proto_library
| 50.323944
| 95
| 0.854744
| 527
| 3,573
| 4.950664
| 0.108159
| 0.275968
| 0.171713
| 0.045995
| 0.952089
| 0.952089
| 0.952089
| 0.952089
| 0.952089
| 0.952089
| 0
| 0
| 0.095158
| 3,573
| 70
| 96
| 51.042857
| 0.80699
| 0.024909
| 0
| 0
| 0
| 0
| 0.220529
| 0.173951
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.029851
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
137f3a2d6b0365fb90492e1b344c4687772432d5
| 42,734
|
py
|
Python
|
test/core_tests/test_parameters.py
|
webbjj/amuse
|
83b2eac906a59f999516418192ff0b263420b27f
|
[
"Apache-2.0"
] | null | null | null |
test/core_tests/test_parameters.py
|
webbjj/amuse
|
83b2eac906a59f999516418192ff0b263420b27f
|
[
"Apache-2.0"
] | 1
|
2020-01-27T17:01:49.000Z
|
2020-01-28T02:09:55.000Z
|
test/core_tests/test_parameters.py
|
webbjj/amuse
|
83b2eac906a59f999516418192ff0b263420b27f
|
[
"Apache-2.0"
] | null | null | null |
import warnings
from amuse.test import amusetest
from amuse.support.exceptions import AmuseException, AmuseWarning
from amuse.units import nbody_system, generic_unit_system, generic_unit_converter
from amuse.units import units
from amuse.datamodel import parameters
from amuse.support.interface import HandleParameters
from amuse.support.interface import InCodeComponentImplementation
class BaseTestModule(object):
def before_get_parameter(self):
return
def before_set_parameter(self):
return
class TestMethodParameterDefintions(amusetest.TestCase):
def test1(self):
class TestModule(BaseTestModule):
def get_test(self):
return 123 | units.m
o = TestModule()
set = parameters.Parameters([parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
0.1 | units.m)], o)
x = set.get_parameter("test_name")
value = x.get_value()
self.assertTrue(value.unit.has_same_base_as(units.m))
self.assertEqual(value.value_in(units.m), 123)
def test2(self):
definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
0.1 | units.m)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
set = parameters.Parameters([definition,], o)
x = set.get_parameter("test_name")
x.set_value(10|units.m)
self.assertEqual(o.x, 10|units.m)
value = x.get_value()
self.assertEqual(value, 10|units.m)
def test3(self):
definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
0.1 | units.no_unit)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
set = parameters.Parameters([definition,], o)
x = set.get_parameter("test_name")
x.set_value(10|units.none)
self.assertEqual(o.x, 10|units.none)
value = x.get_value()
self.assertEqual(value, 10)
def test4(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
class TestModuleBinding(object):
parameter_definitions = [parameter_definition]
def __init__(self):
self.parameters = parameters.Parameters(self.parameter_definitions, self)
class TestInterface(TestModule, TestModuleBinding):
def __init__(self):
TestModuleBinding.__init__(self)
instance = TestInterface()
self.assertTrue('test_name' in list(instance.parameters.names()))
instance.parameters.test_name = 1 | units.km
self.assertEqual(1 | units.km, instance.parameters.test_name)
self.assertEqual(1000 | units.m, instance.x)
def test5(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
None,
"set_test",
"test_name",
"a test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
class TestModuleBinding(object):
parameter_definitions = [parameter_definition]
def __init__(self):
self.parameters = parameters.Parameters(self.parameter_definitions, self)
class TestInterface(TestModule, TestModuleBinding):
def __init__(self):
TestModuleBinding.__init__(self)
instance = TestInterface()
self.assertTrue('test_name' in list(instance.parameters.names()))
instance.parameters.test_name = 1 | units.km
self.assertEqual(1 | units.km, instance.parameters.test_name)
self.assertEqual(1000 | units.m, instance.x)
def test6(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
"bla"
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
class TestModuleBinding(object):
parameter_definitions = [parameter_definition]
def __init__(self):
self.parameters = parameters.Parameters(self.parameter_definitions, self)
class TestInterface(TestModule, TestModuleBinding):
def __init__(self):
TestModuleBinding.__init__(self)
instance = TestInterface()
instance.parameters.test_name = "bla"
self.assertEqual("bla", instance.x)
instance.parameters.test_name = "bla"
self.assertEqual("bla", instance.x )
def test8(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
instance = TestModule()
p = parameters.Parameters([parameter_definition], instance)
p.set_defaults()
self.assertEqual(11.0 | units.m, instance.x)
def test9(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
instance = TestModule()
p = parameters.Parameters([parameter_definition], instance)
self.assertRaises(AmuseException, lambda: p.unknown,
expected_message = "tried to get unknown parameter 'unknown' for a 'TestModule' object")
with warnings.catch_warnings(record=True) as w:
p.unknown = 10 | units.m
self.assertEqual(len(w), 1)
self.assertEqual("tried to set unknown parameter 'unknown' for a 'TestModule' object", str(w[-1].message))
def test10(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
instance = TestModule()
p = parameters.Parameters([parameter_definition], instance)
instance.x = 1 | units.m
self.assertEqual(p.test_name, 1 | units.m)
def try_set_read_only_parameter(parameter_set):
parameter_set.test_name = 2 | units.m
self.assertRaises(AmuseException, try_set_read_only_parameter, p,
expected_message = "Could not set value for parameter 'test_name' of a 'TestModule' object, parameter is read-only")
def test11(self):
parameter_definition1 = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleMethodParameterDefinition(
"get_test1",
"set_test1",
"test_name2",
"a test parameter",
12.0 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
def get_test1(self):
return self.y
def set_test1(self, value):
self.y = value
instance = TestModule()
p = parameters.Parameters([parameter_definition1, parameter_definition2], instance)
instance.x = 1 | units.m
instance.y = 2 | units.m
self.assertEqual(p.test_name, 1 | units.m)
self.assertEqual(p.test_name2, 2 | units.m)
p.test_name = 20 | units.m
p.send_not_set_parameters_to_code()
self.assertEqual(instance.x, 20 | units.m)
self.assertEqual(instance.y, 12 | units.m)
def test12(self):
parameter_definition = parameters.ModuleVectorMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
[0.1, 0.2, 0.3] | units.km,
True
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x, self.y, self.z
def set_test(self, x, y, z):
self.x = x
self.y = y
self.z = z
class TestModuleBinding(object):
parameter_definitions = [parameter_definition]
def __init__(self):
self.parameters = parameters.Parameters(self.parameter_definitions, self)
class TestInterface(TestModule, TestModuleBinding):
def __init__(self):
TestModuleBinding.__init__(self)
instance = TestInterface()
self.assertTrue('test_name' in list(instance.parameters.names()))
self.assertEqual([0.1, 0.2, 0.3] | units.km, instance.parameters.test_name)
instance.parameters.test_name = [1, 2, 3] | units.km
self.assertEqual([1, 2, 3] | units.km, instance.parameters.test_name)
self.assertEqual(1000 | units.m, instance.x)
class TestInterfaceParameterDefintions(amusetest.TestCase):
def test1(self):
class TestModule(BaseTestModule):
pass
o = TestModule()
set = parameters.Parameters([parameters.InterfaceParameterDefinition(
"test_name",
"a test parameter",
0.1 | units.m)], o)
x = set.get_parameter("test_name")
value = x.get_value()
self.assertTrue(value.unit.has_same_base_as(units.m))
self.assertEqual(value.value_in(units.m), 0.1)
def test2(self):
definition = parameters.InterfaceParameterDefinition(
"test_name",
"a test parameter",
0.1 | units.m)
class TestModule(BaseTestModule):
pass
o = TestModule()
set = parameters.Parameters([definition,], o)
x = set.get_parameter("test_name")
x.set_value(10|units.m)
value = x.get_value()
self.assertEqual(value, 10|units.m)
def test4(self):
parameter_definition = parameters.InterfaceParameterDefinition(
"test_name",
"a test parameter",
0.1 | units.m,
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
class TestModuleBinding(object):
parameter_definitions = [parameter_definition]
def __init__(self):
self.parameters = parameters.Parameters(self.parameter_definitions, self)
class TestInterface(TestModule, TestModuleBinding):
def __init__(self):
TestModuleBinding.__init__(self)
instance = TestInterface()
self.assertTrue('test_name' in list(instance.parameters.names()))
instance.parameters.test_name = 1 | units.km
self.assertEqual(1 | units.km, instance.parameters.test_name)
def test5(self):
parameter_definition = parameters.InterfaceParameterDefinition(
"test_name",
"a test parameter",
0.1 | units.m,
"before_"
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
def before_(self):
self.before_called=True
pass
class TestModuleBinding(object):
parameter_definitions = [parameter_definition]
def __init__(self):
self.parameters = parameters.Parameters(self.parameter_definitions, self)
class TestInterface(TestModule, TestModuleBinding):
def __init__(self):
TestModuleBinding.__init__(self)
instance = TestInterface()
self.assertTrue('test_name' in list(instance.parameters.names()))
self.assertRaises(Exception,lambda: getattr(instance,"before_called"))
instance.parameters.test_name = 1 | units.km
self.assertEqual(1 | units.km, instance.parameters.test_name)
self.assertEqual(instance.before_called,True)
class TestParameters(amusetest.TestCase):
def test1(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
class TestModule(BaseTestModule):
x = 123 | units.m
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.Parameters([parameter_definition], o)
value = x.test_name
self.assertTrue(value.unit.has_same_base_as(units.m))
self.assertEqual(value.value_in(units.m), 123)
def test2(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123 | nbody_system.length
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.Parameters([parameter_definition], o)
self.assertEqual(x.test_name, 123 | nbody_system.length)
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertAlmostEqual(y.test_name.value_in(units.m), 246.0, 6)
y.test_name = 500 | units.m
self.assertAlmostEqual(y.test_name.value_in(units.m), 500.0, 6)
print(x.test_name, o.x)
self.assertAlmostEqual(x.test_name.value_in(nbody_system.length), 250.0, 6)
self.assertAlmostEqual(o.x, 250.0 | nbody_system.length, 6)
def test3(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123 | units.m
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.new_parameters_instance_with_docs([parameter_definition], o)
self.assertTrue("test_name" in x.__doc__)
self.assertTrue("a test parameter" in x.__doc__)
self.assertTrue("default" in x.__doc__)
self.assertTrue("11.0 length" in x.__doc__)
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.new_parameters_with_units_converted_instance_with_docs(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertTrue("test_name" in y.__doc__)
self.assertTrue("a test parameter" in y.__doc__)
self.assertTrue("default" in y.__doc__)
self.assertTrue("22.0 m" in y.__doc__)
def test3b(self):
# Same test as test3, but testing on the class, not instance
# This makes sure the python 'help' functionality works on parameters
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123 | units.m
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.new_parameters_instance_with_docs([parameter_definition], o)
self.assertTrue("test_name" in x.__class__.__doc__)
self.assertTrue("a test parameter" in x.__class__.__doc__)
self.assertTrue("default" in x.__class__.__doc__)
self.assertTrue("11.0 length" in x.__class__.__doc__)
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.new_parameters_with_units_converted_instance_with_docs(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertTrue("test_name" in y.__class__.__doc__)
self.assertTrue("a test parameter" in y.__class__.__doc__)
self.assertTrue("default" in y.__class__.__doc__)
self.assertTrue("22.0 m" in y.__class__.__doc__)
def test4(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123.0 | nbody_system.length
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.Parameters([parameter_definition], o)
self.assertTrue("test_name" in str(x))
self.assertTrue("123.0 length" in str(x))
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertTrue("test_name" in str(y))
self.assertTrue("246.0 m" in str(y))
def test5(self):
print("Test 5: testing mixed nbody and physical units")
phys_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"phys_test_name",
"a test parameter with physical units",
11.0 | units.m
)
nbody_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test1",
"set_test1",
"nbody_test_name",
"a test parameter with nbody units",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123.0 | units.m
y = 123.0 | nbody_system.length
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
def get_test1(self):
return self.y
def set_test1(self, value):
self.y = value
o = TestModule()
x = parameters.Parameters([phys_parameter_definition, nbody_parameter_definition], o)
self.assertTrue("nbody_test_name" in str(x))
self.assertTrue("123.0 length" in str(x))
self.assertTrue("phys_test_name" in str(x))
self.assertTrue("123.0 m" in str(x))
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertEqual(getattr(y,"phys_test_name"), 123.0 | units.m)
self.assertAlmostEqual(getattr(y,"nbody_test_name"), 246.0 | units.m)
y.phys_test_name = 1234.0 | units.m
self.assertEqual(y.phys_test_name, 1234.0 | units.m)
y.nbody_test_name = 12345.0 | nbody_system.length
self.assertAlmostEqual(y.nbody_test_name, 24690.0 | units.m)
y.nbody_test_name = 12345.0 | units.m
self.assertEqual(y.nbody_test_name, 12345.0 | units.m)
def test6(self):
print("Test 5: testing mixed nbody and string units")
nbody_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_nbody",
None,
"nbody_par_name",
"a test parameter with nbody units",
11.0 | nbody_system.length
)
string_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_string",
None,
"string_par_name",
"a test parameter with string units",
"test string"
)
class TestModule(BaseTestModule):
x = 123.0 | nbody_system.length
def get_nbody(self):
return self.x
def get_string(self):
return str(10 * self.x.number )
o = TestModule()
x = parameters.Parameters([string_parameter_definition, nbody_parameter_definition], o)
self.assertTrue("nbody_par_name" in str(x))
self.assertTrue("123.0 length" in str(x))
self.assertTrue("string_par_name" in str(x))
self.assertTrue("1230.0" in str(x))
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertEqual(getattr(y,"string_par_name"), "1230.0")
self.assertAlmostEqual(getattr(y,"nbody_par_name"), 246.0 | units.m)
def test7(self):
parameter_definition1 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg1",
"test_par1",
"a test parameter (1)",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg2",
"test_par2",
"a test parameter (2)",
12.0 | units.m
)
class TestModule(BaseTestModule):
x = 123 | units.m
y = 456 | units.m
def initialize_vars(self, arg1, arg2):
self.x = arg1
self.y = arg2
o = TestModule()
x = parameters.Parameters([parameter_definition1, parameter_definition2], o)
x.test_par1 = 20 | units.m
print(x.test_par1)
self.assertEqual(x.test_par1, 20 | units.m)
self.assertEqual(x.test_par2, 12 | units.m)
self.assertEqual(o.x, 123 | units.m)
self.assertEqual(o.y, 456 | units.m)
x.send_cached_parameters_to_code()
self.assertEqual(o.x, 20 | units.m)
self.assertEqual(o.y, 12 | units.m)
def test8(self):
parameter_definition1 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg1",
"test_par1",
"a test parameter (1)",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg2",
"test_par2",
"a test parameter (2)",
12.0 | units.m
)
parameter_definition3 = parameters.ModuleCachingParameterDefinition(
"initialize_vars2",
"arg1",
"test_par3",
"a test parameter (3)",
14.0 | units.m
)
class TestModule(BaseTestModule):
x = 123 | units.m
y = 456 | units.m
z = 100 | units.m
def initialize_vars(self, arg1, arg2):
self.x = arg1
self.y = arg2
return 0
def initialize_vars2(self, arg1):
self.z = arg1
return 0
o = TestModule()
x = parameters.Parameters([parameter_definition1, parameter_definition2, parameter_definition3], o)
x.send_cached_parameters_to_code()
self.assertEqual(o.x, 11 | units.m)
self.assertEqual(o.y, 12 | units.m)
self.assertEqual(o.z, 14 | units.m)
def test9(self):
parameter_definition1 = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleMethodParameterDefinition(
"get_test1",
"set_test1",
"test_name2",
"a test parameter",
12.0 | units.m
)
paramer_definition3 = parameters.VectorParameterDefinition(
"test_vector",
"vector of parameters",
["test_name", "test_name2"],
[11.0, 12.0] | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
def get_test1(self):
return self.y
def set_test1(self, value):
self.y = value
instance = TestModule()
instance.x = 1 | units.m
instance.y = 2 | units.m
p = parameters.Parameters([parameter_definition1, parameter_definition2, paramer_definition3], instance)
self.assertEqual(p.test_vector, (1,2) | units.m)
p.test_vector = (3,4) | units.m
self.assertEqual(instance.x, 3 | units.m)
self.assertEqual(instance.y, 4 | units.m)
def test10(self):
print("Testing ParametersWithUnitsConverted on vector parameters")
definitions = []
for par_name in ["length_x", "length_y", "length_z"]:
definitions.append(parameters.ModuleMethodParameterDefinition(
"get_"+par_name,
"set_"+par_name,
par_name,
"a test parameter",
10.0 | generic_unit_system.length
))
definitions.append(parameters.VectorParameterDefinition(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z"),
[10, 10, 10] | generic_unit_system.length
))
class TestModule(BaseTestModule):
x = 123.0 | generic_unit_system.length
y = 456.0 | generic_unit_system.length
z = 789.0 | generic_unit_system.length
def get_length_x(self):
return self.x
def set_length_x(self, value):
self.x = value
def get_length_y(self):
return self.y
def set_length_y(self, value):
self.y = value
def get_length_z(self):
return self.z
def set_length_z(self, value):
self.z = value
o = TestModule()
x = parameters.Parameters(definitions, o)
self.assertTrue("mesh_length" in str(x))
self.assertTrue("[123.0, 456.0, 789.0] length" in str(x))
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(2.0 | units.m, 4.0 | units.kg, 6.0 | units.s)
y = parameters.ParametersWithUnitsConverted(
x,
converter.as_converter_from_si_to_generic()
)
self.assertTrue("mesh_length" in str(y))
self.assertTrue("[246.0, 912.0, 1578.0] m" in str(y))
def test11(self):
print("Testing ParametersWithUnitsConverted on vector parameters, using add_vector_parameter")
class TestModule(BaseTestModule):
x = 123.0 | generic_unit_system.length
y = 456.0 | generic_unit_system.length
z = 789.0 | generic_unit_system.length
def get_length_x(self):
return self.x
def set_length_x(self, value):
self.x = value
def get_length_y(self):
return self.y
def set_length_y(self, value):
self.y = value
def get_length_z(self):
return self.z
def set_length_z(self, value):
self.z = value
o = TestModule()
parameters_handler = HandleParameters(o)
parameters_handler.add_vector_parameter(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z")
)
for par_name in ["length_x", "length_y", "length_z"]:
parameters_handler.add_method_parameter(
"get_"+par_name,
"set_"+par_name,
par_name,
"a test parameter",
default_value = 10.0 | generic_unit_system.length,
)
x = parameters_handler.get_attribute(None, None)
self.assertTrue("mesh_length" in str(x))
self.assertTrue("[123.0, 456.0, 789.0] length" in str(x))
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(2.0 | units.m, 4.0 | units.kg, 6.0 | units.s)
y = parameters.ParametersWithUnitsConverted(
x,
converter.as_converter_from_si_to_generic()
)
self.assertTrue("mesh_length" in str(y))
self.assertTrue("[246.0, 912.0, 1578.0] m" in str(y))
def test12(self):
definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
set = parameters.Parameters([definition,], o)
set.test_name = 10|units.m
self.assertEqual(o.x, 10|units.m)
self.assertEqual(set.test_name, 10|units.m)
memento = set.copy()
self.assertEqual(memento.test_name, 10|units.m)
set.test_name = 20|units.m
self.assertEqual(o.x, 20|units.m)
self.assertEqual(set.test_name, 20|units.m)
self.assertEqual(memento.test_name, 10|units.m)
set.reset_from_memento(memento)
self.assertEqual(o.x, 10|units.m)
self.assertEqual(set.test_name, 10|units.m)
self.assertEqual(memento.test_name, 10|units.m)
def test13(self):
definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a read-only test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
x = 0.1 | units.m
def get_test(self):
return self.x
o = TestModule()
set = parameters.Parameters([definition,], o)
self.assertRaises(AmuseException, setattr, set, "test_name", 1.0 | units.m,
expected_message = "Could not set value for parameter 'test_name' of a 'TestModule' object, parameter is read-only")
self.assertEqual(o.x, 0.1|units.m)
self.assertEqual(set.test_name, 0.1|units.m)
memento = set.copy()
self.assertEqual(memento.test_name, 0.1|units.m)
set.reset_from_memento(memento)
self.assertEqual(o.x, 0.1|units.m)
self.assertEqual(set.test_name, 0.1|units.m)
memento.test_name = 2.0 | units.m
self.assertEqual(memento.test_name, 2.0|units.m)
with warnings.catch_warnings(record=True) as w:
set.reset_from_memento(memento)
self.assertEqual(len(w), 1)
self.assertEqual("tried to change read-only parameter 'test_name' for a 'TestModule' object", str(w[-1].message))
self.assertEqual(o.x, 0.1|units.m)
self.assertEqual(set.test_name, 0.1|units.m)
self.assertEqual(memento.test_name, 2.0|units.m)
def test14(self):
definition = parameters.InterfaceParameterDefinition(
"test_name",
"a read-only test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
pass
o = TestModule()
set = parameters.Parameters([definition,], o)
self.assertEqual(set.test_name, 0.1|units.m)
memento = set.copy()
self.assertEqual(memento.test_name, 0.1|units.m)
memento.test_name=2.|units.m
set.reset_from_memento(memento)
self.assertEqual(set.test_name, 2.|units.m)
def test15(self):
definition = parameters.InterfaceParameterDefinition(
"test_name",
"a read-only test parameter",
0.1
)
class TestModule(BaseTestModule):
pass
o = TestModule()
set = parameters.Parameters([definition,], o)
import numpy
b=numpy.array(2)
set.test_name=b
b*=2
self.assertEqual(set.test_name,2)
def test16(self):
print("Testing add_interface_parameter")
class TestModule(BaseTestModule):
pass
o = TestModule()
parameters_handler = HandleParameters(o)
parameters_handler.add_vector_parameter(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z")
)
for i,par_name in enumerate(["length_x", "length_y", "length_z"]):
parameters_handler.add_interface_parameter(
par_name,
"a test parameter",
default_value = i*10.0 | generic_unit_system.length,
)
x = parameters_handler.get_attribute(None, None)
self.assertTrue("mesh_length" in str(x))
self.assertTrue("[0.0, 10.0, 20.0] length" in str(x))
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(2.0 | units.m, 4.0 | units.kg, 6.0 | units.s)
y = parameters.ParametersWithUnitsConverted(
x,
converter.as_converter_from_si_to_generic()
)
self.assertTrue("mesh_length" in str(y))
self.assertTrue("[0.0, 20.0, 40.0] m" in str(y))
def test17(self):
print("Testing ParametersWithUnitsConverted on vector parameters, using add_vector_parameter")
class TestModule(BaseTestModule):
x = [1.,2.,3.] | generic_unit_system.length
def get_length(self,i):
return self.x[i]
def set_length(self, i,value):
self.x[i] = value
def range(self):
return 0,len(self.x)-1
o = TestModule()
parameters_handler = HandleParameters(o)
parameters_handler.add_array_parameter(
"get_length",
"set_length",
"range",
"length",
"description"
)
x = parameters_handler.get_attribute(None, None)
self.assertTrue("length" in str(x))
self.assertTrue("[1.0, 2.0, 3.0] length" in str(x))
def test18(self):
print("Testing array parameters")
definitions = []
definitions.append(parameters.ModuleArrayParameterDefinition(
"get",
"set",
"range",
"param",
"a test parameter"
))
class TestModule(BaseTestModule):
x = [1.,2.,3.] | generic_unit_system.length
def get(self,i):
return self.x[i]
def set(self,i, value):
self.x[i] = value
def range(self):
return 0, len(self.x)-1
o = TestModule()
x = parameters.Parameters(definitions, o)
self.assertEqual(x.param, [1.,2.,3.] | generic_unit_system.length)
x.param*=2
self.assertEqual(x.param, [2.,4.,6.] | generic_unit_system.length)
def test19(self):
print("Testing multiple parameter sets")
class TestModule(BaseTestModule):
x = 123.0 | generic_unit_system.length
y = 456.0 | generic_unit_system.length
z = 789.0 | generic_unit_system.length
def get_length_x(self):
return self.x
def set_length_x(self, value):
self.x = value
def get_length_y(self):
return self.y
def set_length_y(self, value):
self.y = value
def get_length_z(self):
return self.z
def set_length_z(self, value):
self.z = value
o = TestModule()
parameters_handler = HandleParameters(o)
for par_name in ["length_x", "length_y", "length_z"]:
parameters_handler.add_method_parameter(
"get_"+par_name,
"set_"+par_name,
par_name,
"a test parameter",
default_value = 10.0 | generic_unit_system.length,
parameter_set = par_name+"_set"
)
for i,par_name in enumerate(["length_x", "length_y", "length_z"]):
x = parameters_handler.get_attribute(par_name+"_set", None)
self.assertTrue([123.0, 456.0, 789.0][i] == getattr(x,par_name).number)
def test20(self):
print("Testing multiple parameter sets 2")
class TestInterface(BaseTestModule):
x = 123.0
y = 456.0
def get_x(self):
return self.x
def set_x(self, value):
self.x = value
def get_y(self):
return self.y
def set_y(self, value):
self.y = value
class Testing(InCodeComponentImplementation):
def __init__(self, **options):
InCodeComponentImplementation.__init__(self, TestInterface(), **options)
def define_parameters(self,object):
object.add_method_parameter(
"get_x", "set_x", "x", "test parameter", 123.
)
object.add_method_parameter(
"get_y", "set_y", "y", "test parameter 2", 456.,
parameter_set="parameters2"
)
object.add_alias_parameter(
"y_alias","y", " new y", parameter_set="parameters2"
)
t=Testing()
self.assertEqual(set(t.parameter_set_names()), set(('parameters','parameters2')))
self.assertEqual(t.parameters.x,123.)
self.assertEqual(t.parameters2.y,456.)
t.parameters2.y=789.
self.assertEqual(t.parameters2.y,789.)
self.assertEqual(t.parameters2.y_alias,789.)
def test21(self):
print("Test change in parameter sets")
class TestInterface(BaseTestModule):
x = 123.0
y = 456.0
def get_x(self):
return self.x
def set_x(self, value):
self.x = value
def get_y(self):
return self.y
def set_y(self, value):
self.y = value
class Testing(InCodeComponentImplementation):
def __init__(self, **options):
InCodeComponentImplementation.__init__(self, TestInterface(), **options)
def define_parameters(self,handler):
handler.add_method_parameter(
"get_x", "set_x", "x", "test parameter", 123.
)
def define_additional_parameters(self):
handler=self.get_handler('PARAMETER')
handler.add_method_parameter(
"get_y", "set_y", "y", "test parameter 2", 456.,
parameter_set="parameters2"
)
handler.add_alias_parameter(
"y_alias","y", " new y", parameter_set="parameters2"
)
handler.add_method_parameter(
"get_y", "set_y", "y", "test parameter", 456.
)
t=Testing()
self.assertEqual(set(t.parameter_set_names()), set(('parameters',)))
t.define_additional_parameters()
self.assertEqual(set(t.parameter_set_names()), set(('parameters','parameters2')))
self.assertEqual(t.parameters.x,123.)
self.assertEqual(t.parameters2.y,456.)
t.parameters2.y=789.
self.assertEqual(t.parameters2.y,789.)
self.assertEqual(t.parameters2.y_alias,789.)
self.assertEqual(t.parameters.y,789.)
| 32.847041
| 128
| 0.557846
| 4,576
| 42,734
| 5.006774
| 0.049388
| 0.032735
| 0.025665
| 0.028414
| 0.872027
| 0.841997
| 0.801318
| 0.781852
| 0.737026
| 0.69399
| 0
| 0.029445
| 0.344363
| 42,734
| 1,300
| 129
| 32.872308
| 0.788279
| 0.002948
| 0
| 0.73209
| 0
| 0.001963
| 0.09248
| 0.002512
| 0
| 0
| 0
| 0
| 0.142296
| 1
| 0.140334
| false
| 0.005888
| 0.008832
| 0.045142
| 0.250245
| 0.011776
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b92cf98b271adcfb3b7dd6fe5fb5a26b6512720c
| 47
|
py
|
Python
|
src/evaluation/__init__.py
|
vineethcv/Kaggle-House_price
|
970a7ecf05970bea970fe2d843af5e7c3c0d3e2b
|
[
"MIT"
] | 1
|
2021-06-04T08:43:28.000Z
|
2021-06-04T08:43:28.000Z
|
src/evaluation/__init__.py
|
vineethcv/Kaggle-House_price
|
970a7ecf05970bea970fe2d843af5e7c3c0d3e2b
|
[
"MIT"
] | null | null | null |
src/evaluation/__init__.py
|
vineethcv/Kaggle-House_price
|
970a7ecf05970bea970fe2d843af5e7c3c0d3e2b
|
[
"MIT"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
from __future__ import print_function, division
| 47
| 47
| 0.893617
| 6
| 47
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.860465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
b92e5730a0bf005bdc5ff42938439c38f0dcc281
| 124,606
|
py
|
Python
|
octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py
|
elastx/octavia
|
6253560d22f255c499c91612ac4286dd0d8329e1
|
[
"Apache-2.0"
] | 13
|
2015-01-15T21:18:42.000Z
|
2015-05-22T18:15:54.000Z
|
octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py
|
stackforge/octavia
|
a94b3101e8005ddd84a4333aa237dbbe3e0c2b43
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py
|
stackforge/octavia
|
a94b3101e8005ddd84a4333aa237dbbe3e0c2b43
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import socket
import stat
import subprocess
from unittest import mock
import fixtures
from oslo_config import fixture as oslo_fixture
from oslo_serialization import jsonutils
from oslo_utils.secretutils import md5
from oslo_utils import uuidutils
from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import certificate_update
from octavia.amphorae.backends.agent.api_server import server
from octavia.amphorae.backends.agent.api_server import util
from octavia.common import config
from octavia.common import constants as consts
from octavia.common import utils as octavia_utils
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
AMP_AGENT_CONF_PATH = '/etc/octavia/amphora-agent.conf'
RANDOM_ERROR = b'random error'
OK = dict(message='OK')
FAKE_INTERFACE = 'eth33'
class TestServerTestCase(base.TestCase):
app = None
def setUp(self):
super().setUp()
self.conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
self.conf.config(group="haproxy_amphora", base_path='/var/lib/octavia')
self.conf.config(group="controller_worker",
loadbalancer_topology=consts.TOPOLOGY_SINGLE)
self.conf.load_raw_values(project='fake_project')
self.conf.load_raw_values(prog='fake_prog')
self.useFixture(fixtures.MockPatch(
'oslo_config.cfg.find_config_files',
return_value=[AMP_AGENT_CONF_PATH]))
with mock.patch('distro.id', return_value='ubuntu'), mock.patch(
'octavia.amphorae.backends.agent.api_server.plug.'
'Plug.plug_lo'):
self.ubuntu_test_server = server.Server()
self.ubuntu_app = self.ubuntu_test_server.app.test_client()
with mock.patch('distro.id', return_value='centos'), mock.patch(
'octavia.amphorae.backends.agent.api_server.plug.'
'Plug.plug_lo'):
self.centos_test_server = server.Server()
self.centos_app = self.centos_test_server.app.test_client()
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_ubuntu_haproxy_systemd(self, mock_init_system):
self._test_haproxy(consts.INIT_SYSTEMD, consts.UBUNTU,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_centos_haproxy_systemd(self, mock_init_system):
self._test_haproxy(consts.INIT_SYSTEMD, consts.CENTOS,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSVINIT)
def test_ubuntu_haproxy_sysvinit(self, mock_init_system):
self._test_haproxy(consts.INIT_SYSVINIT, consts.UBUNTU,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_UPSTART)
def test_ubuntu_haproxy_upstart(self, mock_init_system):
self._test_haproxy(consts.INIT_UPSTART, consts.UBUNTU,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.'
'haproxy_compatibility.get_haproxy_versions')
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
@mock.patch('os.rename')
@mock.patch('subprocess.check_output')
def _test_haproxy(self, init_system, distro, mock_init_system,
mock_subprocess, mock_rename,
mock_makedirs, mock_exists, mock_get_version):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
mock_get_version.return_value = [1, 6]
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mock_exists.return_value = True
file_name = '/var/lib/octavia/123/haproxy.cfg.new'
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
# happy case upstart file exists
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'distro.id') as mock_distro_id:
mock_open.return_value = 123
mock_distro_id.return_value = distro
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
mode = stat.S_IRUSR | stat.S_IWUSR
mock_open.assert_called_with(file_name, flags, mode)
mock_fdopen.assert_called_with(123, 'w')
self.assertEqual(202, rv.status_code)
m().write.assert_called_once_with('test')
mock_subprocess.assert_any_call(
"haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format(
config_file=file_name,
haproxy_ug=consts.HAPROXY_USER_GROUP_CFG,
peer=(octavia_utils.
base64_sha1_string('amp_123').rstrip('='))).split(),
stderr=-2)
mock_rename.assert_called_with(
'/var/lib/octavia/123/haproxy.cfg.new',
'/var/lib/octavia/123/haproxy.cfg')
if init_system == consts.INIT_SYSTEMD:
mock_subprocess.assert_any_call(
"systemctl enable haproxy-123".split(),
stderr=subprocess.STDOUT)
elif init_system == consts.INIT_SYSVINIT:
mock_subprocess.assert_any_call(
"insserv /etc/init.d/haproxy-123".split(),
stderr=subprocess.STDOUT)
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
# exception writing
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
m.side_effect = IOError() # open crashes
with mock.patch('os.open'), mock.patch.object(
os, 'fdopen', m), mock.patch('distro.id') as mock_distro_id:
mock_distro_id.return_value = distro
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
self.assertEqual(500, rv.status_code)
# check if files get created
mock_exists.return_value = False
if init_system == consts.INIT_SYSTEMD:
init_path = consts.SYSTEMD_DIR + '/haproxy-123.service'
elif init_system == consts.INIT_UPSTART:
init_path = consts.UPSTART_DIR + '/haproxy-123.conf'
elif init_system == consts.INIT_SYSVINIT:
init_path = consts.SYSVINIT_DIR + '/haproxy-123'
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
m = self.useFixture(test_utils.OpenFixture(init_path)).mock_open
# happy case upstart file exists
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'distro.id') as mock_distro_id:
mock_open.return_value = 123
mock_distro_id.return_value = distro
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
self.assertEqual(202, rv.status_code)
if init_system == consts.INIT_SYSTEMD:
mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP |
stat.S_IROTH)
else:
mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
mock_open.assert_called_with(init_path, flags, mode)
mock_fdopen.assert_called_with(123, 'w')
handle = mock_fdopen()
handle.write.assert_any_call('test')
# skip the template stuff
mock_makedirs.assert_called_with('/var/lib/octavia/123')
# unhappy case haproxy check fails
mock_exists.return_value = True
mock_subprocess.side_effect = [subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR)]
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'distro.id') as mock_distro_id:
mock_open.return_value = 123
mock_distro_id.return_value = distro
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
self.assertEqual(400, rv.status_code)
self.assertEqual(
{'message': 'Invalid request', u'details': u'random error'},
jsonutils.loads(rv.data.decode('utf-8')))
mode = stat.S_IRUSR | stat.S_IWUSR
mock_open.assert_called_with(file_name, flags, mode)
mock_fdopen.assert_called_with(123, 'w')
handle = mock_fdopen()
handle.write.assert_called_with('test')
mock_subprocess.assert_called_with(
"haproxy -c -L {peer} -f {config_file} -f {haproxy_ug}".format(
config_file=file_name,
haproxy_ug=consts.HAPROXY_USER_GROUP_CFG,
peer=(octavia_utils.
base64_sha1_string('amp_123').rstrip('='))).split(),
stderr=-2)
mock_rename.assert_called_with(
'/var/lib/octavia/123/haproxy.cfg.new',
'/var/lib/octavia/123/haproxy.cfg.new-failed')
# unhappy path with bogus init system
mock_init_system.return_value = 'bogus'
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'distro.id') as mock_distro_id:
mock_open.return_value = 123
mock_distro_id.return_value = distro
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
elif distro == consts.CENTOS:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/amp_123/123/haproxy',
data='test')
self.assertEqual(500, rv.status_code)
def test_ubuntu_start(self):
self._test_start(consts.UBUNTU)
def test_centos_start(self):
self._test_start(consts.CENTOS)
@mock.patch('os.listdir')
@mock.patch('os.path.exists')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'vrrp_check_script_update')
@mock.patch('subprocess.check_output')
def _test_start(self, distro, mock_subprocess, mock_vrrp, mock_exists,
mock_listdir):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/error')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/error')
self.assertEqual(400, rv.status_code)
self.assertEqual(
{'message': 'Invalid Request',
'details': 'Unknown action: error', },
jsonutils.loads(rv.data.decode('utf-8')))
mock_exists.reset_mock()
mock_exists.return_value = False
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/start')
self.assertEqual(404, rv.status_code)
self.assertEqual(
{'message': 'Loadbalancer Not Found',
'details': 'No loadbalancer with UUID: 123'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_exists.assert_called_with('/var/lib/octavia')
mock_exists.return_value = True
mock_listdir.return_value = ['123']
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/start')
self.assertEqual(202, rv.status_code)
self.assertEqual(
{'message': 'OK',
'details': 'Configuration file is valid\nhaproxy daemon for'
' 123 started'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_subprocess.assert_called_with(
['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
mock_exists.return_value = True
mock_subprocess.side_effect = subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR)
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/start')
self.assertEqual(500, rv.status_code)
self.assertEqual(
{
'message': 'Error starting haproxy',
'details': RANDOM_ERROR.decode('utf-8'),
}, jsonutils.loads(rv.data.decode('utf-8')))
mock_subprocess.assert_called_with(
['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
def test_ubuntu_reload(self):
self._test_reload(consts.UBUNTU)
def test_centos_reload(self):
self._test_reload(consts.CENTOS)
@mock.patch('os.listdir')
@mock.patch('os.path.exists')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'vrrp_check_script_update')
@mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
'Loadbalancer._check_haproxy_status')
@mock.patch('subprocess.check_output')
@mock.patch('octavia.amphorae.backends.utils.haproxy_query.HAProxyQuery')
def _test_reload(self, distro, mock_haproxy_query, mock_subprocess,
mock_haproxy_status, mock_vrrp, mock_exists,
mock_listdir):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
# Process running so reload
mock_exists.return_value = True
mock_listdir.return_value = ['123']
mock_haproxy_status.return_value = consts.ACTIVE
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/reload')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/reload')
self.assertEqual(202, rv.status_code)
self.assertEqual(
{'message': 'OK',
'details': 'Listener 123 reloaded'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_subprocess.assert_called_with(
['/usr/sbin/service', 'haproxy-123', 'reload'], stderr=-2)
# Process not running so start
mock_exists.return_value = True
mock_haproxy_status.return_value = consts.OFFLINE
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/reload')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/reload')
self.assertEqual(202, rv.status_code)
self.assertEqual(
{'message': 'OK',
'details': 'Configuration file is valid\nhaproxy daemon for'
' 123 started'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_subprocess.assert_called_with(
['/usr/sbin/service', 'haproxy-123', 'start'], stderr=-2)
def test_ubuntu_info(self):
self._test_info(consts.UBUNTU)
def test_centos_info(self):
self._test_info(consts.CENTOS)
@mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
'AmphoraInfo._get_extend_body_from_lvs_driver',
return_value={})
@mock.patch('socket.gethostname')
@mock.patch('subprocess.check_output')
def _test_info(self, distro, mock_subbprocess, mock_hostname,
mock_get_extend_body):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
mock_hostname.side_effect = ['test-host']
mock_subbprocess.side_effect = ['9.9.99-9']
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION + '/info')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION + '/info')
self.assertEqual(200, rv.status_code)
self.assertEqual(dict(
api_version='1.0',
haproxy_version='9.9.99-9',
hostname='test-host'),
jsonutils.loads(rv.data.decode('utf-8')))
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_backend_for_lb_object', return_value='HAPROXY')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_delete_ubuntu_listener_systemd(self, mock_init_system,
mock_get_proto):
self._test_delete_listener(consts.INIT_SYSTEMD, consts.UBUNTU,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_backend_for_lb_object', return_value='HAPROXY')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_delete_centos_listener_systemd(self, mock_init_system,
mock_get_proto):
self._test_delete_listener(consts.INIT_SYSTEMD, consts.CENTOS,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_backend_for_lb_object', return_value='HAPROXY')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSVINIT)
def test_delete_ubuntu_listener_sysvinit(self, mock_init_system,
mock_get_proto):
self._test_delete_listener(consts.INIT_SYSVINIT, consts.UBUNTU,
mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_backend_for_lb_object', return_value='HAPROXY')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_UPSTART)
def test_delete_ubuntu_listener_upstart(self, mock_init_system,
mock_get_proto):
self._test_delete_listener(consts.INIT_UPSTART, consts.UBUNTU,
mock_init_system)
@mock.patch('os.listdir')
@mock.patch('os.path.exists')
@mock.patch('subprocess.check_output')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'vrrp_check_script_update')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.' +
'get_haproxy_pid')
@mock.patch('shutil.rmtree')
@mock.patch('os.remove')
def _test_delete_listener(self, init_system, distro, mock_init_system,
mock_remove, mock_rmtree, mock_pid, mock_vrrp,
mock_check_output, mock_exists, mock_listdir):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
# no listener
mock_exists.return_value = False
mock_listdir.return_value = ['123']
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
mock_exists.assert_called_once_with('/var/lib/octavia')
# service is stopped + no upstart script + no vrrp
mock_exists.side_effect = [True, True, False, False, False]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual({u'message': u'OK'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_rmtree.assert_called_with('/var/lib/octavia/123')
if init_system == consts.INIT_SYSTEMD:
mock_exists.assert_called_with(consts.SYSTEMD_DIR +
'/haproxy-123.service')
elif init_system == consts.INIT_UPSTART:
mock_exists.assert_called_with(consts.UPSTART_DIR +
'/haproxy-123.conf')
elif init_system == consts.INIT_SYSVINIT:
mock_exists.assert_called_with(consts.SYSVINIT_DIR +
'/haproxy-123')
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
mock_exists.assert_any_call('/var/lib/octavia/123/123.pid')
# service is stopped + no upstart script + vrrp
mock_exists.side_effect = [True, True, False, True, False]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual({u'message': u'OK'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_rmtree.assert_called_with('/var/lib/octavia/123')
if init_system == consts.INIT_SYSTEMD:
mock_exists.assert_called_with(consts.SYSTEMD_DIR +
'/haproxy-123.service')
elif init_system == consts.INIT_UPSTART:
mock_exists.assert_called_with(consts.UPSTART_DIR +
'/haproxy-123.conf')
elif init_system == consts.INIT_SYSVINIT:
mock_exists.assert_called_with(consts.SYSVINIT_DIR +
'/haproxy-123')
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
mock_exists.assert_any_call('/var/lib/octavia/123/123.pid')
# service is stopped + upstart script + no vrrp
mock_exists.side_effect = [True, True, False, False, True]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual({u'message': u'OK'},
jsonutils.loads(rv.data.decode('utf-8')))
if init_system == consts.INIT_SYSTEMD:
mock_remove.assert_called_with(consts.SYSTEMD_DIR +
'/haproxy-123.service')
elif init_system == consts.INIT_UPSTART:
mock_remove.assert_called_with(consts.UPSTART_DIR +
'/haproxy-123.conf')
elif init_system == consts.INIT_SYSVINIT:
mock_remove.assert_called_with(consts.SYSVINIT_DIR +
'/haproxy-123')
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
# service is stopped + upstart script + vrrp
mock_exists.side_effect = [True, True, False, True, True]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual({u'message': u'OK'},
jsonutils.loads(rv.data.decode('utf-8')))
if init_system == consts.INIT_SYSTEMD:
mock_remove.assert_called_with(consts.SYSTEMD_DIR +
'/haproxy-123.service')
elif init_system == consts.INIT_UPSTART:
mock_remove.assert_called_with(consts.UPSTART_DIR +
'/haproxy-123.conf')
elif init_system == consts.INIT_SYSVINIT:
mock_remove.assert_called_with(consts.SYSVINIT_DIR +
'/haproxy-123')
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
# service is running + upstart script + no vrrp
mock_exists.side_effect = [True, True, True, True, False, True]
mock_pid.return_value = '456'
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual({u'message': u'OK'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_pid.assert_called_once_with('123')
mock_check_output.assert_any_call(
['/usr/sbin/service', 'haproxy-123', 'stop'], stderr=-2)
if init_system == consts.INIT_SYSTEMD:
mock_check_output.assert_any_call(
"systemctl disable haproxy-123".split(),
stderr=subprocess.STDOUT)
elif init_system == consts.INIT_UPSTART:
mock_remove.assert_any_call(consts.UPSTART_DIR +
'/haproxy-123.conf')
elif init_system == consts.INIT_SYSVINIT:
mock_check_output.assert_any_call(
"insserv -r /etc/init.d/haproxy-123".split(),
stderr=subprocess.STDOUT)
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
# service is running + upstart script + vrrp
mock_exists.side_effect = [True, True, True, True, True, True]
mock_pid.return_value = '456'
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(200, rv.status_code)
self.assertEqual({u'message': u'OK'},
jsonutils.loads(rv.data.decode('utf-8')))
mock_pid.assert_called_with('123')
mock_check_output.assert_any_call(
['/usr/sbin/service', 'haproxy-123', 'stop'], stderr=-2)
if init_system == consts.INIT_SYSTEMD:
mock_check_output.assert_any_call(
"systemctl disable haproxy-123".split(),
stderr=subprocess.STDOUT)
elif init_system == consts.INIT_UPSTART:
mock_remove.assert_any_call(consts.UPSTART_DIR +
'/haproxy-123.conf')
elif init_system == consts.INIT_SYSVINIT:
mock_check_output.assert_any_call(
"insserv -r /etc/init.d/haproxy-123".split(),
stderr=subprocess.STDOUT)
else:
self.assertIn(init_system, consts.VALID_INIT_SYSTEMS)
# service is running + stopping fails
mock_exists.side_effect = [True, True, True, True]
mock_check_output.side_effect = subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR)
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete('/' + api_server.VERSION +
'/listeners/123')
elif distro == consts.CENTOS:
rv = self.centos_app.delete('/' + api_server.VERSION +
'/listeners/123')
self.assertEqual(500, rv.status_code)
self.assertEqual(
{'details': 'random error', 'message': 'Error stopping haproxy'},
jsonutils.loads(rv.data.decode('utf-8')))
# that's the last call before exception
mock_exists.assert_called_with('/proc/456')
def test_ubuntu_get_haproxy(self):
self._test_get_haproxy(consts.UBUNTU)
def test_centos_get_haproxy(self):
self._test_get_haproxy(consts.CENTOS)
@mock.patch('os.listdir')
@mock.patch('os.path.exists')
def _test_get_haproxy(self, distro, mock_exists, mock_listdir):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
CONTENT = "bibble\nbibble"
mock_exists.side_effect = [False]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/loadbalancer/123/haproxy')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/loadbalancer/123/haproxy')
self.assertEqual(404, rv.status_code)
mock_exists.side_effect = [True, True]
path = util.config_path('123')
self.useFixture(test_utils.OpenFixture(path, CONTENT))
mock_listdir.return_value = ['123']
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/loadbalancer/123/haproxy')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/loadbalancer/123/haproxy')
self.assertEqual(200, rv.status_code)
self.assertEqual(octavia_utils.b(CONTENT), rv.data)
self.assertEqual('text/plain; charset=utf-8',
rv.headers['Content-Type'].lower())
def test_ubuntu_get_all_listeners(self):
self._test_get_all_listeners(consts.UBUNTU)
def test_get_all_listeners(self):
self._test_get_all_listeners(consts.CENTOS)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_loadbalancers')
@mock.patch('octavia.amphorae.backends.agent.api_server.loadbalancer.'
'Loadbalancer._check_haproxy_status')
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'parse_haproxy_file')
def _test_get_all_listeners(self, distro, mock_parse, mock_status,
mock_lbs):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
# no listeners
mock_lbs.side_effect = [[]]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION + '/listeners')
self.assertEqual(200, rv.status_code)
self.assertFalse(jsonutils.loads(rv.data.decode('utf-8')))
# one listener ACTIVE
mock_lbs.side_effect = [['123']]
mock_parse.side_effect = [['fake_socket', {'123': {'mode': 'test'}}]]
mock_status.side_effect = [consts.ACTIVE]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION + '/listeners')
self.assertEqual(200, rv.status_code)
self.assertEqual(
[{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'}],
jsonutils.loads(rv.data.decode('utf-8')))
# two listeners, two modes
mock_lbs.side_effect = [['123', '456']]
mock_parse.side_effect = [['fake_socket', {'123': {'mode': 'test'}}],
['fake_socket', {'456': {'mode': 'http'}}]]
mock_status.return_value = consts.ACTIVE
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION + '/listeners')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION + '/listeners')
self.assertEqual(200, rv.status_code)
self.assertEqual(
[{'status': consts.ACTIVE, 'type': 'test', 'uuid': '123'},
{'status': consts.ACTIVE, 'type': 'http', 'uuid': '456'}],
jsonutils.loads(rv.data.decode('utf-8')))
def test_ubuntu_delete_cert(self):
self._test_delete_cert(consts.UBUNTU)
def test_centos_delete_cert(self):
self._test_delete_cert(consts.CENTOS)
@mock.patch('os.path.exists')
@mock.patch('os.remove')
def _test_delete_cert(self, distro, mock_remove, mock_exists):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
mock_exists.side_effect = [False]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete(
'/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
elif distro == consts.CENTOS:
rv = self.centos_app.delete(
'/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
self.assertEqual(200, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
mock_exists.assert_called_once_with(
'/var/lib/octavia/certs/123/test.pem')
# wrong file name
mock_exists.side_effect = [True]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete(
'/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.bla')
elif distro == consts.CENTOS:
rv = self.centos_app.delete(
'/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.bla')
self.assertEqual(400, rv.status_code)
mock_exists.side_effect = [True]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.delete(
'/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
elif distro == consts.CENTOS:
rv = self.centos_app.delete(
'/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
self.assertEqual(200, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
mock_remove.assert_called_once_with(
'/var/lib/octavia/certs/123/test.pem')
def test_ubuntu_get_certificate_md5(self):
self._test_get_certificate_md5(consts.UBUNTU)
def test_centos_get_certificate_md5(self):
self._test_get_certificate_md5(consts.CENTOS)
@mock.patch('os.path.exists')
def _test_get_certificate_md5(self, distro, mock_exists):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
CONTENT = "TestTest"
mock_exists.side_effect = [False]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(
details='No certificate with filename: test.pem',
message='Certificate Not Found'),
jsonutils.loads(rv.data.decode('utf-8')))
mock_exists.assert_called_with('/var/lib/octavia/certs/123/test.pem')
# wrong file name
mock_exists.side_effect = [True]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.bla',
data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.bla',
data='TestTest')
self.assertEqual(400, rv.status_code)
mock_exists.return_value = True
mock_exists.side_effect = None
if distro == consts.UBUNTU:
path = self.ubuntu_test_server._loadbalancer._cert_file_path(
'123', 'test.pem')
elif distro == consts.CENTOS:
path = self.centos_test_server._loadbalancer._cert_file_path(
'123', 'test.pem')
self.useFixture(test_utils.OpenFixture(path, CONTENT))
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.pem')
self.assertEqual(200, rv.status_code)
self.assertEqual(dict(md5sum=md5(octavia_utils.b(CONTENT),
usedforsecurity=False).hexdigest()),
jsonutils.loads(rv.data.decode('utf-8')))
def test_ubuntu_upload_certificate_md5(self):
self._test_upload_certificate_md5(consts.UBUNTU)
def test_centos_upload_certificate_md5(self):
self._test_upload_certificate_md5(consts.CENTOS)
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
def _test_upload_certificate_md5(self, distro, mock_makedir, mock_exists):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
# wrong file name
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.bla',
data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/test.bla',
data='TestTest')
self.assertEqual(400, rv.status_code)
mock_exists.return_value = True
if distro == consts.UBUNTU:
path = self.ubuntu_test_server._loadbalancer._cert_file_path(
'123', 'test.pem')
elif distro == consts.CENTOS:
path = self.centos_test_server._loadbalancer._cert_file_path(
'123', 'test.pem')
m = self.useFixture(test_utils.OpenFixture(path)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/'
'test.pem', data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/'
'test.pem', data='TestTest')
self.assertEqual(200, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
handle = m()
handle.write.assert_called_once_with(octavia_utils.b('TestTest'))
mock_exists.return_value = False
m = self.useFixture(test_utils.OpenFixture(path)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/'
'test.pem', data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/loadbalancer/123/certificates/'
'test.pem', data='TestTest')
self.assertEqual(200, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
handle = m()
handle.write.assert_called_once_with(octavia_utils.b('TestTest'))
mock_makedir.assert_called_once_with('/var/lib/octavia/certs/123')
def test_ubuntu_upload_server_certificate(self):
self._test_upload_server_certificate(consts.UBUNTU)
def test_centos_upload_server_certificate(self):
self._test_upload_server_certificate(consts.CENTOS)
def _test_upload_server_certificate(self, distro):
certificate_update.BUFFER = 5 # test the while loop
path = '/etc/octavia/certs/server.pem'
m = self.useFixture(test_utils.OpenFixture(path)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/certificate', data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/certificate', data='TestTest')
self.assertEqual(202, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
handle = m()
handle.write.assert_any_call(octavia_utils.b('TestT'))
handle.write.assert_any_call(octavia_utils.b('est'))
def test_ubuntu_plug_network(self):
self._test_plug_network(consts.UBUNTU)
def test_centos_plug_network(self):
self._test_plug_network(consts.CENTOS)
@mock.patch('os.chmod')
@mock.patch('pyroute2.IPRoute', create=True)
@mock.patch('pyroute2.NetNS', create=True)
@mock.patch('subprocess.check_output')
@mock.patch('octavia.amphorae.backends.agent.api_server.'
'plug.Plug._netns_interface_exists')
@mock.patch('os.path.isfile')
def _test_plug_network(self, distro, mock_isfile, mock_int_exists,
mock_check_output, mock_netns, mock_pyroute2,
mock_os_chmod):
mock_ipr = mock.MagicMock()
mock_ipr_instance = mock.MagicMock()
mock_ipr_instance.link_lookup.side_effect = [
[], [], [33], [33], [33], [33], [33], [33], [33], [33]]
mock_ipr_instance.get_links.return_value = ({
'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
mock_ipr.__enter__.return_value = mock_ipr_instance
mock_pyroute2.return_value = mock_ipr
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
port_info = {'mac_address': '123'}
test_int_num = random.randint(0, 9999)
mock_int_exists.return_value = False
netns_handle = mock_netns.return_value.__enter__.return_value
netns_handle.get_links.return_value = [0] * test_int_num
mock_isfile.return_value = True
test_int_num = str(test_int_num)
# Interface already plugged
mock_int_exists.return_value = True
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(409, rv.status_code)
self.assertEqual(dict(message="Interface already exists"),
jsonutils.loads(rv.data.decode('utf-8')))
mock_int_exists.return_value = False
# No interface at all
file_name = '/sys/bus/pci/rescan'
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
mock_open.assert_called_with(file_name, os.O_WRONLY)
mock_fdopen.assert_called_with(123, 'w')
m().write.assert_called_once_with('1')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8')))
# No interface down
m().reset_mock()
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
mock_open.assert_called_with(file_name, os.O_WRONLY)
mock_fdopen.assert_called_with(123, 'w')
m().write.assert_called_once_with('1')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8')))
# One Interface down, Happy Path
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if self.conf.conf.amphora_agent.agent_server_network_file:
file_name = self.conf.conf.amphora_agent.agent_server_network_file
flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
else:
file_name = ('/etc/octavia/interfaces/'
'eth{}.json'.format(test_int_num))
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: "eth{}".format(test_int_num),
consts.ADDRESSES: [
{
consts.DHCP: True,
consts.IPV6AUTO: True
}
],
consts.ROUTES: [
],
consts.RULES: [
],
consts.SCRIPTS: {
consts.IFACE_UP: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv4 "
"eth{}".format(test_int_num))
}, {
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv6 "
"eth{}".format(test_int_num))
}],
consts.IFACE_DOWN: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv4 "
"eth{}".format(test_int_num))
}, {
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv6 "
"eth{}".format(test_int_num))
}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up', 'eth' + test_int_num], stderr=-2)
# fixed IPs happy path
port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [
{'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]}
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if self.conf.conf.amphora_agent.agent_server_network_file:
file_name = self.conf.conf.amphora_agent.agent_server_network_file
flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
else:
file_name = ('/etc/octavia/interfaces/'
'eth{}.json'.format(test_int_num))
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: "eth{}".format(test_int_num),
consts.MTU: 1450,
consts.ADDRESSES: [
{consts.ADDRESS: '10.0.0.5', consts.PREFIXLEN: 24}
],
consts.ROUTES: [],
consts.RULES: [],
consts.SCRIPTS: {
consts.IFACE_UP: [
{consts.COMMAND:
'/usr/local/bin/lvs-masquerade.sh add ipv4 '
'eth{}'.format(test_int_num)}],
consts.IFACE_DOWN: [
{consts.COMMAND:
'/usr/local/bin/lvs-masquerade.sh delete ipv4 '
'eth{}'.format(test_int_num)}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up', 'eth' + test_int_num], stderr=-2)
# fixed IPs happy path IPv6
port_info = {'mac_address': '123', 'mtu': 1450, 'fixed_ips': [
{'ip_address': '2001:db8::2', 'subnet_cidr': '2001:db8::/32'}]}
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if self.conf.conf.amphora_agent.agent_server_network_file:
file_name = self.conf.conf.amphora_agent.agent_server_network_file
flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
else:
file_name = ('/etc/octavia/interfaces/'
'eth{}.json'.format(test_int_num))
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: "eth{}".format(test_int_num),
consts.MTU: 1450,
consts.ADDRESSES: [
{consts.ADDRESS: '2001:0db8::2',
consts.PREFIXLEN: 32}],
consts.ROUTES: [],
consts.RULES: [],
consts.SCRIPTS: {
consts.IFACE_UP: [
{consts.COMMAND:
'/usr/local/bin/lvs-masquerade.sh add ipv6 '
'eth{}'.format(test_int_num)}],
consts.IFACE_DOWN: [
{consts.COMMAND:
'/usr/local/bin/lvs-masquerade.sh delete ipv6 '
'eth{}'.format(test_int_num)}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up', 'eth' + test_int_num], stderr=-2)
# fixed IPs, bogus IP
port_info = {'mac_address': '123', 'fixed_ips': [
{'ip_address': '10005', 'subnet_cidr': '10.0.0.0/24'}]}
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
file_name = ('/etc/octavia/interfaces/'
'eth{}.json'.format(test_int_num))
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(400, rv.status_code)
# same as above but ifup fails
port_info = {'mac_address': '123', 'fixed_ips': [
{'ip_address': '10.0.0.5', 'subnet_cidr': '10.0.0.0/24'}]}
mock_check_output.side_effect = [subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR)]
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(500, rv.status_code)
self.assertEqual(
{'details': RANDOM_ERROR.decode('utf-8'),
'message': 'Error plugging network'},
jsonutils.loads(rv.data.decode('utf-8')))
# Bad port_info tests
port_info = 'Bad data'
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(400, rv.status_code)
port_info = {'fixed_ips': [{'ip_address': '10.0.0.5',
'subnet_cidr': '10.0.0.0/24'}]}
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(400, rv.status_code)
def test_ubuntu_plug_network_host_routes(self):
self._test_plug_network_host_routes(consts.UBUNTU)
self.conf.config(group="amphora_agent",
agent_server_network_file="/path/to/interfaces_file")
def test_centos_plug_network_host_routes(self):
self._test_plug_network_host_routes(consts.CENTOS)
@mock.patch('os.chmod')
@mock.patch('pyroute2.IPRoute', create=True)
@mock.patch('pyroute2.NetNS', create=True)
@mock.patch('subprocess.check_output')
def _test_plug_network_host_routes(self, distro, mock_check_output,
mock_netns, mock_pyroute2,
mock_os_chmod):
mock_ipr = mock.MagicMock()
mock_ipr_instance = mock.MagicMock()
mock_ipr_instance.link_lookup.return_value = [33]
mock_ipr_instance.get_links.return_value = ({
'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
mock_ipr.__enter__.return_value = mock_ipr_instance
mock_pyroute2.return_value = mock_ipr
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
SUBNET_CIDR = '192.0.2.0/24'
PREFIXLEN = 24
IP = '192.0.1.5'
MAC = '123'
DEST1 = '198.51.100.0/24'
DEST2 = '203.0.113.1/32'
NEXTHOP = '192.0.2.1'
netns_handle = mock_netns.return_value.__enter__.return_value
netns_handle.get_links.return_value = [{
'attrs': [['IFLA_IFNAME', consts.NETNS_PRIMARY_INTERFACE]]}]
port_info = {'mac_address': MAC, 'mtu': 1450, 'fixed_ips': [
{'ip_address': IP, 'subnet_cidr': SUBNET_CIDR,
'host_routes': [{'destination': DEST1, 'nexthop': NEXTHOP},
{'destination': DEST2, 'nexthop': NEXTHOP}]}]}
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
file_name = '/etc/octavia/interfaces/{}.json'.format(
consts.NETNS_PRIMARY_INTERFACE)
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/network",
content_type='application/json',
data=jsonutils.dumps(port_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: consts.NETNS_PRIMARY_INTERFACE,
consts.MTU: 1450,
consts.ADDRESSES: [
{
consts.ADDRESS: IP,
consts.PREFIXLEN: PREFIXLEN
}
],
consts.ROUTES: [
{
consts.DST: DEST1,
consts.GATEWAY: NEXTHOP
}, {
consts.DST: DEST2,
consts.GATEWAY: NEXTHOP
}
],
consts.RULES: [
],
consts.SCRIPTS: {
consts.IFACE_UP: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv4 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}],
consts.IFACE_DOWN: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv4 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up',
consts.NETNS_PRIMARY_INTERFACE], stderr=-2)
def test_ubuntu_plug_VIP4(self):
self._test_plug_VIP4(consts.UBUNTU)
self._test_plug_VIP4(consts.CENTOS)
@mock.patch('os.chmod')
@mock.patch('shutil.copy2')
@mock.patch('pyroute2.NSPopen', create=True)
@mock.patch('octavia.amphorae.backends.agent.api_server.'
'plug.Plug._netns_interface_exists')
@mock.patch('pyroute2.IPRoute', create=True)
@mock.patch('pyroute2.netns.create', create=True)
@mock.patch('pyroute2.NetNS', create=True)
@mock.patch('subprocess.check_output')
@mock.patch('shutil.copytree')
@mock.patch('os.makedirs')
@mock.patch('os.path.isfile')
def _test_plug_VIP4(self, distro, mock_isfile, mock_makedirs,
mock_copytree, mock_check_output, mock_netns,
mock_netns_create, mock_pyroute2, mock_int_exists,
mock_nspopen, mock_copy2, mock_os_chmod):
mock_ipr = mock.MagicMock()
mock_ipr_instance = mock.MagicMock()
mock_ipr_instance.link_lookup.side_effect = [[], [], [33], [33], [33],
[33], [33], [33]]
mock_ipr_instance.get_links.return_value = ({
'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
mock_ipr.__enter__.return_value = mock_ipr_instance
mock_pyroute2.return_value = mock_ipr
mock_isfile.return_value = True
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
subnet_info = {
'subnet_cidr': '203.0.113.0/24',
'gateway': '203.0.113.1',
'mac_address': '123'
}
# malformed ip
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
'/plug/vip/error',
data=jsonutils.dumps(subnet_info),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
'/plug/vip/error',
data=jsonutils.dumps(subnet_info),
content_type='application/json')
self.assertEqual(400, rv.status_code)
# No subnet info
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
'/plug/vip/error')
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
'/plug/vip/error')
self.assertEqual(400, rv.status_code)
# Interface already plugged
mock_int_exists.return_value = True
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
self.assertEqual(409, rv.status_code)
self.assertEqual(dict(message="Interface already exists"),
jsonutils.loads(rv.data.decode('utf-8')))
mock_int_exists.return_value = False
# No interface at all
file_name = '/sys/bus/pci/rescan'
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
mock_open.assert_called_with(file_name, os.O_WRONLY)
mock_fdopen.assert_called_with(123, 'w')
m().write.assert_called_once_with('1')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8')))
# Two interfaces down
m().reset_mock()
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
mock_open.assert_called_with(file_name, os.O_WRONLY)
mock_fdopen.assert_called_with(123, 'w')
m().write.assert_called_once_with('1')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8')))
# Happy Path IPv4, with VRRP_IP and host route
full_subnet_info = {
'subnet_cidr': '203.0.113.0/24',
'gateway': '203.0.113.1',
'mac_address': '123',
'vrrp_ip': '203.0.113.4',
'mtu': 1450,
'host_routes': [{'destination': '203.0.114.0/24',
'nexthop': '203.0.113.5'},
{'destination': '203.0.115.1/32',
'nexthop': '203.0.113.5'}]
}
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if self.conf.conf.amphora_agent.agent_server_network_file:
file_name = self.conf.conf.amphora_agent.agent_server_network_file
flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
else:
file_name = ('/etc/octavia/interfaces/{netns_int}.json'.format(
netns_int=consts.NETNS_PRIMARY_INTERFACE))
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(
full_subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(
full_subnet_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: consts.NETNS_PRIMARY_INTERFACE,
consts.MTU: 1450,
consts.ADDRESSES: [
{
consts.ADDRESS: "203.0.113.4",
consts.PREFIXLEN: 24
}, {
consts.ADDRESS: "203.0.113.2",
consts.PREFIXLEN: 24
}
],
consts.ROUTES: [
{
consts.DST: '0.0.0.0/0',
consts.GATEWAY: '203.0.113.1',
consts.FLAGS: [consts.ONLINK]
}, {
consts.DST: '0.0.0.0/0',
consts.GATEWAY: '203.0.113.1',
consts.TABLE: 1,
consts.FLAGS: [consts.ONLINK]
}, {
consts.DST: '203.0.113.0/24',
consts.PREFSRC: '203.0.113.2',
consts.SCOPE: 'link',
consts.TABLE: 1
}, {
consts.DST: '203.0.114.0/24',
consts.GATEWAY: '203.0.113.5'
}, {
consts.DST: '203.0.115.1/32',
consts.GATEWAY: '203.0.113.5'
}, {
consts.DST: '203.0.114.0/24',
consts.GATEWAY: '203.0.113.5',
consts.TABLE: 1
}, {
consts.DST: '203.0.115.1/32',
consts.GATEWAY: '203.0.113.5',
consts.TABLE: 1
}
],
consts.RULES: [
{
consts.SRC: '203.0.113.2',
consts.SRC_LEN: 32,
consts.TABLE: 1
}
],
consts.SCRIPTS: {
consts.IFACE_UP: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv4 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}],
consts.IFACE_DOWN: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv4 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up',
consts.NETNS_PRIMARY_INTERFACE], stderr=-2)
# Verify sysctl was loaded
calls = [mock.call('amphora-haproxy', ['/sbin/sysctl', '--system'],
stdout=subprocess.PIPE),
mock.call('amphora-haproxy', ['modprobe', 'ip_vs'],
stdout=subprocess.PIPE),
mock.call('amphora-haproxy',
['/sbin/sysctl', '-w', 'net.ipv4.ip_forward=1'],
stdout=subprocess.PIPE),
mock.call('amphora-haproxy',
['/sbin/sysctl', '-w', 'net.ipv4.vs.conntrack=1'],
stdout=subprocess.PIPE)]
mock_nspopen.assert_has_calls(calls, any_order=True)
# One Interface down, Happy Path IPv4
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
if self.conf.conf.amphora_agent.agent_server_network_file:
file_name = self.conf.conf.amphora_agent.agent_server_network_file
flags = os.O_WRONLY | os.O_CREAT | os.O_APPEND
else:
file_name = ('/etc/octavia/interfaces/'
'{}.json'.format(consts.NETNS_PRIMARY_INTERFACE))
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: consts.NETNS_PRIMARY_INTERFACE,
consts.ADDRESSES: [
{
consts.DHCP: True
}, {
consts.ADDRESS: "203.0.113.2",
consts.PREFIXLEN: 24
}
],
consts.ROUTES: [
{
consts.DST: '0.0.0.0/0',
consts.GATEWAY: '203.0.113.1',
consts.FLAGS: [consts.ONLINK]
}, {
consts.DST: '0.0.0.0/0',
consts.GATEWAY: '203.0.113.1',
consts.FLAGS: [consts.ONLINK],
consts.TABLE: 1
}, {
consts.DST: '203.0.113.0/24',
consts.PREFSRC: '203.0.113.2',
consts.SCOPE: 'link',
consts.TABLE: 1
}
],
consts.RULES: [
{
consts.SRC: '203.0.113.2',
consts.SRC_LEN: 32,
consts.TABLE: 1
}
],
consts.SCRIPTS: {
consts.IFACE_UP: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv4 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}],
consts.IFACE_DOWN: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv4 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up',
consts.NETNS_PRIMARY_INTERFACE], stderr=-2)
mock_check_output.side_effect = [
'unplug1',
subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR)]
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/203.0.113.2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
self.assertEqual(500, rv.status_code)
self.assertEqual(
{'details': RANDOM_ERROR.decode('utf-8'),
'message': 'Error plugging VIP'},
jsonutils.loads(rv.data.decode('utf-8')))
def test_ubuntu_plug_VIP6(self):
self._test_plug_vip6(consts.UBUNTU)
def test_centos_plug_VIP6(self):
self._test_plug_vip6(consts.CENTOS)
@mock.patch('os.chmod')
@mock.patch('shutil.copy2')
@mock.patch('pyroute2.NSPopen', create=True)
@mock.patch('pyroute2.IPRoute', create=True)
@mock.patch('pyroute2.netns.create', create=True)
@mock.patch('pyroute2.NetNS', create=True)
@mock.patch('subprocess.check_output')
@mock.patch('shutil.copytree')
@mock.patch('os.makedirs')
@mock.patch('os.path.isfile')
def _test_plug_vip6(self, distro, mock_isfile, mock_makedirs,
mock_copytree, mock_check_output, mock_netns,
mock_netns_create, mock_pyroute2, mock_nspopen,
mock_copy2, mock_os_chmod):
mock_ipr = mock.MagicMock()
mock_ipr_instance = mock.MagicMock()
mock_ipr_instance.link_lookup.side_effect = [[], [], [33], [33], [33],
[33], [33], [33]]
mock_ipr_instance.get_links.return_value = ({
'attrs': [('IFLA_IFNAME', FAKE_INTERFACE)]},)
mock_ipr.__enter__.return_value = mock_ipr_instance
mock_pyroute2.return_value = mock_ipr
mock_isfile.return_value = True
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
subnet_info = {
'subnet_cidr': '2001:db8::/32',
'gateway': '2001:db8::1',
'mac_address': '123'
}
# malformed ip
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
'/plug/vip/error',
data=jsonutils.dumps(
subnet_info),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
'/plug/vip/error',
data=jsonutils.dumps(
subnet_info),
content_type='application/json')
self.assertEqual(400, rv.status_code)
# No subnet info
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
'/plug/vip/error',
data=jsonutils.dumps(subnet_info),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
'/plug/vip/error',
data=jsonutils.dumps(subnet_info),
content_type='application/json')
self.assertEqual(400, rv.status_code)
# No interface at all
file_name = '/sys/bus/pci/rescan'
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
mock_open.assert_called_with(file_name, os.O_WRONLY)
mock_fdopen.assert_called_with(123, 'w')
m().write.assert_called_once_with('1')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8')))
# Two interfaces down
m().reset_mock()
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
mock_open.assert_called_with(file_name, os.O_WRONLY)
mock_fdopen.assert_called_with(123, 'w')
m().write.assert_called_once_with('1')
self.assertEqual(404, rv.status_code)
self.assertEqual(dict(details="No suitable network interface found"),
jsonutils.loads(rv.data.decode('utf-8')))
# Happy Path IPv6, with VRRP_IP and host route
full_subnet_info = {
'subnet_cidr': '2001:db8::/32',
'gateway': '2001:db8::1',
'mac_address': '123',
'vrrp_ip': '2001:db8::4',
'mtu': 1450,
'host_routes': [{'destination': '2001:db9::/32',
'nexthop': '2001:db8::5'},
{'destination': '2001:db9::1/128',
'nexthop': '2001:db8::5'}]
}
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
file_name = ('/etc/octavia/interfaces/{netns_int}.json'.format(
netns_int=consts.NETNS_PRIMARY_INTERFACE))
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(
full_subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(
full_subnet_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: consts.NETNS_PRIMARY_INTERFACE,
consts.MTU: 1450,
consts.ADDRESSES: [
{
consts.ADDRESS: '2001:db8::4',
consts.PREFIXLEN: 32
}, {
consts.ADDRESS: '2001:0db8::2',
consts.PREFIXLEN: 32
}
],
consts.ROUTES: [
{
consts.DST: '::/0',
consts.GATEWAY: '2001:db8::1',
consts.FLAGS: [consts.ONLINK]
}, {
consts.DST: '::/0',
consts.GATEWAY: '2001:db8::1',
consts.FLAGS: [consts.ONLINK],
consts.TABLE: 1
}, {
consts.DST: '2001:0db8::/32',
consts.PREFSRC: '2001:0db8::2',
consts.SCOPE: 'link',
consts.TABLE: 1
}, {
consts.DST: '2001:db9::/32',
consts.GATEWAY: '2001:db8::5'
}, {
consts.DST: '2001:db9::1/128',
consts.GATEWAY: '2001:db8::5'
}, {
consts.DST: '2001:db9::/32',
consts.GATEWAY: '2001:db8::5',
consts.TABLE: 1
}, {
consts.DST: '2001:db9::1/128',
consts.GATEWAY: '2001:db8::5',
consts.TABLE: 1
}
],
consts.RULES: [
{
consts.SRC: '2001:0db8::2',
consts.SRC_LEN: 128,
consts.TABLE: 1
}
],
consts.SCRIPTS: {
consts.IFACE_UP: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv6 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}],
consts.IFACE_DOWN: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv6 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up', '{netns_int}'.format(
netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
# Verify sysctl was loaded
calls = [mock.call('amphora-haproxy', ['/sbin/sysctl', '--system'],
stdout=subprocess.PIPE),
mock.call('amphora-haproxy', ['modprobe', 'ip_vs'],
stdout=subprocess.PIPE),
mock.call('amphora-haproxy',
['/sbin/sysctl', '-w',
'net.ipv6.conf.all.forwarding=1'],
stdout=subprocess.PIPE),
mock.call('amphora-haproxy',
['/sbin/sysctl', '-w', 'net.ipv4.vs.conntrack=1'],
stdout=subprocess.PIPE)]
mock_nspopen.assert_has_calls(calls, any_order=True)
# One Interface down, Happy Path IPv6
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
file_name = ('/etc/octavia/interfaces/{netns_int}.json'.format(
netns_int=consts.NETNS_PRIMARY_INTERFACE))
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen, mock.patch(
'octavia.amphorae.backends.utils.interface_file.'
'InterfaceFile.dump') as mock_dump:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
self.assertEqual(202, rv.status_code)
mock_open.assert_any_call(file_name, flags, mode)
mock_fdopen.assert_any_call(123, 'w')
plug_inf_file = '/var/lib/octavia/plugged_interfaces'
flags = os.O_RDWR | os.O_CREAT
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_any_call(plug_inf_file, flags, mode)
mock_fdopen.assert_any_call(123, 'r+')
expected_dict = {
consts.NAME: consts.NETNS_PRIMARY_INTERFACE,
consts.MTU: None,
consts.ADDRESSES: [
{
consts.IPV6AUTO: True
},
{
consts.ADDRESS: '2001:db8::2',
consts.PREFIXLEN: 32
}
],
consts.ROUTES: [
{
consts.DST: '::/0',
consts.GATEWAY: '2001:db8::1',
consts.FLAGS: [consts.ONLINK]
}, {
consts.DST: '::/0',
consts.GATEWAY: '2001:db8::1',
consts.FLAGS: [consts.ONLINK],
consts.TABLE: 1
}, {
consts.DST: '2001:db8::/32',
consts.PREFSRC: '2001:db8::2',
consts.SCOPE: 'link',
consts.TABLE: 1
}
],
consts.RULES: [
{
consts.SRC: '2001:db8::2',
consts.SRC_LEN: 128,
consts.TABLE: 1
}
],
consts.SCRIPTS: {
consts.IFACE_UP: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh add ipv6 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}],
consts.IFACE_DOWN: [{
consts.COMMAND: (
"/usr/local/bin/lvs-masquerade.sh delete ipv6 "
"{}".format(consts.NETNS_PRIMARY_INTERFACE))
}]
}
}
mock_dump.assert_called_once()
args = mock_dump.mock_calls[0][1]
test_utils.assert_interface_files_equal(
self, args[0], expected_dict)
mock_check_output.assert_called_with(
['ip', 'netns', 'exec', consts.AMPHORA_NAMESPACE,
'amphora-interface', 'up', '{netns_int}'.format(
netns_int=consts.NETNS_PRIMARY_INTERFACE)], stderr=-2)
mock_check_output.side_effect = [
'unplug1',
subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR), subprocess.CalledProcessError(
7, 'test', RANDOM_ERROR)]
m = self.useFixture(test_utils.OpenFixture(file_name)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
elif distro == consts.CENTOS:
rv = self.centos_app.post('/' + api_server.VERSION +
"/plug/vip/2001:db8::2",
content_type='application/json',
data=jsonutils.dumps(subnet_info))
self.assertEqual(500, rv.status_code)
self.assertEqual(
{'details': RANDOM_ERROR.decode('utf-8'),
'message': 'Error plugging VIP'},
jsonutils.loads(rv.data.decode('utf-8')))
def test_ubuntu_get_interface(self):
self._test_get_interface(consts.UBUNTU)
def test_centos_get_interface(self):
self._test_get_interface(consts.CENTOS)
@mock.patch('pyroute2.NetNS', create=True)
def _test_get_interface(self, distro, mock_netns):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
netns_handle = mock_netns.return_value.__enter__.return_value
interface_res = {'interface': 'eth0'}
# Happy path
netns_handle.get_addr.return_value = [{
'index': 3, 'family': socket.AF_INET,
'attrs': [['IFA_ADDRESS', '203.0.113.2']]}]
netns_handle.get_links.return_value = [{
'attrs': [['IFLA_IFNAME', 'eth0']]}]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/interface/203.0.113.2',
data=jsonutils.dumps(interface_res),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/interface/203.0.113.2',
data=jsonutils.dumps(interface_res),
content_type='application/json')
self.assertEqual(200, rv.status_code)
# Happy path with IPv6 address normalization
netns_handle.get_addr.return_value = [{
'index': 3, 'family': socket.AF_INET6,
'attrs': [['IFA_ADDRESS',
'0000:0000:0000:0000:0000:0000:0000:0001']]}]
netns_handle.get_links.return_value = [{
'attrs': [['IFLA_IFNAME', 'eth0']]}]
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/interface/::1',
data=jsonutils.dumps(interface_res),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/interface/::1',
data=jsonutils.dumps(interface_res),
content_type='application/json')
self.assertEqual(200, rv.status_code)
# Nonexistent interface
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/interface/10.0.0.1',
data=jsonutils.dumps(interface_res),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/interface/10.0.0.1',
data=jsonutils.dumps(interface_res),
content_type='application/json')
self.assertEqual(404, rv.status_code)
# Invalid IP address
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION +
'/interface/00:00:00:00:00:00',
data=jsonutils.dumps(interface_res),
content_type='application/json')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION +
'/interface/00:00:00:00:00:00',
data=jsonutils.dumps(interface_res),
content_type='application/json')
self.assertEqual(400, rv.status_code)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_ubuntu_upload_keepalived_config_systemd(self, mock_init_system):
with mock.patch('distro.id', return_value='ubuntu'):
self._test_upload_keepalived_config(
consts.INIT_SYSTEMD, consts.UBUNTU, mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSTEMD)
def test_centos_upload_keepalived_config_systemd(self, mock_init_system):
with mock.patch('distro.id', return_value='centos'):
self._test_upload_keepalived_config(
consts.INIT_SYSTEMD, consts.CENTOS, mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_UPSTART)
def test_ubuntu_upload_keepalived_config_upstart(self, mock_init_system):
self._test_upload_keepalived_config(consts.INIT_UPSTART,
consts.UBUNTU, mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_os_init_system', return_value=consts.INIT_SYSVINIT)
def test_ubuntu_upload_keepalived_config_sysvinit(self, mock_init_system):
self._test_upload_keepalived_config(consts.INIT_SYSVINIT,
consts.UBUNTU, mock_init_system)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'vrrp_check_script_update')
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
@mock.patch('os.rename')
@mock.patch('subprocess.check_output')
@mock.patch('os.remove')
def _test_upload_keepalived_config(self, init_system, distro,
mock_init_system, mock_remove,
mock_subprocess, mock_rename,
mock_makedirs, mock_exists,
mock_vrrp_check):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
mock_exists.return_value = True
cfg_path = util.keepalived_cfg_path()
m = self.useFixture(test_utils.OpenFixture(cfg_path)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/vrrp/upload', data='test')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/vrrp/upload', data='test')
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
mock_open.assert_called_with(cfg_path, flags, mode)
mock_fdopen.assert_called_with(123, 'wb')
self.assertEqual(200, rv.status_code)
mock_vrrp_check.assert_called_once_with(None,
consts.AMP_ACTION_START)
mock_exists.return_value = False
mock_vrrp_check.reset_mock()
script_path = util.keepalived_check_script_path()
m = self.useFixture(test_utils.OpenFixture(script_path)).mock_open
with mock.patch('os.open') as mock_open, mock.patch.object(
os, 'fdopen', m) as mock_fdopen:
mock_open.return_value = 123
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/vrrp/upload', data='test')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/vrrp/upload', data='test')
mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
mock_open.assert_called_with(script_path, flags, mode)
mock_fdopen.assert_called_with(123, 'w')
self.assertEqual(200, rv.status_code)
mock_vrrp_check.assert_called_once_with(None,
consts.AMP_ACTION_START)
def test_ubuntu_manage_service_vrrp(self):
self._test_manage_service_vrrp(consts.UBUNTU)
def test_centos_manage_service_vrrp(self):
self._test_manage_service_vrrp(consts.CENTOS)
@mock.patch('subprocess.check_output')
def _test_manage_service_vrrp(self, distro, mock_check_output):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start')
self.assertEqual(202, rv.status_code)
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/vrrp/restart')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/vrrp/restart')
self.assertEqual(400, rv.status_code)
mock_check_output.side_effect = subprocess.CalledProcessError(1,
'blah!')
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION + '/vrrp/start')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION + '/vrrp/start')
self.assertEqual(500, rv.status_code)
def test_ubuntu_details(self):
self._test_details(consts.UBUNTU)
def test_centos_details(self):
self._test_details(consts.CENTOS)
@mock.patch('octavia.amphorae.backends.agent.api_server.util.'
'get_lvs_listeners',
return_value=[])
@mock.patch('octavia.amphorae.backends.agent.api_server.'
'amphora_info.AmphoraInfo.'
'_get_extend_body_from_lvs_driver',
return_value={
"keepalived_version": '1.1.11-1',
"ipvsadm_version": '2.2.22-2'
})
@mock.patch('octavia.amphorae.backends.agent.api_server.'
'amphora_info.AmphoraInfo.'
'_count_lvs_listener_processes', return_value=0)
@mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
'AmphoraInfo._count_haproxy_processes')
@mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
'AmphoraInfo._get_networks')
@mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
'AmphoraInfo._load')
@mock.patch('os.statvfs')
@mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
'AmphoraInfo._cpu')
@mock.patch('octavia.amphorae.backends.agent.api_server.amphora_info.'
'AmphoraInfo._get_meminfo')
@mock.patch('octavia.amphorae.backends.agent.api_server.'
'util.get_listeners')
@mock.patch('socket.gethostname')
@mock.patch('subprocess.check_output')
def _test_details(self, distro, mock_subbprocess, mock_hostname,
mock_get_listeners, mock_get_mem, mock_cpu,
mock_statvfs, mock_load, mock_get_nets,
mock_count_haproxy, mock_count_lvs_listeners,
mock_get_ext_from_lvs_driver, mock_get_lvs_listeners):
self.assertIn(distro, [consts.UBUNTU, consts.CENTOS])
listener_id = uuidutils.generate_uuid()
mock_get_listeners.return_value = [listener_id]
mock_hostname.side_effect = ['test-host']
mock_subbprocess.side_effect = ['9.9.99-9']
MemTotal = random.randrange(0, 1000)
MemFree = random.randrange(0, 1000)
Buffers = random.randrange(0, 1000)
Cached = random.randrange(0, 1000)
SwapCached = random.randrange(0, 1000)
Shmem = random.randrange(0, 1000)
Slab = random.randrange(0, 1000)
memory_dict = {'CmaFree': 0, 'Mapped': 38244, 'CommitLimit': 508048,
'MemFree': MemFree, 'AnonPages': 92384,
'DirectMap2M': 997376, 'SwapTotal': 0,
'NFS_Unstable': 0, 'SReclaimable': 34168,
'Writeback': 0, 'PageTables': 3760, 'Shmem': Shmem,
'Hugepagesize': 2048, 'MemAvailable': 738356,
'HardwareCorrupted': 0, 'SwapCached': SwapCached,
'Dirty': 80, 'Active': 237060, 'VmallocUsed': 0,
'Inactive(anon)': 2752, 'Slab': Slab, 'Cached': Cached,
'Inactive(file)': 149588, 'SUnreclaim': 17796,
'Mlocked': 3656, 'AnonHugePages': 6144, 'SwapFree': 0,
'Active(file)': 145512, 'CmaTotal': 0,
'Unevictable': 3656, 'KernelStack': 2368,
'Inactive': 152340, 'MemTotal': MemTotal, 'Bounce': 0,
'Committed_AS': 401884, 'Active(anon)': 91548,
'VmallocTotal': 34359738367, 'VmallocChunk': 0,
'DirectMap4k': 51072, 'WritebackTmp': 0,
'Buffers': Buffers}
mock_get_mem.return_value = memory_dict
cpu_total = random.randrange(0, 1000)
cpu_user = random.randrange(0, 1000)
cpu_system = random.randrange(0, 1000)
cpu_softirq = random.randrange(0, 1000)
cpu_dict = {'idle': '7168848', 'system': cpu_system,
'total': cpu_total, 'softirq': cpu_softirq, 'nice': '31',
'iowait': '902', 'user': cpu_user, 'irq': '0'}
mock_cpu.return_value = cpu_dict
f_blocks = random.randrange(0, 1000)
f_bfree = random.randrange(0, 1000)
f_frsize = random.randrange(0, 1000)
f_bavail = random.randrange(0, 1000)
stats = mock.MagicMock()
stats.f_blocks = f_blocks
stats.f_bfree = f_bfree
stats.f_frsize = f_frsize
stats.f_bavail = f_bavail
disk_used = (f_blocks - f_bfree) * f_frsize
disk_available = f_bavail * f_frsize
mock_statvfs.return_value = stats
load_1min = random.randrange(0, 10)
load_5min = random.randrange(0, 10)
load_15min = random.randrange(0, 10)
mock_load.return_value = [load_1min, load_5min, load_15min]
eth1_rx = random.randrange(0, 1000)
eth1_tx = random.randrange(0, 1000)
eth2_rx = random.randrange(0, 1000)
eth2_tx = random.randrange(0, 1000)
eth3_rx = random.randrange(0, 1000)
eth3_tx = random.randrange(0, 1000)
net_dict = {'eth2': {'network_rx': eth2_rx, 'network_tx': eth2_tx},
'eth1': {'network_rx': eth1_rx, 'network_tx': eth1_tx},
'eth3': {'network_rx': eth3_rx, 'network_tx': eth3_tx}}
mock_get_nets.return_value = net_dict
haproxy_count = random.randrange(0, 100)
mock_count_haproxy.return_value = haproxy_count
expected_dict = {'active': True, 'api_version': '1.0',
'cpu': {'soft_irq': cpu_softirq, 'system': cpu_system,
'total': cpu_total, 'user': cpu_user},
'disk': {'available': disk_available,
'used': disk_used},
'haproxy_count': haproxy_count,
'haproxy_version': '9.9.99-9',
'hostname': 'test-host',
'ipvsadm_version': u'2.2.22-2',
'keepalived_version': u'1.1.11-1',
'listeners': [listener_id],
'load': [load_1min, load_5min, load_15min],
'memory': {'buffers': Buffers,
'cached': Cached,
'free': MemFree,
'shared': Shmem,
'slab': Slab,
'swap_used': SwapCached,
'total': MemTotal},
'networks': {'eth1': {'network_rx': eth1_rx,
'network_tx': eth1_tx},
'eth2': {'network_rx': eth2_rx,
'network_tx': eth2_tx},
'eth3': {'network_rx': eth3_rx,
'network_tx': eth3_tx}},
'packages': {},
'topology': consts.TOPOLOGY_SINGLE,
'topology_status': consts.TOPOLOGY_STATUS_OK,
'lvs_listener_process_count': 0}
if distro == consts.UBUNTU:
rv = self.ubuntu_app.get('/' + api_server.VERSION + '/details')
elif distro == consts.CENTOS:
rv = self.centos_app.get('/' + api_server.VERSION + '/details')
self.assertEqual(200, rv.status_code)
self.assertEqual(expected_dict,
jsonutils.loads(rv.data.decode('utf-8')))
def test_ubuntu_upload_config(self):
self._test_upload_config(consts.UBUNTU)
def test_centos_upload_config(self):
self._test_upload_config(consts.CENTOS)
@mock.patch('oslo_config.cfg.CONF.mutate_config_files')
def _test_upload_config(self, distro, mock_mutate):
server.BUFFER = 5 # test the while loop
m = self.useFixture(
test_utils.OpenFixture(AMP_AGENT_CONF_PATH)).mock_open
with mock.patch('os.open'), mock.patch.object(os, 'fdopen', m):
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/config', data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/config', data='TestTest')
self.assertEqual(202, rv.status_code)
self.assertEqual(OK, jsonutils.loads(rv.data.decode('utf-8')))
handle = m()
handle.write.assert_any_call(octavia_utils.b('TestT'))
handle.write.assert_any_call(octavia_utils.b('est'))
mock_mutate.assert_called_once_with()
# Test the exception handling
mock_mutate.side_effect = Exception('boom')
if distro == consts.UBUNTU:
rv = self.ubuntu_app.put('/' + api_server.VERSION +
'/config', data='TestTest')
elif distro == consts.CENTOS:
rv = self.centos_app.put('/' + api_server.VERSION +
'/config', data='TestTest')
self.assertEqual(500, rv.status_code)
def test_version_discovery(self):
with mock.patch('distro.id', return_value='ubuntu'), mock.patch(
'octavia.amphorae.backends.agent.api_server.plug.'
'Plug.plug_lo'):
self.test_client = server.Server().app.test_client()
expected_dict = {'api_version': api_server.VERSION}
rv = self.test_client.get('/')
self.assertEqual(200, rv.status_code)
self.assertEqual(expected_dict,
jsonutils.loads(rv.data.decode('utf-8')))
| 47.378707
| 79
| 0.525833
| 12,967
| 124,606
| 4.814606
| 0.047814
| 0.027679
| 0.037674
| 0.024027
| 0.894395
| 0.87314
| 0.852061
| 0.838157
| 0.818472
| 0.796656
| 0
| 0.029802
| 0.362599
| 124,606
| 2,629
| 80
| 47.396729
| 0.756245
| 0.01654
| 0
| 0.769803
| 0
| 0
| 0.140437
| 0.059114
| 0
| 0
| 0
| 0
| 0.124289
| 1
| 0.029322
| false
| 0
| 0.008753
| 0
| 0.03895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b94c39e73f354ee0844306b6fdeda5242c9c0e88
| 110
|
py
|
Python
|
venv/lib/python3.6/site-packages/murmurhash/__init__.py
|
lumierra/project-flask
|
6e27148299a283c92f5d758d269f3b5fc6e2163e
|
[
"MIT"
] | 1
|
2018-10-30T07:19:27.000Z
|
2018-10-30T07:19:27.000Z
|
venv/lib/python3.6/site-packages/murmurhash/__init__.py
|
lumierra/project-flask
|
6e27148299a283c92f5d758d269f3b5fc6e2163e
|
[
"MIT"
] | 4
|
2020-07-26T02:10:42.000Z
|
2021-03-31T18:48:58.000Z
|
venv/lib/python3.6/site-packages/murmurhash/__init__.py
|
lumierra/project-flask
|
6e27148299a283c92f5d758d269f3b5fc6e2163e
|
[
"MIT"
] | 1
|
2020-07-25T23:57:23.000Z
|
2020-07-25T23:57:23.000Z
|
import os
def get_include():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'include')
| 18.333333
| 78
| 0.718182
| 17
| 110
| 4.352941
| 0.647059
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118182
| 110
| 5
| 79
| 22
| 0.762887
| 0
| 0
| 0
| 0
| 0
| 0.063636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
b96e4f1957e74e86d054c04b12fb3239033886b0
| 3,369
|
py
|
Python
|
display_tools.py
|
sorix6/CarND-Vehicle-Detection
|
49c51f0872f5f390feef2c13d04832f541ec76ec
|
[
"MIT"
] | null | null | null |
display_tools.py
|
sorix6/CarND-Vehicle-Detection
|
49c51f0872f5f390feef2c13d04832f541ec76ec
|
[
"MIT"
] | null | null | null |
display_tools.py
|
sorix6/CarND-Vehicle-Detection
|
49c51f0872f5f390feef2c13d04832f541ec76ec
|
[
"MIT"
] | null | null | null |
import cv2
from tools import get_hog_features
from random import randint
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def displayDifferentHOG(veh_images, nonveh_images):
rand_indexes = []
for i in range(10):
rand_indexes.append(randint(0, len(veh_images)))
for i in range(len(rand_indexes)):
vehicle = mpimg.imread(veh_images[rand_indexes[i]])
#non_vehicle = mpimg.imread(nonveh_images[rand_indexes[i]])
gray = cv2.cvtColor(vehicle, cv2.COLOR_RGB2GRAY)
# Call our function with vis=True to see an image output
features_or9, hog_image_or9 = get_hog_features(gray, orient= 9,
pix_per_cell= 8, cell_per_block= 2,
vis=True, feature_vec=False)
features_or11, hog_image_or11 = get_hog_features(gray, orient= 11,
pix_per_cell= 8, cell_per_block= 2,
vis=True, feature_vec=False)
features_or9_pix16, hog_image_or9_pix16 = get_hog_features(gray, orient= 9,
pix_per_cell= 16, cell_per_block= 2,
vis=True, feature_vec=False)
features_or11_pix16, hog_image_or11_pix16 = get_hog_features(gray, orient= 11,
pix_per_cell= 16, cell_per_block= 2,
vis=True, feature_vec=False)
# Plot the examples
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(32,32))
ax1.imshow(vehicle, cmap='gray')
ax1.set_title('Example Car Image', fontsize=12)
ax2.imshow(hog_image_or9, cmap='gray')
ax2.set_title('HOG OR: 9, Pix_cell: 8', fontsize=12)
ax3.imshow(hog_image_or11, cmap='gray')
ax3.set_title('HOG OR: 11, Pix_cell: 8', fontsize=12)
ax4.imshow(hog_image_or9_pix16, cmap='gray')
ax4.set_title('HOG OR: 9, Pix_cell: 16', fontsize=12)
ax5.imshow(hog_image_or11_pix16, cmap='gray')
ax5.set_title('HOG OR: 11, Pix_cell: 16', fontsize=12)
rand_indexes = []
for i in range(10):
rand_indexes.append(randint(0, len(nonveh_images)))
for i in range(len(rand_indexes)):
nonvehicle = mpimg.imread(nonveh_images[rand_indexes[i]])
#non_vehicle = mpimg.imread(nonveh_images[rand_indexes[i]])
gray = cv2.cvtColor(nonvehicle, cv2.COLOR_RGB2GRAY)
# Call our function with vis=True to see an image output
features_or9, hog_image_or9 = get_hog_features(gray, orient= 9,
pix_per_cell= 8, cell_per_block= 2,
vis=True, feature_vec=False)
features_or11, hog_image_or11 = get_hog_features(gray, orient= 11,
pix_per_cell= 8, cell_per_block= 2,
vis=True, feature_vec=False)
features_or9_pix16, hog_image_or9_pix16 = get_hog_features(gray, orient= 9,
pix_per_cell= 16, cell_per_block= 2,
vis=True, feature_vec=False)
features_or11_pix16, hog_image_or11_pix16 = get_hog_features(gray, orient= 11,
pix_per_cell= 16, cell_per_block= 2,
vis=True, feature_vec=False)
# Plot the examples
f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(32,32))
ax1.imshow(nonvehicle, cmap='gray')
ax1.set_title('Example Non Car Image', fontsize=12)
ax2.imshow(hog_image_or9, cmap='gray')
ax2.set_title('HOG OR: 9, Pix_cell: 8', fontsize=12)
ax3.imshow(hog_image_or11, cmap='gray')
ax3.set_title('HOG OR: 11, Pix_cell: 8', fontsize=12)
ax4.imshow(hog_image_or9_pix16, cmap='gray')
ax4.set_title('HOG OR: 9, Pix_cell: 16', fontsize=12)
ax5.imshow(hog_image_or11_pix16, cmap='gray')
ax5.set_title('HOG OR: 11, Pix_cell: 16', fontsize=12)
| 35.09375
| 81
| 0.711487
| 549
| 3,369
| 4.092896
| 0.15847
| 0.056965
| 0.056075
| 0.064085
| 0.894971
| 0.894971
| 0.871829
| 0.864263
| 0.836671
| 0.836671
| 0
| 0.068141
| 0.168002
| 3,369
| 96
| 82
| 35.09375
| 0.7335
| 0.077768
| 0
| 0.75
| 0
| 0
| 0.084462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015625
| false
| 0
| 0.078125
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9b5f2d41170db5042e4a14deaf25cc4173a5782
| 128
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_0.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_0.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_0.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
name0_1_1_0_1_0_0 = None
name0_1_1_0_1_0_1 = None
name0_1_1_0_1_0_2 = None
name0_1_1_0_1_0_3 = None
name0_1_1_0_1_0_4 = None
| 14.222222
| 24
| 0.820313
| 40
| 128
| 1.875
| 0.175
| 0.266667
| 0.24
| 0.533333
| 0.88
| 0.88
| 0.746667
| 0
| 0
| 0
| 0
| 0.318182
| 0.140625
| 128
| 9
| 25
| 14.222222
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b9c73da6c9f356e413d59c1b058ebe757d89a012
| 6,900
|
py
|
Python
|
cs3/auth/registry/v1beta1/registry_api_pb2_grpc.py
|
cs3org/python-cs3apis
|
33f84befa7c6009ce87fb7594128d26ff6e49bbd
|
[
"Apache-2.0"
] | 1
|
2020-12-17T14:39:57.000Z
|
2020-12-17T14:39:57.000Z
|
cs3/auth/registry/v1beta1/registry_api_pb2_grpc.py
|
cs3org/python-cs3apis
|
33f84befa7c6009ce87fb7594128d26ff6e49bbd
|
[
"Apache-2.0"
] | 1
|
2020-05-06T10:23:07.000Z
|
2020-05-12T09:07:08.000Z
|
cs3/auth/registry/v1beta1/registry_api_pb2_grpc.py
|
cs3org/python-cs3apis
|
33f84befa7c6009ce87fb7594128d26ff6e49bbd
|
[
"Apache-2.0"
] | 1
|
2020-05-05T09:24:54.000Z
|
2020-05-05T09:24:54.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from cs3.auth.registry.v1beta1 import registry_api_pb2 as cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2
class RegistryAPIStub(object):
"""Auth Registry API
The Auth Registry API is meant to as registry to obtain
information of available auth providers.
For example, to use OIDC or Kerberos for authentication.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
"OPTIONAL" in this document are to be interpreted as described in
RFC 2119.
The following are global requirements that apply to all methods:
Any method MUST return CODE_OK on a succesful operation.
Any method MAY return NOT_IMPLEMENTED.
Any method MAY return INTERNAL.
Any method MAY return UNKNOWN.
Any method MAY return UNAUTHENTICATED.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAuthProvider = channel.unary_unary(
'/cs3.auth.registry.v1beta1.RegistryAPI/GetAuthProvider',
request_serializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.GetAuthProviderRequest.SerializeToString,
response_deserializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.GetAuthProviderResponse.FromString,
)
self.ListAuthProviders = channel.unary_unary(
'/cs3.auth.registry.v1beta1.RegistryAPI/ListAuthProviders',
request_serializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.ListAuthProvidersRequest.SerializeToString,
response_deserializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.ListAuthProvidersResponse.FromString,
)
class RegistryAPIServicer(object):
"""Auth Registry API
The Auth Registry API is meant to as registry to obtain
information of available auth providers.
For example, to use OIDC or Kerberos for authentication.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
"OPTIONAL" in this document are to be interpreted as described in
RFC 2119.
The following are global requirements that apply to all methods:
Any method MUST return CODE_OK on a succesful operation.
Any method MAY return NOT_IMPLEMENTED.
Any method MAY return INTERNAL.
Any method MAY return UNKNOWN.
Any method MAY return UNAUTHENTICATED.
"""
def GetAuthProvider(self, request, context):
"""Returns the auth provider that is reponsible for the given
resource reference.
MUST return CODE_NOT_FOUND if the reference does not exist.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListAuthProviders(self, request, context):
"""Returns a list of the available auth providers known by this registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RegistryAPIServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAuthProvider': grpc.unary_unary_rpc_method_handler(
servicer.GetAuthProvider,
request_deserializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.GetAuthProviderRequest.FromString,
response_serializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.GetAuthProviderResponse.SerializeToString,
),
'ListAuthProviders': grpc.unary_unary_rpc_method_handler(
servicer.ListAuthProviders,
request_deserializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.ListAuthProvidersRequest.FromString,
response_serializer=cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.ListAuthProvidersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'cs3.auth.registry.v1beta1.RegistryAPI', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RegistryAPI(object):
"""Auth Registry API
The Auth Registry API is meant to as registry to obtain
information of available auth providers.
For example, to use OIDC or Kerberos for authentication.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
"OPTIONAL" in this document are to be interpreted as described in
RFC 2119.
The following are global requirements that apply to all methods:
Any method MUST return CODE_OK on a succesful operation.
Any method MAY return NOT_IMPLEMENTED.
Any method MAY return INTERNAL.
Any method MAY return UNKNOWN.
Any method MAY return UNAUTHENTICATED.
"""
@staticmethod
def GetAuthProvider(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cs3.auth.registry.v1beta1.RegistryAPI/GetAuthProvider',
cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.GetAuthProviderRequest.SerializeToString,
cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.GetAuthProviderResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListAuthProviders(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cs3.auth.registry.v1beta1.RegistryAPI/ListAuthProviders',
cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.ListAuthProvidersRequest.SerializeToString,
cs3_dot_auth_dot_registry_dot_v1beta1_dot_registry__api__pb2.ListAuthProvidersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 44.516129
| 145
| 0.715217
| 785
| 6,900
| 6
| 0.187261
| 0.060722
| 0.041614
| 0.035881
| 0.804459
| 0.797452
| 0.783864
| 0.767728
| 0.746497
| 0.746497
| 0
| 0.015453
| 0.221594
| 6,900
| 154
| 146
| 44.805195
| 0.861478
| 0.34029
| 0
| 0.444444
| 1
| 0
| 0.088728
| 0.059851
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.027778
| 0.027778
| 0.180556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b9d78207d0ee6d322eb93e1e85d5af64e30fce1c
| 10,505
|
py
|
Python
|
tests/unit_tests/logic/dome/test_domeIndi.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 16
|
2020-01-11T22:32:26.000Z
|
2022-03-31T15:18:14.000Z
|
tests/unit_tests/logic/dome/test_domeIndi.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 196
|
2020-01-16T13:56:01.000Z
|
2022-03-29T02:06:51.000Z
|
tests/unit_tests/logic/dome/test_domeIndi.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 6
|
2019-12-01T19:39:33.000Z
|
2021-05-27T13:14:20.000Z
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
import unittest.mock as mock
# external packages
from PyQt5.QtCore import QThreadPool, QObject, pyqtSignal
from indibase.indiBase import Device, Client
# local import
from logic.dome.domeIndi import DomeIndi
from base.driverDataClass import Signals
@pytest.fixture(autouse=True, scope='function')
def module_setup_teardown():
class Test(QObject):
threadPool = QThreadPool()
message = pyqtSignal(str, int)
update1s = pyqtSignal()
global app
app = DomeIndi(app=Test(), signals=Signals(), data={})
yield
def test_setUpdateConfig_1():
app.deviceName = ''
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_2():
app.deviceName = 'test'
app.device = None
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'Test': 1}):
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_4():
app.deviceName = 'test'
app.device = Device()
app.UPDATE_RATE = 1
with mock.patch.object(app.device,
'getNumber',
return_value={'PERIOD_MS': 1}):
suc = app.setUpdateConfig('test')
assert suc
def test_setUpdateConfig_5():
app.deviceName = 'test'
app.device = Device()
app.client = Client()
app.UPDATE_RATE = 0
with mock.patch.object(app.device,
'getNumber',
return_value={'PERIOD_MS': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=False):
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_6():
app.deviceName = 'test'
app.device = Device()
app.client = Client()
app.UPDATE_RATE = 0
with mock.patch.object(app.device,
'getNumber',
return_value={'PERIOD_MS': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.setUpdateConfig('test')
assert suc
def test_updateStatus_1():
app.device = Device()
app.client = Client()
app.client.connected = False
suc = app.updateStatus()
assert not suc
def test_updateStatus_2():
app.device = Device()
app.client = Client()
app.client.connected = True
suc = app.updateStatus()
assert suc
def test_updateNumber_1():
app.device = None
suc = app.updateNumber('test', 'test')
assert not suc
def test_updateNumber_2():
app.device = Device()
app.deviceName = 'test'
setattr(app.device, 'ABS_DOME_POSITION', {'state': 'Busy'})
with mock.patch.object(app.device,
'getNumber',
return_value={'TEST': 1,
'DOME_ABSOLUTE_POSITION': 2}):
suc = app.updateNumber('test', 'ABS_DOME_POSITION')
assert suc
def test_updateNumber_3():
app.device = Device()
app.deviceName = 'test'
setattr(app.device, 'DOME_SHUTTER', {'state': 'Busy'})
with mock.patch.object(app.device,
'getNumber',
return_value={'TEST': 1,
'SHUTTER_OPEN': 2}):
suc = app.updateNumber('test', 'SHUTTER_OPEN')
assert suc
def test_updateNumber_4():
app.device = Device()
app.deviceName = 'test'
setattr(app.device, 'DOME_SHUTTER', {'state': 'test'})
with mock.patch.object(app.device,
'getNumber',
return_value={'TEST': 1,
'SHUTTER_OPEN': 2}):
suc = app.updateNumber('test', 'SHUTTER_OPEN')
assert suc
def test_slewToAltAz_1():
suc = app.slewToAltAz()
assert not suc
def test_slewToAltAz_2():
app.device = Device()
suc = app.slewToAltAz()
assert not suc
def test_slewToAltAz_3():
app.device = Device()
app.deviceName = 'test'
suc = app.slewToAltAz()
assert not suc
def test_slewToAltAz_4():
app.device = Device()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getNumber',
return_value={'DOME_ABSOLUTE_POSITION': 1}):
suc = app.slewToAltAz()
assert not suc
def test_slewToAltAz_5():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getNumber',
return_value={'DOME_ABSOLUTE_POSITION': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=False):
suc = app.slewToAltAz()
assert not suc
def test_slewToAltAz_6():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getNumber',
return_value={'DOME_ABSOLUTE_POSITION': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.slewToAltAz()
assert suc
def test_openShutter_1():
suc = app.openShutter()
assert not suc
def test_openShutter_2():
app.device = Device()
suc = app.openShutter()
assert not suc
def test_openShutter_3():
app.device = Device()
app.deviceName = 'test'
suc = app.openShutter()
assert not suc
def test_openShutter_4():
app.device = Device()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'SHUTTER_OPEN': 1}):
suc = app.openShutter()
assert not suc
def test_openShutter_5():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'SHUTTER_OPEN': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=False):
suc = app.openShutter()
assert not suc
def test_openShutter_6():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'SHUTTER_OPEN': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=True):
suc = app.openShutter()
assert suc
def test_closeShutter_1():
suc = app.closeShutter()
assert not suc
def test_closeShutter_2():
app.device = Device()
suc = app.closeShutter()
assert not suc
def test_closeShutter_3():
app.device = Device()
app.deviceName = 'test'
suc = app.closeShutter()
assert not suc
def test_closeShutter_4():
app.device = Device()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'SHUTTER_CLOSE': 1}):
suc = app.closeShutter()
assert not suc
def test_closeShutter_5():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'SHUTTER_CLOSE': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=False):
suc = app.closeShutter()
assert not suc
def test_closeShutter_6():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'SHUTTER_CLOSE': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=True):
suc = app.closeShutter()
assert suc
def test_abortSlew_1():
suc = app.abortSlew()
assert not suc
def test_abortSlew_2():
app.device = Device()
suc = app.abortSlew()
assert not suc
def test_abortSlew_3():
app.device = Device()
app.deviceName = 'test'
suc = app.abortSlew()
assert not suc
def test_abortSlew_4():
app.device = Device()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'ABORT': 1}):
suc = app.abortSlew()
assert not suc
def test_abortSlew_5():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'ABORT': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=False):
suc = app.abortSlew()
assert not suc
def test_abortSlew_6():
app.device = Device()
app.client = Client()
app.deviceName = 'test'
with mock.patch.object(app.device,
'getSwitch',
return_value={'ABORT': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=True):
suc = app.abortSlew()
assert suc
| 26.197007
| 71
| 0.52337
| 1,043
| 10,505
| 5.143816
| 0.106424
| 0.08891
| 0.065238
| 0.102703
| 0.830382
| 0.800373
| 0.778006
| 0.771109
| 0.756943
| 0.605965
| 0
| 0.011515
| 0.355164
| 10,505
| 400
| 72
| 26.2625
| 0.780484
| 0.0297
| 0
| 0.785467
| 0
| 0
| 0.08022
| 0.008791
| 0
| 0
| 0
| 0
| 0.124567
| 1
| 0.128028
| false
| 0
| 0.020761
| 0
| 0.16263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a47feca1ae31d3cb7dc4846ebdd5fc24b2a7ee1
| 7,934
|
py
|
Python
|
tests/test_turning_t_maze.py
|
GPittia/PyTorch-NEAT
|
7eb04af0e1e854a5e268358a6d4f4dccb576b635
|
[
"Apache-2.0"
] | 486
|
2018-09-21T17:44:36.000Z
|
2022-03-30T23:41:49.000Z
|
tests/test_turning_t_maze.py
|
GPittia/PyTorch-NEAT
|
7eb04af0e1e854a5e268358a6d4f4dccb576b635
|
[
"Apache-2.0"
] | 16
|
2018-09-21T20:38:54.000Z
|
2022-03-25T11:47:19.000Z
|
tests/test_turning_t_maze.py
|
ykeuter/PyTorch-NEAT
|
f78587d3a83df189b01be5a5daea3996b8fd9866
|
[
"Apache-2.0"
] | 103
|
2018-09-15T06:08:24.000Z
|
2022-03-24T17:02:11.000Z
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pytorch_neat.turning_t_maze import TurningTMazeEnv
def test_default_initialization():
env = TurningTMazeEnv()
assert env.hall_len == 3
assert env.n_trials == 100
assert env.maze.shape == (6, 9)
assert (
env.maze
== [
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
]
).all()
def test_step_without_reset():
env = TurningTMazeEnv()
with pytest.raises(AssertionError):
env.step(1)
def test_render():
env = TurningTMazeEnv()
with pytest.raises(NotImplementedError):
env.render()
def test_step_with_reset():
env = TurningTMazeEnv()
obs = env.reset()
assert obs.shape == (4,)
assert env.row_pos == env.col_pos == 4
assert (obs == [1, 0, 1, 0]).all()
obs, reward, done, _ = env.step(0)
assert (obs == [1, 1, 0, 0]).all()
assert reward == 0.0
assert not done
obs, reward, done, _ = env.step(1)
assert (obs == [1, 1, 0, 0]).all()
assert reward == -0.4
assert not done
def test_full_trial():
env = TurningTMazeEnv()
obs = env.reset()
for _ in range(3):
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
obs, reward, done, _ = env.step(1)
assert not done
assert reward == 0
assert (obs == [0, 1, 0, 0]).all()
assert env.direction == 0
assert reward == 0
obs, reward, done, _ = env.step(2)
assert env.direction == 1
assert (obs == [1, 0, 0, 0]).all()
assert reward == 0
assert not done
for _ in range(2):
obs, reward, done, _ = env.step(1)
assert env.direction == 1
assert (obs == [1, 0, 1, 0]).all()
assert reward == 0
assert not done
obs, reward, done, _ = env.step(1)
assert (obs == [1, 1, 1, 1]).all()
assert reward == 1
assert env.direction == 1
assert not done
obs, reward, done, _ = env.step(2)
assert reward == 0
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
assert env.row_pos == env.col_pos == 4
assert not done
def test_init_reward_side():
env = TurningTMazeEnv(init_reward_side=0)
obs = env.reset()
for _ in range(3):
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
obs, reward, done, _ = env.step(1)
assert not done
assert reward == 0
assert (obs == [0, 1, 0, 0]).all()
assert env.direction == 0
assert reward == 0
obs, reward, done, _ = env.step(0)
assert env.direction == 3
assert (obs == [0, 0, 1, 0]).all()
assert reward == 0
assert not done
for _ in range(2):
obs, reward, done, _ = env.step(1)
assert env.direction == 3
assert (obs == [1, 0, 1, 0]).all()
assert reward == 0
assert not done
obs, reward, done, _ = env.step(1)
assert (obs == [1, 1, 1, 1]).all()
assert reward == 1
assert env.direction == 3
assert not done
obs, reward, done, _ = env.step(2)
assert reward == 0
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
assert env.row_pos == env.col_pos == 4
assert not done
def test_low_reward():
env = TurningTMazeEnv()
obs = env.reset()
for _ in range(3):
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
obs, reward, done, _ = env.step(1)
assert not done
assert reward == 0
assert (obs == [0, 1, 0, 0]).all()
assert env.direction == 0
assert reward == 0
obs, reward, done, _ = env.step(0)
assert env.direction == 3
assert (obs == [0, 0, 1, 0]).all()
assert reward == 0
assert not done
for _ in range(2):
obs, reward, done, _ = env.step(1)
assert env.direction == 3
assert (obs == [1, 0, 1, 0]).all()
assert reward == 0
assert not done
obs, reward, done, _ = env.step(1)
assert (obs == [1, 1, 1, 0.2]).all()
assert reward == 0.2
assert env.direction == 3
assert not done
obs, reward, done, _ = env.step(2)
assert reward == 0
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
assert env.row_pos == env.col_pos == 4
assert not done
def test_deployment():
env = TurningTMazeEnv(n_trials=3)
for _ in range(5):
obs = env.reset()
for _ in range(3):
for _ in range(3):
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
obs, reward, done, _ = env.step(1)
assert not done
assert reward == 0
assert (obs == [0, 1, 0, 0]).all()
assert env.direction == 0
assert reward == 0
obs, reward, done, _ = env.step(2)
assert env.direction == 1
assert (obs == [1, 0, 0, 0]).all()
assert reward == 0
assert not done
for _ in range(2):
obs, reward, done, _ = env.step(1)
assert env.direction == 1
assert (obs == [1, 0, 1, 0]).all()
assert reward == 0
assert not done
obs, reward, done, _ = env.step(1)
assert (obs == [1, 1, 1, 1]).all()
assert reward == 1
assert env.direction == 1
assert not done
obs, reward, done, _ = env.step(2)
assert reward == 0
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
assert env.row_pos == env.col_pos == 4
assert done
def test_reward_flip():
env = TurningTMazeEnv(n_trials=10, reward_flip_mean=5, reward_flip_range=3)
for _ in range(5):
obs = env.reset()
for i in range(10):
for _ in range(3):
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
obs, reward, done, _ = env.step(1)
assert not done
assert reward == 0
assert (obs == [0, 1, 0, 0]).all()
assert env.direction == 0
assert reward == 0
obs, reward, done, _ = env.step(2)
assert env.direction == 1
assert (obs == [1, 0, 0, 0]).all()
assert reward == 0
assert not done
for _ in range(2):
obs, reward, done, _ = env.step(1)
assert env.direction == 1
assert (obs == [1, 0, 1, 0]).all()
assert reward == 0
assert not done
obs, reward, done, _ = env.step(1)
assert (obs[:-1] == [1, 1, 1]).all()
assert reward == obs[-1]
assert reward in {0.2, 1.0}
if i < 2:
assert reward == 1.0
elif i > 8:
assert reward == 0.2
assert env.direction == 1
assert not done
obs, reward, done, _ = env.step(2)
assert reward == 0
assert (obs == [1, 0, 1, 0]).all()
assert env.direction == 0
assert env.row_pos == env.col_pos == 4
assert done
| 31.113725
| 79
| 0.51336
| 1,103
| 7,934
| 3.6165
| 0.108794
| 0.027576
| 0.032339
| 0.033091
| 0.755578
| 0.729255
| 0.722487
| 0.705691
| 0.705691
| 0.684633
| 0
| 0.064441
| 0.350643
| 7,934
| 254
| 80
| 31.23622
| 0.709821
| 0.073607
| 0
| 0.826484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.625571
| 1
| 0.041096
| false
| 0
| 0.009132
| 0
| 0.050228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a49ee832d81f804133f4f79e63589c6f7bd84c9
| 53,080
|
py
|
Python
|
rom_generator/scenes/imported/Forest.py
|
ikarth/game-boy-rom-generator
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
[
"MIT"
] | 3
|
2021-08-07T03:38:02.000Z
|
2021-09-17T14:33:27.000Z
|
rom_generator/scenes/imported/Forest.py
|
ikarth/game-boy-rom-generator
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
[
"MIT"
] | null | null | null |
rom_generator/scenes/imported/Forest.py
|
ikarth/game-boy-rom-generator
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
[
"MIT"
] | null | null | null |
# Generated Scene Functions
# Forest.py
from rom_generator import generator
from rom_generator import script_functions as script
import random
test_generation_destination_path = "../gbprojects/generated_export_test_Forest/"
def scene_generation():
sprite_sheet_data = [
generator.makeSpriteSheet('actor.png', name='actor', type='actor', frames=3),
generator.makeSpriteSheet('actor_animated.png', name='actor_animated', type='actor_animated', frames=6),
generator.makeSpriteSheet('hatch.png', name='hatch', type='static', frames=1),
generator.makeSpriteSheet('invisible.png', name='invisible', type='static', frames=1),
generator.makeSpriteSheet('shovel.png', name='shovel', type='static', frames=1),
generator.makeSpriteSheet('stairsdown.png', name='stairsdown', type='static', frames=1),
generator.makeSpriteSheet('static.png', name='static', type='static', frames=1)]
def findSpriteByName(sprite_name):
'''
Returns first sprite that matches the name given.
'''
try:
return [s for s in sprite_sheet_data if (s['name'] == sprite_name)][0]
except:
return None
def getBySceneLabel(scene_label):
'''
This is mostly here so we can get the matching scene from the original
template data. As used here it just grabs the first scene that was made
from that template, so if the template is used more than once it won't
behave as expected and you should generate a proper relationship instad.
'''
s_id = generator.getSceneIdByLabel(scene_label)
if s_id == None:
return '<♔' + scene_label + '♔>'
return s_id
def scene_gen_Forest1_00001(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 0, 12, 1, 2)
trigger_01 = generator.makeTrigger('trigger_01', 9, 7, 2, 1)
trigger_02 = generator.makeTrigger('trigger_02', 16, 17, 2, 1)
trigger_list = []
collision_data_list = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 240, 255, 3, 199, 255, 48, 156, 31, 3, 192, 23, 0, 252, 225, 1, 30, 30, 192, 224, 255, 12, 254, 135, 255, 127, 248, 255, 131, 255, 255, 252, 255, 207]
gen_scene_bkg = generator.makeBackground("Forest_01_2a.png")
gen_scene_scn = generator.makeScene("_gen_Forest1", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest1_00001")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 12), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 12), 'entrance_size': (1, 2) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (9, 8), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (9, 7), 'entrance_size': (2, 1) }, 'tags': ['D'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (16, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (16, 17), 'entrance_size': (2, 1) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def scene_gen_Forest2_00002(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 6, 17, 2, 1)
trigger_01 = generator.makeTrigger('trigger_01', 6, 0, 2, 1)
trigger_02 = generator.makeTrigger('trigger_02', 19, 6, 1, 4)
trigger_list = []
collision_data_list = [32, 1, 0, 34, 0, 48, 2, 0, 35, 0, 48, 3, 0, 227, 254, 112, 248, 1, 7, 31, 240, 3, 0, 31, 0, 240, 241, 15, 15, 255, 248, 248, 131, 135, 63, 120, 252, 131, 195, 63, 56, 254, 129, 227, 31]
gen_scene_bkg = generator.makeBackground("Forest_01_2b.png")
gen_scene_scn = generator.makeScene("_gen_Forest2", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest2_00002")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (6, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (6, 17), 'entrance_size': (2, 1) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (7, 2), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (6, 0), 'entrance_size': (2, 1) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (17, 8), 'exit_direction': 'left', 'entrance': gen_scene_scn['id'], 'entrance_location': (19, 6), 'entrance_size': (1, 4) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest3_00003(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 0, 14, 1, 2)
trigger_01 = generator.makeTrigger('trigger_01', 19, 10, 1, 4)
trigger_02 = generator.makeTrigger('trigger_02', 9, 6, 2, 2)
trigger_list = []
collision_data_list = [225, 0, 0, 254, 7, 112, 224, 0, 3, 12, 48, 207, 129, 241, 24, 16, 137, 3, 145, 48, 48, 0, 15, 2, 224, 32, 126, 224, 227, 3, 2, 255, 48, 240, 7, 128, 255, 15, 248, 255, 255, 255, 255, 255, 255]
gen_scene_bkg = generator.makeBackground("Forest_01_2c.png")
gen_scene_scn = generator.makeScene("_gen_Forest3", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest3_00003")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 14), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 14), 'entrance_size': (1, 2) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (17, 11), 'exit_direction': 'left', 'entrance': gen_scene_scn['id'], 'entrance_location': (19, 10), 'entrance_size': (1, 4) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (9, 8), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (9, 6), 'entrance_size': (2, 2) }, 'tags': ['D'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def scene_gen_Forest4_00004(callback):
actor_name_table = {}
actor_00 = generator.makeActor(None, 14, 9, 'static', moveSpeed=1, animSpeed=3, direction='down', script=[], sprite_id=findSpriteByName('hatch')['id'], name='actor_8d4b2968-7f8c-430e-a48d-a255880b0607')
actor_name_table.update({'actor_8d4b2968-7f8c-430e-a48d-a255880b0607': actor_00})
actor_00['script'] = [
script.text(text=['You move the\nhatch.'], avatarId=''),
script.actorSetPosition(actorId='♔REFERENCE_TO_ACTORS_<$self$>♔', x=16, y=9),
script.end()
]
actor_list = [actor_00]
trigger_00 = generator.makeTrigger('trigger_00', 0, 8, 1, 2)
trigger_01 = generator.makeTrigger('trigger_01', 8, 0, 4, 1)
trigger_02 = generator.makeTrigger('trigger_02', 14, 8, 2, 2)
trigger_list = []
collision_data_list = [255, 48, 16, 4, 2, 193, 97, 48, 12, 4, 195, 195, 240, 31, 8, 231, 231, 112, 54, 28, 64, 195, 1, 40, 48, 159, 6, 146, 112, 32, 56, 15, 131, 97, 16, 114, 224, 49, 3, 2, 193, 63, 0, 0, 0]
gen_scene_bkg = generator.makeBackground("Forest_01_2d.png")
gen_scene_scn = generator.makeScene("_gen_Forest4", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest4_00004")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 9), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 8), 'entrance_size': (1, 2) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (10, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 0), 'entrance_size': (4, 1) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (14, 10), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (14, 8), 'entrance_size': (2, 2) }, 'tags': ['D'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def scene_gen_Forest5_00005(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 8, 0, 4, 1)
trigger_01 = generator.makeTrigger('trigger_01', 0, 12, 1, 2)
trigger_02 = generator.makeTrigger('trigger_02', 10, 17, 2, 1)
trigger_list = []
collision_data_list = [128, 112, 0, 8, 7, 128, 249, 0, 158, 31, 32, 192, 1, 1, 28, 16, 143, 129, 240, 24, 12, 143, 113, 240, 24, 1, 143, 17, 0, 24, 96, 192, 1, 6, 12, 255, 225, 0, 12, 6, 128, 115, 0, 56, 7]
gen_scene_bkg = generator.makeBackground("Forest_01_2e.png")
gen_scene_scn = generator.makeScene("_gen_Forest5", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest5_00005")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (9, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 0), 'entrance_size': (4, 1) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (1, 12), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 12), 'entrance_size': (1, 2) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (10, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (10, 17), 'entrance_size': (2, 1) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest6_00006(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 8, 17, 2, 1)
trigger_01 = generator.makeTrigger('trigger_01', 9, 6, 2, 1)
trigger_list = []
collision_data_list = [1, 0, 0, 0, 0, 128, 15, 0, 248, 0, 192, 27, 0, 254, 1, 224, 121, 0, 158, 7, 240, 80, 0, 15, 7, 240, 57, 128, 15, 3, 248, 60, 128, 135, 3, 120, 120, 128, 3, 7, 248, 124, 128, 207, 7]
gen_scene_bkg = generator.makeBackground("Forest_01_2f.png")
gen_scene_scn = generator.makeScene("_gen_Forest6", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest6_00006")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (8, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 17), 'entrance_size': (2, 1) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (9, 7), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (9, 6), 'entrance_size': (2, 1) }, 'tags': ['D'] }
gen_scene_connections = [connection_00, connection_01]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def scene_gen_Forest7_00007(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 8, 17, 4, 1)
trigger_list = []
collision_data_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 224, 255, 1, 254, 31, 224, 225, 1, 12, 60, 192, 204, 3, 204, 124, 192, 225, 7, 28, 126, 192, 243, 7, 28, 126, 192, 225, 7, 14, 124, 224, 240, 15, 14, 255]
gen_scene_bkg = generator.makeBackground("Forest_01_2g.png")
gen_scene_scn = generator.makeScene("_gen_Forest7", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest7_00007")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (10, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 17), 'entrance_size': (4, 1) }, 'tags': ['C'] }
gen_scene_connections = [connection_00]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest8_00008(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 10, 17, 3, 1)
trigger_01 = generator.makeTrigger('trigger_01', 4, 0, 2, 1)
trigger_02 = generator.makeTrigger('trigger_02', 12, 0, 2, 1)
trigger_list = []
collision_data_list = [205, 207, 207, 120, 56, 144, 15, 6, 241, 96, 24, 15, 7, 241, 48, 144, 159, 3, 241, 24, 48, 207, 1, 98, 12, 96, 230, 0, 4, 6, 192, 112, 0, 8, 3, 128, 49, 0, 16, 1, 0, 51, 0, 32, 2]
gen_scene_bkg = generator.makeBackground("Forest_01_2h.png")
gen_scene_scn = generator.makeScene("_gen_Forest8", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest8_00008")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (10, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (10, 17), 'entrance_size': (3, 1) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (5, 2), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (4, 0), 'entrance_size': (2, 1) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (13, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (12, 0), 'entrance_size': (2, 1) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest9_00009(callback):
actor_name_table = {}
actor_list = []
trigger_list = []
collision_data_list = []
gen_scene_bkg = generator.makeBackground("Forest_01_2i.png")
gen_scene_scn = generator.makeScene("_gen_Forest9", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest9_00009")
gen_scene_connections = []
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest10_00010(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 2, 0, 2, 1)
trigger_01 = generator.makeTrigger('trigger_01', 9, 6, 2, 2)
trigger_list = []
collision_data_list = [243, 255, 63, 255, 255, 225, 255, 31, 254, 255, 227, 255, 63, 254, 255, 231, 249, 127, 156, 255, 15, 240, 255, 0, 254, 31, 224, 255, 1, 252, 63, 252, 255, 195, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]
gen_scene_bkg = generator.makeBackground("Forest_01_2k.png")
gen_scene_scn = generator.makeScene("_gen_Forest10", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest10_00010")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (2, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (2, 0), 'entrance_size': (2, 1) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (9, 8), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (9, 6), 'entrance_size': (2, 2) }, 'tags': ['D'] }
gen_scene_connections = [connection_00, connection_01]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def scene_gen_Forest11_00011(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 0, 6, 1, 4)
trigger_01 = generator.makeTrigger('trigger_01', 19, 6, 1, 2)
trigger_list = []
collision_data_list = [255, 255, 255, 255, 255, 159, 255, 255, 240, 255, 3, 255, 63, 224, 255, 96, 230, 0, 6, 14, 252, 64, 204, 7, 196, 127, 96, 254, 3, 224, 63, 0, 255, 3, 240, 127, 248, 255, 3, 255, 255, 255, 255, 255, 255]
gen_scene_bkg = generator.makeBackground("Forest_01_2l.png")
gen_scene_scn = generator.makeScene("_gen_Forest11", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest11_00011")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 7), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 6), 'entrance_size': (1, 4) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (17, 7), 'exit_direction': 'left', 'entrance': gen_scene_scn['id'], 'entrance_location': (19, 6), 'entrance_size': (1, 2) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest12_00012_shovel(callback):
actor_name_table = {}
actor_00 = generator.makeActor(None, 12, 9, 'static', moveSpeed=1, animSpeed=3, direction='down', script=[], sprite_id=findSpriteByName('shovel')['id'], name='actor_1699a77d-10d6-4e2f-941d-04cf112fba61')
actor_name_table.update({'actor_1699a77d-10d6-4e2f-941d-04cf112fba61': actor_00})
actor_00['startScript'] = [
script.ifFlagsCompare(variable='27', flag='0', children = {
'true': [script.actorHide(actorId='♔REFERENCE_TO_ACTORS_<$self$>♔'), script.end()],
'false': [script.end()]
}),
script.end()
]
actor_00['script'] = [
script.actorHide(actorId='♔REFERENCE_TO_ACTORS_<$self$>♔'),
script.text(text=['You picked up\nthe shovel'], avatarId='96894897-c21a-49b4-8d1e-214ba5735525'),
script.addFlags(variable='27', flag1=True, flag2=False, flag3=False, flag4=False, flag5=False, flag6=False, flag7=False, flag8=False),
script.end()
]
actor_list = [actor_00]
trigger_00 = generator.makeTrigger('trigger_00', 0, 7, 1, 3)
trigger_01 = generator.makeTrigger('trigger_01', 8, 0, 4, 1)
trigger_02 = generator.makeTrigger('trigger_02', 19, 12, 1, 2)
trigger_list = []
collision_data_list = [192, 240, 15, 4, 129, 96, 24, 8, 130, 128, 35, 207, 248, 241, 143, 129, 135, 8, 56, 144, 248, 3, 201, 32, 144, 7, 134, 9, 64, 240, 0, 12, 0, 128, 0, 0, 248, 15, 0, 255, 0, 0, 0, 0, 0]
gen_scene_bkg = generator.makeBackground("Forest_01_2m.png")
gen_scene_scn = generator.makeScene("_gen_Forest12_shovel", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest12_00012")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 7), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 7), 'entrance_size': (1, 3) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (9, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 0), 'entrance_size': (4, 1) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (17, 13), 'exit_direction': 'left', 'entrance': gen_scene_scn['id'], 'entrance_location': (19, 12), 'entrance_size': (1, 2) }, 'tags': ['C'] }
gen_scene_connections = [connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest12_00012(callback):
actor_name_table = {}
actor_00 = generator.makeActor(None, 12, 9, 'static', moveSpeed=1, animSpeed=3, direction='down', script=[], sprite_id=findSpriteByName('shovel')['id'], name='actor_1699a77d-10d6-4e2f-941d-04cf112fba61')
actor_name_table.update({'actor_1699a77d-10d6-4e2f-941d-04cf112fba61': actor_00})
actor_00['startScript'] = [
script.ifFlagsCompare(variable='27', flag='0', children = {
'true': [script.actorHide(actorId='♔REFERENCE_TO_ACTORS_<$self$>♔'), script.end()],
'false': [script.end()]
}),
script.end()
]
actor_00['script'] = [
script.actorHide(actorId='♔REFERENCE_TO_ACTORS_<$self$>♔'),
script.text(text=['You picked up\nthe shovel'], avatarId='96894897-c21a-49b4-8d1e-214ba5735525'),
script.addFlags(variable='27', flag1=True, flag2=False, flag3=False, flag4=False, flag5=False, flag6=False, flag7=False, flag8=False),
script.end()
]
actor_list = [actor_00]
trigger_00 = generator.makeTrigger('trigger_00', 0, 7, 1, 3)
trigger_01 = generator.makeTrigger('trigger_01', 8, 0, 4, 1)
trigger_02 = generator.makeTrigger('trigger_02', 19, 12, 1, 2)
trigger_list = []
collision_data_list = [255, 240, 255, 7, 255, 63, 248, 255, 131, 255, 31, 254, 255, 225, 255, 129, 135, 15, 56, 240, 248, 3, 143, 63, 240, 255, 135, 255, 127, 240, 255, 15, 240, 255, 0, 192, 255, 15, 240, 255, 0, 0, 0, 0, 0]
gen_scene_bkg = generator.makeBackground("Forest_01_2m.png")
gen_scene_scn = generator.makeScene("_gen_Forest12", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest12_00012")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 7), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 7), 'entrance_size': (1, 3) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (9, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 0), 'entrance_size': (4, 1) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (17, 13), 'exit_direction': 'left', 'entrance': gen_scene_scn['id'], 'entrance_location': (19, 12), 'entrance_size': (1, 2) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest"]}
return scene_data
def scene_gen_Forest13_00013(callback):
actor_name_table = {}
actor_list = []
trigger_00 = generator.makeTrigger('trigger_00', 0, 4, 1, 2)
trigger_01 = generator.makeTrigger('trigger_01', 10, 8, 2, 2)
trigger_list = []
collision_data_list = [255, 255, 255, 255, 0, 255, 31, 240, 255, 7, 240, 127, 3, 255, 63, 225, 255, 19, 254, 255, 243, 243, 31, 62, 255, 231, 1, 56, 28, 128, 15, 255, 124, 224, 135, 127, 0, 254, 7, 192, 255, 63, 255, 255, 227]
gen_scene_bkg = generator.makeBackground("Forest_01_2n.png")
gen_scene_scn = generator.makeScene("_gen_Forest13", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest13_00013")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (1, 5), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 4), 'entrance_size': (1, 2) }, 'tags': ['C'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (10, 10), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (10, 8), 'entrance_size': (2, 2) }, 'tags': ['D'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (14, 16), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (14, 17), 'entrance_size': (2, 1) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def scene_gen_Forest14_00014(callback):
actor_name_table = {}
actor_00 = generator.makeActor(None, 8, 8, 'static', moveSpeed=1, animSpeed=3, direction='down', script=[], sprite_id=findSpriteByName('invisible')['id'], name='actor_f2e5a00f-cbf1-4d54-ae48-11b0ac1caf1d')
actor_name_table.update({'actor_f2e5a00f-cbf1-4d54-ae48-11b0ac1caf1d': actor_00})
actor_00['startScript'] = [
script.ifFlagsCompare(variable='27', flag='0', children = {
'true': [script.actorHide(actorId='♔REFERENCE_TO_ACTORS_<$self$>♔'), script.end()],
'false': [script.end()]
}),
script.end()
]
actor_00['script'] = [
script.text(text=["There's something\nburied at the\nbottom of the well"], avatarId=''),
script.end()
]
actor_list = [actor_00]
trigger_00 = generator.makeTrigger('trigger_00', 8, 8, 2, 1)
trigger_01 = generator.makeTrigger('trigger_01', 12, 0, 2, 1)
trigger_02 = generator.makeTrigger('trigger_02', 0, 12, 1, 2)
trigger_list = []
collision_data_list = [1, 200, 15, 128, 248, 0, 152, 15, 128, 241, 224, 63, 15, 254, 227, 112, 120, 14, 3, 199, 48, 240, 12, 51, 199, 62, 112, 254, 3, 227, 252, 51, 135, 31, 113, 1, 128, 19, 0, 48, 255, 255, 1, 0, 0]
gen_scene_bkg = generator.makeBackground("Forest_01_2o.png")
gen_scene_scn = generator.makeScene("_gen_Forest14", gen_scene_bkg, collisions=collision_data_list, actors=actor_list, triggers=trigger_list, scene_label="scene_gen_Forest14_00014")
def addConnection_00(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_00 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_00['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_00
connection_00 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_00, 'args': { 'exit_location': (8, 7), 'exit_direction': 'up', 'entrance': gen_scene_scn['id'], 'entrance_location': (8, 8), 'entrance_size': (2, 1) }, 'tags': ['D'] }
def addConnection_01(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_01 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_01['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_01
connection_01 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_01, 'args': { 'exit_location': (13, 1), 'exit_direction': 'down', 'entrance': gen_scene_scn['id'], 'entrance_location': (12, 0), 'entrance_size': (2, 1) }, 'tags': ['C'] }
def addConnection_02(source_location, source_size, destination_scene_id, destination_location, destination_direction):
trigger_02 = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
trigger_02['script'] = [
script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
script.end()
]
return trigger_02
connection_02 = {'type': 'SLOT_CONNECTION', 'creator': addConnection_02, 'args': { 'exit_location': (1, 13), 'exit_direction': 'right', 'entrance': gen_scene_scn['id'], 'entrance_location': (0, 12), 'entrance_size': (1, 2) }, 'tags': ['C'] }
gen_scene_connections = [connection_00, connection_01, connection_02]
scene_data = {"scene": gen_scene_scn, "background": gen_scene_bkg, "sprites": [], "connections": gen_scene_connections, "references": [], "tags": ["Forest->Sewer"]}
return scene_data
def catalog(sample=True):
"""
Returns a list of scene functions from this part of the library.
"""
cat = [scene_gen_Forest1_00001,
scene_gen_Forest2_00002,
scene_gen_Forest3_00003,
scene_gen_Forest4_00004,
scene_gen_Forest5_00005,
scene_gen_Forest6_00006,
scene_gen_Forest7_00007,
scene_gen_Forest8_00008,
scene_gen_Forest10_00010,
scene_gen_Forest11_00011,
scene_gen_Forest12_00012,
scene_gen_Forest14_00014]
cat_well = [scene_gen_Forest12_00012_shovel,
scene_gen_Forest13_00013]
if sample != True:
return cat + cat_well
if random.random() > 0.35:
return []
return random.sample(cat,6) + random.choice([cat_well, [], [], []])
return catalog, sprite_sheet_data
def createExampleProject():
"""
Demonstration of how the scene generators in this file can be used.
"""
project = generator.makeBasicProject()
# Create sprite sheet for the player sprite
player_sprite_sheet = generator.addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
scene_data_list = []
catalog, sprites = scene_generation()
for scn_func in catalog():
scene_data_list.append(scn_func(None))
for element_sprite in sprites:
project.spriteSheets.append(element_sprite)
generator.connectScenesRandomlySymmetric(scene_data_list)
for sdata in scene_data_list:
generator.addSceneData(project, generator.translateReferences(sdata, scene_data_list))
# Add some music
project.music.append(generator.makeMusic("template", "template.mod"))
# Set the starting scene
project.settings["startSceneId"] = project.scenes[0]["id"]
project.settings["startX"] = 7
project.settings["startY"] = 21
return project
def runTest(test_dir):
generator.initializeGenerator()
project = createExampleProject()
generator.writeProjectToDisk(project, output_path = test_dir)
# test creating scenes...
if __name__ == '__main__':
destination = test_generation_destination_path
runTest(destination)
| 71.826793
| 252
| 0.654616
| 6,378
| 53,080
| 5.151929
| 0.057855
| 0.034572
| 0.040537
| 0.027025
| 0.867951
| 0.863021
| 0.853678
| 0.821236
| 0.805685
| 0.792781
| 0
| 0.082083
| 0.207479
| 53,080
| 738
| 253
| 71.924119
| 0.698695
| 0.011492
| 0
| 0.599002
| 1
| 0
| 0.156302
| 0.019024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096506
| false
| 0
| 0.004992
| 0
| 0.202995
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbf02f0f67669a30b6ccc86941848d2fa1ccb8c5
| 38,751
|
py
|
Python
|
simpleml/tests/unit/test_hashing.py
|
aolopez/SimpleML
|
9e3237c243863400372a493164a107b74f770ef0
|
[
"BSD-3-Clause"
] | 15
|
2018-08-19T19:36:23.000Z
|
2021-11-09T17:47:18.000Z
|
simpleml/tests/unit/test_hashing.py
|
aolopez/SimpleML
|
9e3237c243863400372a493164a107b74f770ef0
|
[
"BSD-3-Clause"
] | 75
|
2020-10-11T17:58:59.000Z
|
2022-03-29T22:34:54.000Z
|
simpleml/tests/unit/test_hashing.py
|
aolopez/SimpleML
|
9e3237c243863400372a493164a107b74f770ef0
|
[
"BSD-3-Clause"
] | 4
|
2018-04-30T23:09:42.000Z
|
2022-01-19T08:03:18.000Z
|
'''
Hashing related tests
'''
__author__ = 'Elisha Yadgaran'
import unittest
import pandas as pd
from simpleml.persistables.hashing import CustomHasherMixin
from simpleml._external.joblib import hash as deterministic_hash
class _Test123(object):
random_attribute = 'abc'
def __init__(self):
pass
def fancy_method(self):
pass
def __repr__(self):
return 'pretty repr of test class'
class CustomHasherTests(unittest.TestCase):
'''
Hashing tests for consistency across environment and machines.
Expectations generated on Mac running python 3.7
Tests trace recursive behavior via log assertions
'''
def test_initialized_class_hashing(self):
'''
Hashes the initialized object as (name, __dict__)
'''
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
hash_object = _Test123()
self.maxDiff = None
# results are sensitive to entrypoint (relative path names)
if __name__ == 'simpleml.tests.unit.test_hashing':
# entry from loader
# input/output
expected_final_hash = 'adfdad10e2f1e6e2f423824c7b6df461'
expected_logs = [
"DEBUG:simpleml.persistables.hashing:Hashing input: pretty repr of test class",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'simpleml.tests.unit.test_hashing._Test123'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: (<class 'simpleml.tests.unit.test_hashing._Test123'>, {})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: <class 'simpleml.tests.unit.test_hashing._Test123'>",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'type'>",
"WARNING:simpleml.persistables.hashing:Hashing class import path for <class 'simpleml.tests.unit.test_hashing._Test123'>, if a fully qualified import path is not used, calling again from a different location will yield different results!",
"DEBUG:simpleml.persistables.hashing:Hashing input: simpleml.tests.unit.test_hashing._Test123",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: eddefe8dd7b1dd0d06078e9198eae04c',
'DEBUG:simpleml.persistables.hashing:Hashing output: eddefe8dd7b1dd0d06078e9198eae04c',
'DEBUG:simpleml.persistables.hashing:Hashing input: {}',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: 7aa3631cc45701e2df0e03ef7162f2cb',
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"
]
elif __name__ == '__main__':
# entry from this file
# input/output
expected_final_hash = 'ad105926db464bf085b64b3b7a908fa7'
expected_logs = [
"DEBUG:simpleml.persistables.hashing:Hashing input: pretty repr of test class",
"DEBUG:simpleml.persistables.hashing:hash type: <class '__main__._Test123'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: (<class '__main__._Test123'>, {})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: <class '__main__._Test123'>",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'type'>",
"WARNING:simpleml.persistables.hashing:Hashing class import path for <class '__main__._Test123'>, if a fully qualified import path is not used, calling again from a different location will yield different results!",
"DEBUG:simpleml.persistables.hashing:Hashing input: __main__._Test123",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: e7196e9a7496ebb28620e2a88854398f',
'DEBUG:simpleml.persistables.hashing:Hashing output: e7196e9a7496ebb28620e2a88854398f',
'DEBUG:simpleml.persistables.hashing:Hashing input: {}',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: 7aa3631cc45701e2df0e03ef7162f2cb',
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"
]
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(hash_object), expected_final_hash)
self.assertEqual(logs.output, expected_logs)
def test_uninitialized_class_hashing(self):
'''
Hashes the repr(cls) for initialized objects
'''
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
hash_object = _Test123
self.maxDiff = None
# results are sensitive to entrypoint (relative path names)
if __name__ == 'simpleml.tests.unit.test_hashing':
# entry from loader
# input/output
expected_final_hash = 'eddefe8dd7b1dd0d06078e9198eae04c'
expected_logs = [
"DEBUG:simpleml.persistables.hashing:Hashing input: <class 'simpleml.tests.unit.test_hashing._Test123'>",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'type'>",
"WARNING:simpleml.persistables.hashing:Hashing class import path for <class 'simpleml.tests.unit.test_hashing._Test123'>, if a fully qualified import path is not used, calling again from a different location will yield different results!",
"DEBUG:simpleml.persistables.hashing:Hashing input: simpleml.tests.unit.test_hashing._Test123",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"
]
elif __name__ == '__main__':
# entry from this file
# input/output
expected_final_hash = 'e7196e9a7496ebb28620e2a88854398f'
expected_logs = [
"DEBUG:simpleml.persistables.hashing:Hashing input: <class '__main__._Test123'>",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'type'>",
"WARNING:simpleml.persistables.hashing:Hashing class import path for <class '__main__._Test123'>, if a fully qualified import path is not used, calling again from a different location will yield different results!",
"DEBUG:simpleml.persistables.hashing:Hashing input: __main__._Test123",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"
]
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(hash_object), expected_final_hash)
self.assertEqual(logs.output, expected_logs)
def test_uninitialized_class_dict_hashing(self):
'''
Hashes just class attributes (input via cls.__dict__)
Recursively includes all public methods and class attributes
'''
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
expected_final_hash = 'f327094b997618017ae36b8251885a8f'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(_Test123.__dict__), expected_final_hash)
# internal behavior
# hash class dict -> hash dict
self.maxDiff = None
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {_Test123.__dict__}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: ('random_attribute', 'abc')",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
'DEBUG:simpleml.persistables.hashing:Hashing input: random_attribute',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: 2ca4e7f734729525d18e56f1fa5862b7',
'DEBUG:simpleml.persistables.hashing:Hashing input: abc',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: a5a2f6c8adba6852e4d3888ce0c26016',
'DEBUG:simpleml.persistables.hashing:Hashing output: a4391ea84fdef203422c770de28a05f7',
f"DEBUG:simpleml.persistables.hashing:Hashing input: ('fancy_method', {_Test123.fancy_method})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
'DEBUG:simpleml.persistables.hashing:Hashing input: fancy_method',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: 4518d84f1fde3a4f6d9830df8ca4721c',
f'DEBUG:simpleml.persistables.hashing:Hashing input: {_Test123.fancy_method}',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'function'>",
'DEBUG:simpleml.persistables.hashing:Hashing input: def fancy_method(self):\n pass\n',
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
'DEBUG:simpleml.persistables.hashing:Hashing output: c60ec24e327caf1cdb2f409ae9a1fd6f',
'DEBUG:simpleml.persistables.hashing:Hashing output: c60ec24e327caf1cdb2f409ae9a1fd6f',
'DEBUG:simpleml.persistables.hashing:Hashing output: 1751bf1c56fc8c1027ec11f83ba264dd',
f'DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}'])
def test_pandas_series_hashing(self):
# series
for d, expected_final_hash in zip(
[range(20), ['a'], [1]],
[7008921389990319782, -4496393130729816112, 6238072747940578789]
):
with self.subTest(d=d, expected_final_hash=expected_final_hash):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = pd.Series(d)
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash series
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.series.Series'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_pandas_frame_hashing(self):
# frame
for d, expected_final_hash in zip(
[[range(10), range(10)], ['a'], [1]],
[6716675364149054294, 5694802365760992243, -7087755961261762286]
):
with self.subTest(d=d, expected_final_hash=expected_final_hash):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = pd.DataFrame(d)
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash dataframe
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.frame.DataFrame'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_none_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = None
expected_final_hash = -12345678987654321
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash None
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'NoneType'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_complex_list_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = [
'a',
2,
['b', 3],
{'d': 4},
lambda: 0,
pd.Series(['a']),
pd.DataFrame([1])
]
expected_final_hash = '68e95c072ffb1a8271e7e472f9fee504'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash list -> hash items in list
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
# primitives
"DEBUG:simpleml.persistables.hashing:Hashing input: a",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0357109b163771392cc674173d921e4b",
"DEBUG:simpleml.persistables.hashing:Hashing input: 2",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 76f34d73a1a6753d1243c9ba0afe3457",
# simple containers
"DEBUG:simpleml.persistables.hashing:Hashing input: ['b', 3]",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: b",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 10b474053f957b5c70dd5f01c695b8a0",
"DEBUG:simpleml.persistables.hashing:Hashing input: 3",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 56615ea01687173ebab08c915ad7e500",
"DEBUG:simpleml.persistables.hashing:Hashing output: 38b1de0299d81decb1341f9f2bfb4c8b",
"DEBUG:simpleml.persistables.hashing:Hashing input: {'d': 4}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: ('d', 4)",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: d",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 5adbbd6cebbee97eda238235075de7ea",
"DEBUG:simpleml.persistables.hashing:Hashing input: 4",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: a8216e26a2093b48a0b7c57159313c8e",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0bd9aca51ddaab2f96485637ec4c21ed",
"DEBUG:simpleml.persistables.hashing:Hashing output: 21065bb299df9d8a902754661f1dcf08",
# functions
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data[4]}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'function'>",
# source inspection pulls the line the function is defined on with all whitespace
# depending on source, this could be more variables than just the function
"DEBUG:simpleml.persistables.hashing:Hashing input: lambda: 0,\n",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 1f55d5d00641bc583fef1c244a94116d",
"DEBUG:simpleml.persistables.hashing:Hashing output: 1f55d5d00641bc583fef1c244a94116d",
# data
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data[5]}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.series.Series'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: -4496393130729816112",
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data[6]}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.frame.DataFrame'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: -7087755961261762286",
# Final
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_primitive_list_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = ['a', 2, ['b', 3], {'d': 4}]
expected_final_hash = 'c3ee3ea76093a4ffa266010db2a19748'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash list -> hash items in list
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
# primitives
"DEBUG:simpleml.persistables.hashing:Hashing input: a",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0357109b163771392cc674173d921e4b",
"DEBUG:simpleml.persistables.hashing:Hashing input: 2",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 76f34d73a1a6753d1243c9ba0afe3457",
# simple containers
"DEBUG:simpleml.persistables.hashing:Hashing input: ['b', 3]",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: b",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 10b474053f957b5c70dd5f01c695b8a0",
"DEBUG:simpleml.persistables.hashing:Hashing input: 3",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 56615ea01687173ebab08c915ad7e500",
"DEBUG:simpleml.persistables.hashing:Hashing output: 38b1de0299d81decb1341f9f2bfb4c8b",
"DEBUG:simpleml.persistables.hashing:Hashing input: {'d': 4}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: ('d', 4)",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: d",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 5adbbd6cebbee97eda238235075de7ea",
"DEBUG:simpleml.persistables.hashing:Hashing input: 4",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: a8216e26a2093b48a0b7c57159313c8e",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0bd9aca51ddaab2f96485637ec4c21ed",
"DEBUG:simpleml.persistables.hashing:Hashing output: 21065bb299df9d8a902754661f1dcf08",
# Final
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_pandas_list_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = [pd.Series(['a']), pd.DataFrame([1])]
expected_final_hash = '9357fb780e7774f3426bc93d5eccdcc0'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash list -> hash items in list
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
# data
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data[0]}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.series.Series'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: -4496393130729816112",
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data[1]}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.frame.DataFrame'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: -7087755961261762286",
# Final
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_complex_dict_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = {
'a': 2,
'b': ['b', 3],
'c': {'d': 4},
'd': lambda: 0,
'e': pd.Series(['a']),
'f': pd.DataFrame([1])
}
expected_final_hash = '1cc5ab5d0c77f755358fe7f4d77ea04a'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
# hash dict -> hash items in dict
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
# primitives
"DEBUG:simpleml.persistables.hashing:Hashing input: ('a', 2)",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: a",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0357109b163771392cc674173d921e4b",
"DEBUG:simpleml.persistables.hashing:Hashing input: 2",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 76f34d73a1a6753d1243c9ba0afe3457",
"DEBUG:simpleml.persistables.hashing:Hashing output: 4168a931adf69a5c1cfd58cc89a5934b",
# simple containers
"DEBUG:simpleml.persistables.hashing:Hashing input: ('b', ['b', 3])",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: b",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 10b474053f957b5c70dd5f01c695b8a0",
"DEBUG:simpleml.persistables.hashing:Hashing input: ['b', 3]",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: b",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 10b474053f957b5c70dd5f01c695b8a0",
"DEBUG:simpleml.persistables.hashing:Hashing input: 3",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 56615ea01687173ebab08c915ad7e500",
"DEBUG:simpleml.persistables.hashing:Hashing output: 38b1de0299d81decb1341f9f2bfb4c8b",
"DEBUG:simpleml.persistables.hashing:Hashing output: ddfeb8c7d0f3b5e186ea6d5f75dc3a42",
"DEBUG:simpleml.persistables.hashing:Hashing input: ('c', {'d': 4})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: c",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: eb5af44d447eeee22659894e100629ba",
"DEBUG:simpleml.persistables.hashing:Hashing input: {'d': 4}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: ('d', 4)",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: d",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 5adbbd6cebbee97eda238235075de7ea",
"DEBUG:simpleml.persistables.hashing:Hashing input: 4",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: a8216e26a2093b48a0b7c57159313c8e",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0bd9aca51ddaab2f96485637ec4c21ed",
"DEBUG:simpleml.persistables.hashing:Hashing output: 21065bb299df9d8a902754661f1dcf08",
"DEBUG:simpleml.persistables.hashing:Hashing output: 23b65131a3c1e7692718ce5e16dbc6e1",
# functions
f"DEBUG:simpleml.persistables.hashing:Hashing input: ('d', {data['d']})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: d",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 5adbbd6cebbee97eda238235075de7ea",
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data['d']}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'function'>",
# source inspection pulls the line the function is defined on with all whitespace
# depending on source, this could be more variables than just the function
"DEBUG:simpleml.persistables.hashing:Hashing input: 'd': lambda: 0,\n",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: bface6eb385c3eda922dae2ea0b1392d",
"DEBUG:simpleml.persistables.hashing:Hashing output: bface6eb385c3eda922dae2ea0b1392d",
"DEBUG:simpleml.persistables.hashing:Hashing output: db969ff10c6c237542b1244b2a54d4c3",
# data
f"DEBUG:simpleml.persistables.hashing:Hashing input: ('e', {data['e']})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: e",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: f97a2d5131312082a54b26e764026dfd",
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data['e']}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.series.Series'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: -4496393130729816112",
"DEBUG:simpleml.persistables.hashing:Hashing output: 022f7f3c9c3c4f477b8537dce4eb7b11",
f"DEBUG:simpleml.persistables.hashing:Hashing input: ('f', {data['f']})",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: f",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: d6a88b3c515fcfac7a70b4ee89ecc94d",
f"DEBUG:simpleml.persistables.hashing:Hashing input: {data['f']}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.frame.DataFrame'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: -7087755961261762286",
"DEBUG:simpleml.persistables.hashing:Hashing output: 214e5e5e60ff60baee6174e1846e0625",
# Final
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_string_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = 'a'
expected_final_hash = '0357109b163771392cc674173d921e4b'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
f"DEBUG:simpleml.persistables.hashing:hash type: {type(data)}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_int_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = 2
expected_final_hash = '76f34d73a1a6753d1243c9ba0afe3457'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
f"DEBUG:simpleml.persistables.hashing:hash type: {type(data)}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_simple_list_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = ['b', 3]
expected_final_hash = '38b1de0299d81decb1341f9f2bfb4c8b'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'list'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: b",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 10b474053f957b5c70dd5f01c695b8a0",
"DEBUG:simpleml.persistables.hashing:Hashing input: 3",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 56615ea01687173ebab08c915ad7e500",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_simple_dict_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = {'d': 4}
expected_final_hash = '21065bb299df9d8a902754661f1dcf08'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'dict'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: ('d', 4)",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'tuple'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: d",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: 5adbbd6cebbee97eda238235075de7ea",
"DEBUG:simpleml.persistables.hashing:Hashing input: 4",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'int'>",
"DEBUG:simpleml.persistables.hashing:Hashing output: a8216e26a2093b48a0b7c57159313c8e",
"DEBUG:simpleml.persistables.hashing:Hashing output: 0bd9aca51ddaab2f96485637ec4c21ed",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_lambda_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
def data():
return 0
expected_final_hash = 'd7ab3b20053da4fb93531950ad4ffb66'
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'function'>",
"DEBUG:simpleml.persistables.hashing:Hashing input: def data():\n return 0\n",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'str'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_empty_pandas_series_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = pd.Series()
expected_final_hash = 0
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.series.Series'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
def test_empty_pandas_dataframe_hashing(self):
with self.assertLogs(logger='simpleml.persistables.hashing', level='DEBUG') as logs:
# input/output
data = pd.DataFrame()
expected_final_hash = 0
with self.subTest():
self.assertEqual(CustomHasherMixin.custom_hasher(data), expected_final_hash)
# internal behavior
self.assertEqual(
logs.output,
[f"DEBUG:simpleml.persistables.hashing:Hashing input: {data}",
"DEBUG:simpleml.persistables.hashing:hash type: <class 'pandas.core.frame.DataFrame'>",
f"DEBUG:simpleml.persistables.hashing:Hashing output: {expected_final_hash}"])
class DeterministicHasherTests(unittest.TestCase):
def test_tuple_hash(self):
'''
set/tuple/list/dict/mappingproxy reduce to a tuple of hashes
'''
data = ('0357109b163771392cc674173d921e4b', '76f34d73a1a6753d1243c9ba0afe3457', '38b1de0299d81decb1341f9f2bfb4c8b', '21065bb299df9d8a902754661f1dcf08')
expected_hash = 'c3ee3ea76093a4ffa266010db2a19748'
self.assertEqual(deterministic_hash(data), expected_hash)
def test_string_hash(self):
data = 'abc'
expected_hash = 'a5a2f6c8adba6852e4d3888ce0c26016'
self.assertEqual(deterministic_hash(data), expected_hash)
def test_int_hash(self):
data = 12
expected_hash = 'feb1c5cac6acf399a62e281ca8aaac96'
self.assertEqual(deterministic_hash(data), expected_hash)
def test_float_hash(self):
data = 0.045
expected_hash = '900c461ea0f92e9dba4eaef616dbfd35'
self.assertEqual(deterministic_hash(data), expected_hash)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 59.071646
| 259
| 0.627623
| 3,562
| 38,751
| 6.726558
| 0.062886
| 0.24374
| 0.329048
| 0.360601
| 0.894783
| 0.889649
| 0.875417
| 0.864691
| 0.840442
| 0.82116
| 0
| 0.07003
| 0.267064
| 38,751
| 655
| 260
| 59.161832
| 0.773572
| 0.047121
| 0
| 0.692308
| 0
| 0.037422
| 0.571067
| 0.434978
| 0
| 0
| 0
| 0
| 0.114345
| 1
| 0.051975
| false
| 0.006237
| 0.016632
| 0.004158
| 0.081081
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
dbf3aa5b60213513cf87db10114901ab4541db0c
| 162
|
py
|
Python
|
example/consumption_tax.py
|
hi1280/pytest-example
|
1fa03b5d118d47d52beb707b5d8ce2ffffc786a1
|
[
"MIT"
] | null | null | null |
example/consumption_tax.py
|
hi1280/pytest-example
|
1fa03b5d118d47d52beb707b5d8ce2ffffc786a1
|
[
"MIT"
] | null | null | null |
example/consumption_tax.py
|
hi1280/pytest-example
|
1fa03b5d118d47d52beb707b5d8ce2ffffc786a1
|
[
"MIT"
] | null | null | null |
class ConsumptionTax:
def __init__(self, tax_rate):
self.tax_rate = tax_rate
def apply(self, price):
return int((price * self.tax_rate) / 100) + price
| 32.4
| 53
| 0.697531
| 24
| 162
| 4.375
| 0.5
| 0.266667
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.185185
| 162
| 5
| 53
| 32.4
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e012570b4b058323830d992df1b1b1b3e61f6722
| 13,744
|
py
|
Python
|
sdk/python/pulumi_azure/servicebus/subscription.py
|
apollo2030/pulumi-azure
|
034665c61665f4dc7e291b8813747012d34fa044
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/servicebus/subscription.py
|
apollo2030/pulumi-azure
|
034665c61665f4dc7e291b8813747012d34fa044
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/servicebus/subscription.py
|
apollo2030/pulumi-azure
|
034665c61665f4dc7e291b8813747012d34fa044
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Subscription(pulumi.CustomResource):
auto_delete_on_idle: pulumi.Output[str]
"""
The idle interval after which the
Subscription is automatically deleted, minimum of 5 minutes. Provided in the
TimeSpan format.
"""
dead_lettering_on_filter_evaluation_exceptions: pulumi.Output[bool]
dead_lettering_on_message_expiration: pulumi.Output[bool]
"""
Boolean flag which controls
whether the Subscription has dead letter support when a message expires. Defaults
to false.
"""
default_message_ttl: pulumi.Output[str]
"""
The TTL of messages sent to this Subscription
if no TTL value is set on the message itself. Provided in the TimeSpan
format.
"""
enable_batched_operations: pulumi.Output[bool]
"""
Boolean flag which controls whether the
Subscription supports batched operations. Defaults to false.
"""
forward_dead_lettered_messages_to: pulumi.Output[str]
"""
The name of a Queue or Topic to automatically forward Dead Letter messages to.
"""
forward_to: pulumi.Output[str]
"""
The name of a Queue or Topic to automatically forward messages to.
"""
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists.
Changing this forces a new resource to be created.
"""
lock_duration: pulumi.Output[str]
"""
The lock duration for the subscription, maximum
supported value is 5 minutes. Defaults to 1 minute.
"""
max_delivery_count: pulumi.Output[float]
"""
The maximum number of deliveries.
"""
name: pulumi.Output[str]
"""
Specifies the name of the ServiceBus Subscription resource.
Changing this forces a new resource to be created.
"""
namespace_name: pulumi.Output[str]
"""
The name of the ServiceBus Namespace to create
this Subscription in. Changing this forces a new resource to be created.
"""
requires_session: pulumi.Output[bool]
"""
Boolean flag which controls whether this Subscription
supports the concept of a session. Defaults to false. Changing this forces a
new resource to be created.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
"""
topic_name: pulumi.Output[str]
"""
The name of the ServiceBus Topic to create
this Subscription in. Changing this forces a new resource to be created.
"""
def __init__(__self__, resource_name, opts=None, auto_delete_on_idle=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_ttl=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, location=None, lock_duration=None, max_delivery_count=None, name=None, namespace_name=None, requires_session=None, resource_group_name=None, topic_name=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a ServiceBus Subscription.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auto_delete_on_idle: The idle interval after which the
Subscription is automatically deleted, minimum of 5 minutes. Provided in the
TimeSpan format.
:param pulumi.Input[bool] dead_lettering_on_message_expiration: Boolean flag which controls
whether the Subscription has dead letter support when a message expires. Defaults
to false.
:param pulumi.Input[str] default_message_ttl: The TTL of messages sent to this Subscription
if no TTL value is set on the message itself. Provided in the TimeSpan
format.
:param pulumi.Input[bool] enable_batched_operations: Boolean flag which controls whether the
Subscription supports batched operations. Defaults to false.
:param pulumi.Input[str] forward_dead_lettered_messages_to: The name of a Queue or Topic to automatically forward Dead Letter messages to.
:param pulumi.Input[str] forward_to: The name of a Queue or Topic to automatically forward messages to.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists.
Changing this forces a new resource to be created.
:param pulumi.Input[str] lock_duration: The lock duration for the subscription, maximum
supported value is 5 minutes. Defaults to 1 minute.
:param pulumi.Input[float] max_delivery_count: The maximum number of deliveries.
:param pulumi.Input[str] name: Specifies the name of the ServiceBus Subscription resource.
Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: The name of the ServiceBus Namespace to create
this Subscription in. Changing this forces a new resource to be created.
:param pulumi.Input[bool] requires_session: Boolean flag which controls whether this Subscription
supports the concept of a session. Defaults to false. Changing this forces a
new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
:param pulumi.Input[str] topic_name: The name of the ServiceBus Topic to create
this Subscription in. Changing this forces a new resource to be created.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/servicebus_subscription.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_delete_on_idle'] = auto_delete_on_idle
__props__['dead_lettering_on_filter_evaluation_exceptions'] = dead_lettering_on_filter_evaluation_exceptions
__props__['dead_lettering_on_message_expiration'] = dead_lettering_on_message_expiration
__props__['default_message_ttl'] = default_message_ttl
__props__['enable_batched_operations'] = enable_batched_operations
__props__['forward_dead_lettered_messages_to'] = forward_dead_lettered_messages_to
__props__['forward_to'] = forward_to
__props__['location'] = location
__props__['lock_duration'] = lock_duration
if max_delivery_count is None:
raise TypeError("Missing required property 'max_delivery_count'")
__props__['max_delivery_count'] = max_delivery_count
__props__['name'] = name
if namespace_name is None:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
__props__['requires_session'] = requires_session
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if topic_name is None:
raise TypeError("Missing required property 'topic_name'")
__props__['topic_name'] = topic_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure:eventhub/subscription:Subscription")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Subscription, __self__).__init__(
'azure:servicebus/subscription:Subscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, auto_delete_on_idle=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_ttl=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, location=None, lock_duration=None, max_delivery_count=None, name=None, namespace_name=None, requires_session=None, resource_group_name=None, topic_name=None):
"""
Get an existing Subscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auto_delete_on_idle: The idle interval after which the
Subscription is automatically deleted, minimum of 5 minutes. Provided in the
TimeSpan format.
:param pulumi.Input[bool] dead_lettering_on_message_expiration: Boolean flag which controls
whether the Subscription has dead letter support when a message expires. Defaults
to false.
:param pulumi.Input[str] default_message_ttl: The TTL of messages sent to this Subscription
if no TTL value is set on the message itself. Provided in the TimeSpan
format.
:param pulumi.Input[bool] enable_batched_operations: Boolean flag which controls whether the
Subscription supports batched operations. Defaults to false.
:param pulumi.Input[str] forward_dead_lettered_messages_to: The name of a Queue or Topic to automatically forward Dead Letter messages to.
:param pulumi.Input[str] forward_to: The name of a Queue or Topic to automatically forward messages to.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists.
Changing this forces a new resource to be created.
:param pulumi.Input[str] lock_duration: The lock duration for the subscription, maximum
supported value is 5 minutes. Defaults to 1 minute.
:param pulumi.Input[float] max_delivery_count: The maximum number of deliveries.
:param pulumi.Input[str] name: Specifies the name of the ServiceBus Subscription resource.
Changing this forces a new resource to be created.
:param pulumi.Input[str] namespace_name: The name of the ServiceBus Namespace to create
this Subscription in. Changing this forces a new resource to be created.
:param pulumi.Input[bool] requires_session: Boolean flag which controls whether this Subscription
supports the concept of a session. Defaults to false. Changing this forces a
new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
:param pulumi.Input[str] topic_name: The name of the ServiceBus Topic to create
this Subscription in. Changing this forces a new resource to be created.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/servicebus_subscription.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["auto_delete_on_idle"] = auto_delete_on_idle
__props__["dead_lettering_on_filter_evaluation_exceptions"] = dead_lettering_on_filter_evaluation_exceptions
__props__["dead_lettering_on_message_expiration"] = dead_lettering_on_message_expiration
__props__["default_message_ttl"] = default_message_ttl
__props__["enable_batched_operations"] = enable_batched_operations
__props__["forward_dead_lettered_messages_to"] = forward_dead_lettered_messages_to
__props__["forward_to"] = forward_to
__props__["location"] = location
__props__["lock_duration"] = lock_duration
__props__["max_delivery_count"] = max_delivery_count
__props__["name"] = name
__props__["namespace_name"] = namespace_name
__props__["requires_session"] = requires_session
__props__["resource_group_name"] = resource_group_name
__props__["topic_name"] = topic_name
return Subscription(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 57.991561
| 494
| 0.711365
| 1,750
| 13,744
| 5.306857
| 0.118286
| 0.035534
| 0.048239
| 0.040917
| 0.825024
| 0.79832
| 0.777323
| 0.764294
| 0.738021
| 0.733929
| 0
| 0.000941
| 0.22679
| 13,744
| 236
| 495
| 58.237288
| 0.872965
| 0.395227
| 0
| 0.021505
| 1
| 0
| 0.182704
| 0.064316
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043011
| false
| 0.010753
| 0.064516
| 0.021505
| 0.311828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e01f94eef06abb85142e8818a2105d10358b6b59
| 6,563
|
py
|
Python
|
static/dataset/cook_data.py
|
chikobvore/Examinations
|
e01f62d82ad591e82696db0189c6a66f72ba6a96
|
[
"MIT"
] | null | null | null |
static/dataset/cook_data.py
|
chikobvore/Examinations
|
e01f62d82ad591e82696db0189c6a66f72ba6a96
|
[
"MIT"
] | null | null | null |
static/dataset/cook_data.py
|
chikobvore/Examinations
|
e01f62d82ad591e82696db0189c6a66f72ba6a96
|
[
"MIT"
] | null | null | null |
import pandas as pd
from numpy.random import randint
data = pd.read_csv('mubeena1.csv')
TCW = []
EM = []
TM = []
Grade = []
Comment = []
for i in range(len(data)):
CW = 0.4 * data.iloc[i][0]
TEM = 0.6 * data.iloc[i][1]
if CW > 35:
CW = 40
print("New Cw is " + str(CW))
if TEM > 55:
TEM = 60
total = CW +TEM
if total > 30 and total < 45:
CW = randint(0,43)
TEM = 44 - CW
total = 44
EM.append(round(TEM))
TCW.append(round(CW))
TM.append(round(total))
if total < 45:
grade = 'F'
Grade.append(grade)
elif total > 44 and total < 55:
grade = 'P'
Grade.append(grade)
elif total >54 and total <65:
grade = '2.2'
Grade.append(grade)
elif total >64 and total < 75:
grade = '2.1'
Grade.append(grade)
else:
grade = '1'
Grade.append(grade)
if CW > 12:
if TEM > 15:
if total == 44:
comment = "Borderline Failure"
Comment.append(comment)
else:
if CW > 38:
comment = "Perfect Course Work Score"
Comment.append(comment)
else:
if TEM > 58:
comment = "Perfect Exam Mark Score"
Comment.append(comment)
else:
comment = "Normal"
Comment.append(comment)
else:
comment = "Disproportinate TCW TO TOTAL Exam Mark"
Comment.append(comment)
else:
if TEM > 32:
if CW > 9:
if TEM > 40:
comment = "Disproportinate TCW TO TOTAL Exam Mark"
Comment.append(comment)
else:
if TEM > 59:
comment = "Perfect Exam mark Score"
Comment.append(comment)
else:
if total == 44:
comment = "Boarderline Failure"
Comment.append(comment)
else:
if CW > 39:
comment = "Perfect Course Work Score"
Comment.append(comment)
else:
comment = "Normal"
Comment.append(comment)
else:
comment = "Disproportinate TCW TO TOTAL Exam Mark"
Comment.append(comment)
else:
if CW > 39:
comment = "Perfect Course Work Score"
Comment.append(comment)
else:
if total == 44:
comment = "Borderline Failure"
Comment.append(comment)
else:
if TEM > 58:
comment = "Perfect Exam mark Score"
Comment.append(comment)
else:
comment = "Normal"
Comment.append(comment)
for i in range(len(data)):
CW = 0.4 * data.iloc[i][2]
TEM = 0.6 * data.iloc[i][3]
if CW > 35:
CW = 40
print("New Cw is " + str(CW))
if TEM > 55:
TEM = 60
total = CW +TEM
if total > 30 and total < 45:
CW = randint(0,43)
TEM = 44 - CW
total = 44
EM.append(round(TEM))
TCW.append(round(CW))
TM.append(round(total))
if total < 45:
grade = 'F'
Grade.append(grade)
elif total > 44 and total < 55:
grade = 'P'
Grade.append(grade)
elif total >54 and total <65:
grade = '2.2'
Grade.append(grade)
elif total >64 and total < 75:
grade = '2.1'
Grade.append(grade)
else:
grade = '1'
Grade.append(grade)
if CW > 12:
if TEM > 15:
if total == 44:
comment = "Borderline Failure"
Comment.append(comment)
else:
if CW > 38:
comment = "Perfect Course Work Score"
Comment.append(comment)
else:
if TEM > 58:
comment = "Perfect Exam Mark Score"
Comment.append(comment)
else:
comment = "Normal"
Comment.append(comment)
else:
comment = "Disproportinate TCW TO TOTAL Exam Mark"
Comment.append(comment)
else:
if TEM > 32:
if CW > 9:
if TEM > 40:
comment = "Disproportinate TCW TO TOTAL Exam Mark"
Comment.append(comment)
else:
if TEM > 59:
comment = "Perfect Exam mark Score"
Comment.append(comment)
else:
if total == 44:
comment = "Boarderline Failure"
Comment.append(comment)
else:
if CW > 39:
comment = "Perfect Course Work Score"
Comment.append(comment)
else:
comment = "Normal"
Comment.append(comment)
else:
comment = "Disproportinate TCW TO TOTAL Exam Mark"
Comment.append(comment)
else:
if CW > 39:
comment = "Perfect Course Work Score"
Comment.append(comment)
else:
if total == 44:
comment = "Borderline Failure"
Comment.append(comment)
else:
if TEM > 58:
comment = "Perfect Exam mark Score"
Comment.append(comment)
else:
comment = "Normal"
Comment.append(comment)
NewData = {
"Course Work": TCW,
"Exam Mark": EM,
"Total": TM,
"Comment": Comment
}
Dataset = pd.DataFrame(NewData,columns= ['Course Work','Exam Mark','Total','Comment'])
Export = Dataset.to_csv('newdata.csv',index=None,header=True)
| 29.696833
| 86
| 0.413683
| 616
| 6,563
| 4.404221
| 0.137987
| 0.143752
| 0.221157
| 0.247696
| 0.910063
| 0.910063
| 0.899742
| 0.899742
| 0.899742
| 0.899742
| 0
| 0.043784
| 0.502362
| 6,563
| 220
| 87
| 29.831818
| 0.786895
| 0
| 0
| 0.896907
| 0
| 0
| 0.120219
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010309
| 0
| 0.010309
| 0.010309
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e04e6c97aa1da1da14c129930289c03dc3ac13da
| 21,304
|
py
|
Python
|
server.py
|
hadiMh/tornado
|
53e8504cb26c99aa1f72664d48d18a874bbccd0f
|
[
"MIT"
] | null | null | null |
server.py
|
hadiMh/tornado
|
53e8504cb26c99aa1f72664d48d18a874bbccd0f
|
[
"MIT"
] | null | null | null |
server.py
|
hadiMh/tornado
|
53e8504cb26c99aa1f72664d48d18a874bbccd0f
|
[
"MIT"
] | null | null | null |
import re
import os
from tornado.web import Application, RequestHandler
from tornado.ioloop import IOLoop
from tornado.options import define, options
import string
import random
# from torndb import Connection
from binascii import hexlify
from time import gmtime, strftime
import mydb
import json
import collections
def createUserTicketList(userTicketList):
result = {}
if userTicketList:
ticketListLength = len(userTicketList)
else:
ticketListLength = 0
result[" tickets"] = "There Are -%d- Ticket" % ticketListLength
result[" code"] = "200"
i = 0
if(userTicketList):
for value in userTicketList:
result["block %d" % i] = {
"subject": value["subject"],
"body": value["body"],
"status": value["status"],
"id": value["id"],
"date": "2019-05-21 15:18:17",
"response": value["response"]
}
i=i+1
return result
def getQueryParametes(self, parameters):
returnValues = []
for value in parameters:
result = re.search("(?<="+value+"=)([^&]+)?", self.request.uri)
if(result):
returnValues.append(result.group())
else:
returnValues.append(0)
return returnValues
def getPostParameters(self, parameters):
returnValues = []
for value in parameters:
result = self.get_argument(value)
if(result):
returnValues.append(result)
else:
returnValues.append(0)
return returnValues
def generateRandomToken():
return hexlify(os.urandom(16)).decode('utf-8')
def getUserLoginToken(username):
user = mydb.doesThisUserAlreadyExist(username)
if(not user):
print("no such user exists for login")
else:
if(not user["token"] or user["token"] == ""):
user['token'] = generateRandomToken()
mydb.saveTokenToThisUser(username, user['token'])
return user['token']
# request handler classes
class MyRequestHandler(RequestHandler):
@property
def hello(self):
print("second function")
class SignupHandler(MyRequestHandler):
def get(self, *args):
username, password, firstname, lastname = getQueryParametes(self, ['username', 'password', 'firstname', 'lastname'])
if(not username or not password):
self.write({"message:": "username and password are required"})
return
if(not firstname):
firstname = ""
if(not lastname):
lastname = ""
# check if user already exists in the saved users
userExists = mydb.doesThisUserAlreadyExist(username)
if not userExists:
mydb.createUserInUsersTable(username, password, firstname, lastname)
self.write({
"message": "Signed Up Successfully",
"code": "200"
})
else:
self.write(
{
"message": "user already exists",
"code": "400"
}
)
def post(self, *args, **kwargs):
username, password, firstname, lastname = getPostParameters(self,
['username', 'password', 'firstname', 'lastname'])
if (not username or not password):
self.write({
"message:": "username and password are required",
"code": "400"})
return
if (not firstname):
firstname = ""
if (not lastname):
lastname = ""
# check if user already exists in the saved users
userExists = mydb.doesThisUserAlreadyExist(username)
if not userExists:
mydb.createUserInUsersTable(username, password, firstname, lastname)
self.write({
"message": "Signed Up Successfully",
"code": "200"
})
else:
self.write(
{
"message": "user already exists",
"code": "300"
}
)
class LoginHandler(MyRequestHandler):
def get(self, *args):
# get username and password that the request contains
username, password = getQueryParametes(self, ['username', 'password'])
# if username or password are not in the request
if(not username or not password):
self.write({
"message": "username and password are required.",
"code": "400"
})
return
# find the user if exist
user = mydb.doesThisUserAlreadyExist(username)
# if user doesnt exist, tell it to response
if not user:
self.write({
"message": "user doesn't exist in the database.",
"code": "400"
})
return
print("user", user)
# if user exist and the password is correct
if (user) and user["password"] == password:
# the below function, saves the token in the user data
token = getUserLoginToken(username=username)
self.write({
"message": "Logged in Successfully",
"code": "200",
"token": token
})
else:
# if the password is not correct
self.write({
"message": "username or password is not correct",
"code": "400"
})
def post(self, *args, **kwargs):
# get username and password that the request contains
username, password = getPostParameters(self, ['username', 'password'])
# username = self.get_argument('username')
# password = self.get_argument('password')
print("post parameters", username, password)
# if username or password are not in the request
if (not username or not password):
self.write({
"message": "username and password are required."
})
return
# find the user if exist
user = mydb.doesThisUserAlreadyExist(username)
# if user doesnt exist, tell it to response
if not user:
self.write({
"message": "user doesn't exist in the database."
})
return
print("user", user)
# if user exist and the password is correct
if (user) and user["password"] == password:
# the below function, saves the token in the user data
token = getUserLoginToken(username=username)
self.write({
"message": "Logged in Successfully",
"code": "200",
"token": token
})
else:
# if the password is not correct
self.write({
"message": "username or password is not correct"
})
class LogoutHandler(MyRequestHandler):
def get(self, *args):
username, password = getQueryParametes(self, ['username', 'password'])
if (not username or not password):
self.write({
"message": "username and password are required."
})
return
# find the user if exist
user = mydb.clearUserToken(username, password)
# if user doesnt exist, tell it to response
if not user:
self.write({
"message": "user have already logged out or "
"user doesn't exist in the database or "
"your password is not correct"
})
return
# if user exist and the password is correct
if (user):
# clear the token so the user is logged out
self.write({
"message": "Logged Out Successfully",
"code": "200"
})
return
else:
# if the password is not correct
self.write({
"message": "username or password is not correct"
})
def post(self, *args, **kwargs):
username, password = getPostParameters(self, ['username', 'password'])
if (not username or not password):
self.write({
"message": "username and password are required."
})
return
# find the user if exist
user = mydb.clearUserToken(username, password)
# if user doesnt exist, tell it to response
if not user:
self.write({
"message": "user have already logged out or "
"user doesn't exist in the database or "
"your password is not correct"
})
return
# if user exist and the password is correct
if (user):
# clear the token so the user is logged out
self.write({
"message": "Logged Out Successfully",
"code": "200"
})
return
else:
# if the password is not correct
self.write({
"message": "username or password is not correct"
})
class SendTicketHandler(MyRequestHandler):
def get(self, *args):
token, subject, body = getQueryParametes(self, ['token', 'subject', 'body'])
if(not token):
self.write({
"message": "token is required. please login first."
})
return
if(not subject or not body):
self.write({
"message": "subject and body of the ticket are required."
})
return
user = mydb.getUserByToken(token)
if(not user):
self.write({
"message": "token not valid"
})
return
ticketId = mydb.saveTicket(user['username'], subject, body)
self.write({
"message": "Ticket Sent Successfully",
"id": ticketId,
"code": "200"
})
def post(self, *args, **kwargs):
token, subject, body = getPostParameters(self, ['token', 'subject', 'body'])
if (not token):
self.write({
"message": "token is required. please login first."
})
return
if (not subject or not body):
self.write({
"message": "subject and body of the ticket are required."
})
return
user = mydb.getUserByToken(token)
if (not user):
self.write({
"message": "token not valid"
})
return
ticketId = mydb.saveTicket(user['username'], subject, body)
self.write({
"message": "Ticket Sent Successfully",
"id": ticketId,
"code": "200"
})
class UserGetTicketHandler(MyRequestHandler):
def get(self, *args):
token = getQueryParametes(self, ['token'])[0]
if(not token):
self.write({
"message": "request in correct format"
})
return
user = mydb.getUserByToken(token)
if not user:
self.write({
"message": "token is not valid"
})
return
userTickets = mydb.getAllUserTickets(user['username'])
if userTickets:
numberOfTickets = len(userTickets)
else:
numberOfTickets = 0
correctFormatUserTicketsList = createUserTicketList(userTickets)
self.write(
collections.OrderedDict(sorted(correctFormatUserTicketsList.items()))
)
def post(self, *args):
token = getPostParameters(self, ['token'])[0]
if(not token):
self.write({
"message": "request in correct format"
})
return
user = mydb.getUserByToken(token)
if not user:
self.write({
"message": "token is not valid"
})
return
userTickets = mydb.getAllUserTickets(user['username'])
numberOfTickets = len(userTickets)
correctFormatUserTicketsList = createUserTicketList(userTickets)
self.write(
collections.OrderedDict(sorted(correctFormatUserTicketsList.items()))
)
class UserCloseTicketHandler(MyRequestHandler):
def get(self, *args):
token, id = getQueryParametes(self, ['token', 'id'])
user = mydb.getUserByToken(token)
userTickets = mydb.getAllUserTickets(user["username"])
if mydb.doesThisTicketExists(id) and mydb.changeTicketStatus(id, "Closed"):
self.write({
"message": "Ticket With id -%s- Closed Successfully" % id,
"code": "200"
})
return
else:
self.write({
"message": "No such user or ticket",
"code": "404"
})
return
def post(self, *args, **kwargs):
token, id = getPostParameters(self, ['token', 'id'])
user = mydb.getUserByToken(token)
userTickets = mydb.getAllUserTickets(user["username"])
if(mydb.changeTicketStatus(id, "Closed")):
self.write({
"message": "Ticket With id -%s- Closed Successfully" % id,
"code": "200"
})
return
else:
self.write({
"message": "No such user or ticket",
"code": "404"
})
return
class AdminGetAllTicketsHandler(MyRequestHandler):
def get(self, *args):
token, = getQueryParametes(self, ['token'])
if (not token):
self.write({
"message": "request in correct format"
})
return
user = mydb.getUserByToken(token)
print("isthisuseradmin",mydb.isThisTokenAdmin(token))
if not user or not mydb.isThisTokenAdmin(token):
self.write({
"message": "token is not valid"
})
return
userTickets = mydb.getAllUserTickets(user["username"])
numberOfTickets = len(userTickets)
correctFormatUserTicketsList = createUserTicketList(userTickets)
self.write(
collections.OrderedDict(sorted(correctFormatUserTicketsList.items()))
)
def post(self, *args, **kwargs):
token, = getPostParameters(self, ['token'])
if (not token):
self.write({
"message": "request in correct format"
})
return
user = mydb.getUserByToken(token)
print("isthisuseradmin",mydb.isThisTokenAdmin(token))
if not user or not mydb.isThisTokenAdmin(token):
self.write({
"message": "token is not valid"
})
return
userTickets = mydb.getAllUserTickets(user["username"])
numberOfTickets = len(userTickets)
correctFormatUserTicketsList = createUserTicketList(userTickets)
self.write(
collections.OrderedDict(sorted(correctFormatUserTicketsList.items()))
)
class AdminAnswerToTicketHandler(MyRequestHandler):
def get(self, *args):
token, id, body = getQueryParametes(self, ['token', 'id', 'body'])
if(not token):
self.write({
"message": "token is required"
})
return
if(not id or not body):
self.write({
"message": "the ticket id and the response text are required"
})
return
user = mydb.getUserByToken(token)
if not user:
self.write({
"message": "token is not valid"
})
return
if not mydb.isThisTokenAdmin(token):
self.write({
"message": "token is not valid"
})
return
if mydb.doesThisTicketExists(id) and mydb.saveThisResponseForThisTicket(id, body):
self.write({
"message": "Response to Ticket With id -%s- Sent Successfully" % id,
"code": "200"
})
else:
self.write({
"message": "Response to Ticket With id -%s- Was not Successfully. Please get sure for ticket existence." % id,
"code": "200"
})
def post(self, *args, **kwargs):
token, id, body = getPostParameters(self, ['token', 'id', 'body'])
if(not token):
self.write({
"message": "token is required"
})
return
if(not id or not body):
self.write({
"message": "the ticket id and the response text are required"
})
return
user = mydb.getUserByToken(token)
if not user:
self.write({
"message": "token is not valid"
})
return
if not mydb.isThisTokenAdmin(token):
self.write({
"message": "token is not valid"
})
return
if mydb.doesThisTicketExists(id) and mydb.saveThisResponseForThisTicket(id, body):
self.write({
"message": "Response to Ticket With id -%s- Sent Successfully" % id,
"code": "200"
})
else:
self.write({
"message": "Response to Ticket With id -%s- Was not Successfully. Please get sure for ticket existence." % id,
"code": "200"
})
class AdminChangeTicketStatus(MyRequestHandler):
def get(self, *args):
token, id, status = getQueryParametes(self, ['token', 'id', 'status'])
if (not token):
self.write({
"message": "token is required"
})
return
if (not id or not status):
self.write({
"message": "the ticket id and the status are required"
})
return
user = mydb.getUserByToken(token)
if not user:
self.write({
"message": "token is not valid"
})
return
if not mydb.isThisTokenAdmin(token):
self.write({
"message": "token is not valid"
})
return
if mydb.doesThisTicketExists(id) and mydb.changeTicketStatus(id, status):
self.write({
"message": "Status Ticket With id -%s- Changed Successfully" % id,
"code": "200"
})
return
else:
self.write({
"message": "No such ticket or user",
"code": "404"
})
return
def post(self, *args, **kwargs):
token, id, status = getPostParameters(self, ['token', 'id', 'status'])
if (not token):
self.write({
"message": "token is required"
})
return
if (not id or not status):
self.write({
"message": "the ticket id and the status are required"
})
return
user = mydb.getUserByToken(token)
if not user:
self.write({
"message": "token is not valid"
})
return
if not mydb.isThisTokenAdmin(token):
self.write({
"message": "token is not valid"
})
return
if mydb.doesThisTicketExists(id) and mydb.changeTicketStatus(id, status):
self.write({
"message": "Status Ticket With id -%s- Changed Successfully" % id,
"code": "200"
})
return
else:
self.write({
"message": "No such ticket or user",
"code": "404"
})
return
class DefaultHandler(MyRequestHandler):
def get(self, *args):
self.write({
"message": "page does not exist"
})
print(self.request)
def make_app():
urls = [
# GET Urls
(r"/signup(.*)", SignupHandler),
(r"/login(.*)", LoginHandler),
(r"/logout(.*)", LogoutHandler),
(r"/sendticket(.*)", SendTicketHandler),
(r"/getticketcli(.*)", UserGetTicketHandler),
(r"/closeticket(.*)", UserCloseTicketHandler),
(r"/getticketmod(.*)", AdminGetAllTicketsHandler),
(r"/restoticketmod(.*)", AdminAnswerToTicketHandler),
(r"/changestatus(.*)", AdminChangeTicketStatus),
# POST Urls
(r"/signup", SignupHandler),
(r"/login", LoginHandler),
(r"/logout", LogoutHandler),
(r"/sendticket", SendTicketHandler),
(r"/getticketcli", UserGetTicketHandler),
(r"/closeticket", UserCloseTicketHandler),
(r"/getticketmod", AdminGetAllTicketsHandler),
(r"/restoticketmod", AdminAnswerToTicketHandler),
(r"/changestatus", AdminChangeTicketStatus),
(r".*", DefaultHandler)
]
return Application(urls)
app = make_app()
app.listen(3000)
# print(options['mysql-database'])
IOLoop.instance().start()
| 31.283407
| 126
| 0.5207
| 1,929
| 21,304
| 5.748056
| 0.104199
| 0.05763
| 0.096681
| 0.037879
| 0.834325
| 0.819084
| 0.809885
| 0.770563
| 0.754239
| 0.744859
| 0
| 0.008227
| 0.372371
| 21,304
| 681
| 127
| 31.283407
| 0.821031
| 0.057407
| 0
| 0.771993
| 0
| 0
| 0.181913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046679
| false
| 0.057451
| 0.021544
| 0.001795
| 0.18851
| 0.014363
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
e055f1819f9394e32580df442ba7e1d0b5cf3217
| 13,318
|
py
|
Python
|
test/test_server.py
|
clach04/PyWebScrapBook
|
310e8f20cc5337336875679246b9269265b4476a
|
[
"MIT"
] | null | null | null |
test/test_server.py
|
clach04/PyWebScrapBook
|
310e8f20cc5337336875679246b9269265b4476a
|
[
"MIT"
] | null | null | null |
test/test_server.py
|
clach04/PyWebScrapBook
|
310e8f20cc5337336875679246b9269265b4476a
|
[
"MIT"
] | null | null | null |
from unittest import mock
import unittest
import sys
import os
import webscrapbook
from webscrapbook import WSB_DIR, WSB_CONFIG
from webscrapbook import server
root_dir = os.path.abspath(os.path.dirname(__file__))
server_root = os.path.join(root_dir, 'test_server')
server_config = os.path.join(server_root, WSB_DIR, WSB_CONFIG)
def setUpModule():
# create temp folders
os.makedirs(os.path.dirname(server_config), exist_ok=True)
# mock out user config
global mockings
mockings = [
mock.patch('webscrapbook.WSB_USER_DIR', server_root, 'wsb'),
mock.patch('webscrapbook.WSB_USER_CONFIG', server_root),
]
for mocking in mockings:
mocking.start()
def tearDownModule():
# purge WSB_DIR
try:
os.remove(os.path.join(server_root, WSB_DIR, 'config.ini'))
except FileNotFoundError:
pass
# stop mock
for mocking in mockings:
mocking.stop()
class TestConfigServer(unittest.TestCase):
@mock.patch('webscrapbook.server.make_server')
def test_root(self, mock_make_server):
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[2][1][1], f'Document Root: {server_root}')
@mock.patch('webscrapbook.server.make_server')
def test_host_port1(self, mock_make_server):
# IPv4
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 80
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['host'], '127.0.0.1')
self.assertEqual(mock_make_server.call_args[1]['port'], 80)
self.assertEqual(mock_make_server.mock_calls[3][1][1], 'Listening on http://127.0.0.1:80')
@mock.patch('webscrapbook.server.make_server')
def test_host_port2(self, mock_make_server):
# IPv6 => with []
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = ::1
port = 8000
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['host'], '::1')
self.assertEqual(mock_make_server.call_args[1]['port'], 8000)
self.assertEqual(mock_make_server.mock_calls[3][1][1], 'Listening on http://[::1]:8000')
@mock.patch('webscrapbook.server.make_server')
def test_host_port3(self, mock_make_server):
# domain_name (the server will actually bind to the resolved IP.)
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = localhost
port = 7357
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['host'], 'localhost')
self.assertEqual(mock_make_server.call_args[1]['port'], 7357)
self.assertEqual(mock_make_server.mock_calls[3][1][1], 'Listening on http://localhost:7357')
@mock.patch('webscrapbook.server.make_server')
def test_ssl1(self, mock_make_server):
# SSL off
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = false
ssl_key = .wsb/test.key
ssl_cert = .wsb/test.pem
browse = false
""")
server.serve(server_root)
self.assertIs(mock_make_server.call_args[1]['ssl_context'], None)
@mock.patch('webscrapbook.server.make_server')
def test_ssl2(self, mock_make_server):
# SSL with an adhoc key
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = true
ssl_key =
ssl_cert =
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['ssl_context'], 'adhoc')
@mock.patch('webscrapbook.server.make_server')
def test_ssl3(self, mock_make_server):
# SSL with missing key => adhoc
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = true
ssl_key =
ssl_cert = .wsb/test.pem
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['ssl_context'], 'adhoc')
@mock.patch('webscrapbook.server.make_server')
def test_ssl4(self, mock_make_server):
# SSL with missing cert => adhoc
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = true
ssl_key = .wsb/test.key
ssl_cert =
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['ssl_context'], 'adhoc')
@mock.patch('webscrapbook.server.make_server')
def test_ssl5(self, mock_make_server):
# SSL with key and cert
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = true
ssl_key = .wsb/test.key
ssl_cert = .wsb/test.pem
browse = false
""")
server.serve(server_root)
self.assertEqual(mock_make_server.call_args[1]['ssl_context'], (
os.path.join(server_root, WSB_DIR, 'test.pem'),
os.path.join(server_root, WSB_DIR, 'test.key'),
))
class TestConfigBrowser(unittest.TestCase):
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_command1(self, mock_make_server, mock_browser):
# server.browse = false
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 80
browse = false
[browser]
command =
""")
server.serve(server_root)
mock_browser.assert_not_called()
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_command2(self, mock_make_server, mock_browser):
# server.browse = true, browser.command not set
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 80
browse = true
[browser]
command =
""")
server.serve(server_root)
mock_browser.assert_called_once_with(None)
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_command3(self, mock_make_server, mock_browser):
# server.browse = true, browser.command set
with open(server_config, 'w', encoding='UTF-8') as f:
f.write(r"""[server]
host = 127.0.0.1
port = 80
browse = true
[browser]
command = "C:\Program Files\Mozilla Firefox\firefox.exe" %s &
""")
server.serve(server_root)
mock_browser.assert_called_once_with(r'"C:\Program Files\Mozilla Firefox\firefox.exe" %s &')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_host1(self, mock_make_server, mock_browser):
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][0][1], 'Launching browser at http://127.0.0.1:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_scheme1(self, mock_make_server, mock_browser):
# http
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = false
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://127.0.0.1:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_scheme2(self, mock_make_server, mock_browser):
# https
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = true
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at https://127.0.0.1:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_host1(self, mock_make_server, mock_browser):
# IPv4
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://127.0.0.1:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_host2(self, mock_make_server, mock_browser):
# IPv6 => with []
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = ::1
port = 7357
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://[::1]:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_host3(self, mock_make_server, mock_browser):
# domain name
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = localhost
port = 7357
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://localhost:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_host4(self, mock_make_server, mock_browser):
# null host (0.0.0.0) => localhost
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 0.0.0.0
port = 7357
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://localhost:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_host5(self, mock_make_server, mock_browser):
# null host (::) => localhost
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = ::
port = 7357
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://localhost:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_port1(self, mock_make_server, mock_browser):
# normal port
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = false
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://127.0.0.1:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_port2(self, mock_make_server, mock_browser):
# 80 for http
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 80
ssl_on = false
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://127.0.0.1 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_port3(self, mock_make_server, mock_browser):
# 443 for https
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 443
ssl_on = true
browse = true
[app]
base =
[browser]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at https://127.0.0.1 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_path1(self, mock_make_server, mock_browser):
# app.index not set
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = false
browse = true
[app]
index =
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://127.0.0.1:7357 ...')
@mock.patch('webbrowser.get')
@mock.patch('webscrapbook.server.make_server')
def test_url_path2(self, mock_make_server, mock_browser):
# app.index set
with open(server_config, 'w', encoding='UTF-8') as f:
f.write("""[server]
host = 127.0.0.1
port = 7357
ssl_on = false
browse = true
[app]
index = index.html
""")
server.serve(server_root)
self.assertEqual(mock_make_server.mock_calls[5][1][1], 'Launching browser at http://127.0.0.1:7357/index.html ...')
if __name__ == '__main__':
unittest.main()
| 27.069106
| 123
| 0.645893
| 1,892
| 13,318
| 4.365222
| 0.082452
| 0.094442
| 0.089841
| 0.071922
| 0.890423
| 0.860516
| 0.829883
| 0.815837
| 0.756508
| 0.727691
| 0
| 0.043801
| 0.201156
| 13,318
| 491
| 124
| 27.124236
| 0.732494
| 0.041072
| 0
| 0.784777
| 0
| 0
| 0.337205
| 0.064977
| 0
| 0
| 0
| 0
| 0.081365
| 1
| 0.070866
| false
| 0.002625
| 0.018373
| 0
| 0.094488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ec4086a9d5d048e4ac944af5ce5f5568281a84e
| 7,748
|
py
|
Python
|
tests/test_forwarded.py
|
rgacote/aiohttp-remotes
|
8b28757bc10ed7878e1bbc0539dcfb3b37cb5e96
|
[
"MIT"
] | 1
|
2019-08-20T17:18:39.000Z
|
2019-08-20T17:18:39.000Z
|
tests/test_forwarded.py
|
rgacote/aiohttp-remotes
|
8b28757bc10ed7878e1bbc0539dcfb3b37cb5e96
|
[
"MIT"
] | null | null | null |
tests/test_forwarded.py
|
rgacote/aiohttp-remotes
|
8b28757bc10ed7878e1bbc0539dcfb3b37cb5e96
|
[
"MIT"
] | null | null | null |
from aiohttp import web
from aiohttp_remotes import ForwardedRelaxed, ForwardedStrict
from aiohttp_remotes import setup as _setup
async def test_forwarded_relaxed_ok(aiohttp_client):
async def handler(request):
assert request.host == 'example.com'
assert request.scheme == 'https'
assert request.secure
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedRelaxed())
cl = await aiohttp_client(app)
hdr_val = '; '.join(['for=10.10.10.10',
'proto=https',
'host=example.com'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_relaxed_no_for(aiohttp_client):
async def handler(request):
assert request.host == 'example.com'
assert request.scheme == 'https'
assert request.secure
assert request.remote == '127.0.0.1'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedRelaxed())
cl = await aiohttp_client(app)
hdr_val = '; '.join(['proto=https',
'host=example.com'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_relaxed_no_proto(aiohttp_client):
async def handler(request):
assert request.host == 'example.com'
assert request.scheme == 'http'
assert not request.secure
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedRelaxed())
cl = await aiohttp_client(app)
hdr_val = '; '.join(['for=10.10.10.10',
'host=example.com'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_relaxed_no_host(aiohttp_client):
async def handler(request):
url = cl.make_url('/')
host = url.host + ':' + str(url.port)
assert request.host == host
assert request.scheme == 'https'
assert request.secure
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedRelaxed())
cl = await aiohttp_client(app)
hdr_val = '; '.join(['for=10.10.10.10',
'proto=https'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_relaxed_many_hosts(aiohttp_client):
async def handler(request):
assert request.host == 'example.com'
assert request.scheme == 'https'
assert request.secure
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedRelaxed())
cl = await aiohttp_client(app)
hdr_val1 = '; '.join(['for=20.20.20.20',
'proto=http',
'host=example.org'])
hdr_val2 = '; '.join(['for=10.10.10.10',
'proto=https',
'host=example.com'])
hdr_val = ', '.join([hdr_val1, hdr_val2])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_strict_ok(aiohttp_client):
async def handler(request):
assert request.host == 'example.com'
assert request.scheme == 'https'
assert request.secure
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['127.0.0.1']]))
cl = await aiohttp_client(app)
hdr_val = '; '.join(['for=10.10.10.10',
'proto=https',
'host=example.com'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_strict_no_proto(aiohttp_client):
async def handler(request):
assert request.host == 'example.com'
assert request.scheme == 'http'
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['127.0.0.1']]))
cl = await aiohttp_client(app)
hdr_val = '; '.join(['for=10.10.10.10',
'host=example.com'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_strict_no_host(aiohttp_client):
async def handler(request):
assert request.host.startswith('127.0.0.1:')
assert request.scheme == 'https'
assert request.remote == '10.10.10.10'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['127.0.0.1']]))
cl = await aiohttp_client(app)
hdr_val = '; '.join(['for=10.10.10.10',
'proto=https'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 200
async def test_forwarded_strict_too_many_protos(aiohttp_client):
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['127.0.0.1']]))
cl = await aiohttp_client(app)
hdr1_val = '; '.join(['for=10.10.10.10',
'proto=https'])
hdr2_val = '; '.join(['for=20.20.20.20',
'proto=http'])
hdr_val = ', '.join([hdr1_val, hdr2_val])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 400
async def test_forwarded_strict_too_many_for(aiohttp_client):
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['127.0.0.1']]))
cl = await aiohttp_client(app)
resp = await cl.get('/',
headers={'Forwarded':
'for=10.10.10.10, for=11.11.11.11'})
assert resp.status == 400
async def test_forwarded_strict_untrusted_ip(aiohttp_client):
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['20.20.20.20']]))
cl = await aiohttp_client(app)
resp = await cl.get('/',
headers={'Forwarded': 'for=10.10.10.10'})
assert resp.status == 400
async def test_forwarded_strict_whitelist(aiohttp_client):
async def handler(request):
assert request.remote == '127.0.0.1'
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['20.20.20.20']], white_paths=['/']))
cl = await aiohttp_client(app)
resp = await cl.get('/',
headers={'Forwarded': 'for=10.10.10.10'})
assert resp.status == 200
async def test_forwarded_strict_no_for(aiohttp_client):
async def handler(request):
return web.Response()
app = web.Application()
app.router.add_get('/', handler)
await _setup(app, ForwardedStrict([['127.0.0.1'], ['10.10.10.10']]))
cl = await aiohttp_client(app)
hdr_val = ', '.join(['for=10.10.10.10',
'proto=https'])
resp = await cl.get('/', headers={'Forwarded': hdr_val})
assert resp.status == 400
| 32.970213
| 76
| 0.598993
| 965
| 7,748
| 4.668394
| 0.076684
| 0.053274
| 0.053274
| 0.035516
| 0.926304
| 0.920089
| 0.914761
| 0.911654
| 0.883019
| 0.842397
| 0
| 0.051879
| 0.251162
| 7,748
| 234
| 77
| 33.111111
| 0.724578
| 0
| 0
| 0.802139
| 0
| 0
| 0.118353
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0
| false
| 0
| 0.016043
| 0
| 0.085562
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16198803cc216e270ca1bf56ab77df7b23cc51b1
| 71
|
py
|
Python
|
cw01/zad11_12.py
|
BartoszHolubowicz/projekt-psi
|
e1d753e543ed2676a21ba1d99191e36dbe484ae5
|
[
"bzip2-1.0.6"
] | null | null | null |
cw01/zad11_12.py
|
BartoszHolubowicz/projekt-psi
|
e1d753e543ed2676a21ba1d99191e36dbe484ae5
|
[
"bzip2-1.0.6"
] | null | null | null |
cw01/zad11_12.py
|
BartoszHolubowicz/projekt-psi
|
e1d753e543ed2676a21ba1d99191e36dbe484ae5
|
[
"bzip2-1.0.6"
] | null | null | null |
# 11.
print(list(range(1, 10)))
# 12.
print(list(range(100, 20, -5)))
| 11.833333
| 31
| 0.577465
| 13
| 71
| 3.153846
| 0.769231
| 0.439024
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213115
| 0.140845
| 71
| 5
| 32
| 14.2
| 0.459016
| 0.098592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
162d388e7eb0b780d2a8e59c687022c4f9eef662
| 3,632
|
py
|
Python
|
pyaz/policy/state/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/policy/state/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/policy/state/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Manage policy compliance states.
'''
from ... pyaz_utils import _call_az
def list(all=None, apply=None, expand=None, filter=None, from_=None, management_group=None, namespace=None, order_by=None, parent=None, policy_assignment=None, policy_definition=None, policy_set_definition=None, resource=None, resource_group=None, resource_type=None, select=None, to=None, top=None):
'''
List policy compliance states.
Optional Parameters:
- all -- Within the specified time interval, get all policy states instead of the latest only.
- apply -- Apply expression for aggregations using OData notation.
- expand -- Expand expression using OData notation.
- filter -- Filter expression using OData notation.
- from_ -- ISO 8601 formatted timestamp specifying the start time of the interval to query.
- management_group -- Name of management group.
- namespace -- Provider namespace (Ex: Microsoft.Provider).
- order_by -- Ordering expression using OData notation.
- parent -- The parent path (Ex: resourceTypeA/nameA/resourceTypeB/nameB).
- policy_assignment -- Name of policy assignment.
- policy_definition -- Name of policy definition.
- policy_set_definition -- Name of policy set definition.
- resource -- Resource ID or resource name. If a name is given, please provide the resource group and other relevant resource id arguments.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- resource_type -- Resource type (Ex: resourceTypeC).
- select -- Select expression using OData notation.
- to -- ISO 8601 formatted timestamp specifying the end time of the interval to query.
- top -- Maximum number of records to return.
'''
return _call_az("az policy state list", locals())
def summarize(filter=None, from_=None, management_group=None, namespace=None, parent=None, policy_assignment=None, policy_definition=None, policy_set_definition=None, resource=None, resource_group=None, resource_type=None, to=None, top=None):
'''
Summarize policy compliance states.
Optional Parameters:
- filter -- Filter expression using OData notation.
- from_ -- ISO 8601 formatted timestamp specifying the start time of the interval to query.
- management_group -- Name of management group.
- namespace -- Provider namespace (Ex: Microsoft.Provider).
- parent -- The parent path (Ex: resourceTypeA/nameA/resourceTypeB/nameB).
- policy_assignment -- Name of policy assignment.
- policy_definition -- Name of policy definition.
- policy_set_definition -- Name of policy set definition.
- resource -- Resource ID or resource name. If a name is given, please provide the resource group and other relevant resource id arguments.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- resource_type -- Resource type (Ex: resourceTypeC).
- to -- ISO 8601 formatted timestamp specifying the end time of the interval to query.
- top -- Maximum number of records to return.
'''
return _call_az("az policy state summarize", locals())
def trigger_scan(no_wait=None, resource_group=None):
'''
Trigger a policy compliance evaluation for a scope.
Optional Parameters:
- no_wait -- Do not wait for the long-running operation to finish.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az policy state trigger-scan", locals())
| 55.876923
| 300
| 0.732379
| 475
| 3,632
| 5.498947
| 0.218947
| 0.054747
| 0.043645
| 0.053599
| 0.783308
| 0.741194
| 0.731623
| 0.731623
| 0.731623
| 0.69487
| 0
| 0.005382
| 0.181443
| 3,632
| 64
| 301
| 56.75
| 0.873192
| 0.717236
| 0
| 0
| 0
| 0
| 0.088058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
16473c72befcd57e3a03e200d7ffbdf952a3f147
| 69,867
|
py
|
Python
|
tests/test_cli.py
|
Echofi-co/ecs-deploy
|
fdedebe81f5563e96524860b3515d70de92b77be
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cli.py
|
Echofi-co/ecs-deploy
|
fdedebe81f5563e96524860b3515d70de92b77be
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cli.py
|
Echofi-co/ecs-deploy
|
fdedebe81f5563e96524860b3515d70de92b77be
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
import pytest
from click.testing import CliRunner
from mock.mock import patch
from ecs_deploy import cli
from ecs_deploy.cli import get_client, record_deployment
from ecs_deploy.ecs import EcsClient
from ecs_deploy.newrelic import Deployment, NewRelicDeploymentException
from tests.test_ecs import EcsTestClient, CLUSTER_NAME, SERVICE_NAME, \
TASK_DEFINITION_ARN_1, TASK_DEFINITION_ARN_2, TASK_DEFINITION_FAMILY_1, \
TASK_DEFINITION_REVISION_2, TASK_DEFINITION_REVISION_1, \
TASK_DEFINITION_REVISION_3
@pytest.fixture
def runner():
return CliRunner()
@patch.object(EcsClient, '__init__')
def test_get_client(ecs_client):
ecs_client.return_value = None
client = get_client('access_key_id', 'secret_access_key', 'region', 'profile')
ecs_client.assert_called_once_with('access_key_id', 'secret_access_key', 'region', 'profile')
assert isinstance(client, EcsClient)
def test_ecs(runner):
result = runner.invoke(cli.ecs)
assert result.exit_code == 0
assert not result.exception
assert 'Usage: ecs [OPTIONS] COMMAND [ARGS]' in result.output
assert ' deploy ' in result.output
assert ' scale ' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME))
assert result.exit_code == 1
assert result.output == u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n'
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_invalid_cluster(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, ('unknown-cluster', SERVICE_NAME))
assert result.exit_code == 1
assert result.output == u'An error occurred (ClusterNotFoundException) when calling the DescribeServices ' \
u'operation: Cluster not found.\n\n'
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_invalid_service(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, 'unknown-service'))
assert result.exit_code == 1
assert result.output == u'An error occurred when calling the DescribeServices operation: Service not found.\n\n'
@patch('ecs_deploy.cli.get_client')
def test_deploy(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Updating task definition" not in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_rollback(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', wait=2)
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--timeout=1', '--rollback'))
assert result.exit_code == 1
assert result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Deployment failed" in result.output
assert u"Rolling back to task definition: test-task:1" in result.output
assert u'Successfully changed task definition to: test-task:1' in result.output
assert u"Rollback successful" in result.output
assert u'Deployment failed, but service has been rolled back to ' \
u'previous task definition: test-task:1' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_deregister(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--no-deregister'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' not in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Updating task definition" not in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_role_arn(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-r', 'arn:new:role'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Updating task definition" in result.output
assert u'Changed role_arn to: "arn:new:role" (was: "arn:test:role:1")' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_execution_role_arn(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-x', 'arn:new:role'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Updating task definition" in result.output
assert u'Changed execution_role_arn to: "arn:new:role" (was: "arn:test:role:1")' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_new_tag(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-t', 'latest'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed image of container "webserver" to: "webserver:latest" (was: "webserver:123")' in result.output
assert u'Changed image of container "application" to: "application:latest" (was: "application:123")' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_image(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-i', 'application', 'application:latest'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed image of container "application" to: "application:latest" (was: "application:123")' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_two_new_images(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-i', 'application', 'application:latest',
'-i', 'webserver', 'webserver:latest'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed image of container "webserver" to: "webserver:latest" (was: "webserver:123")' in result.output
assert u'Changed image of container "application" to: "application:latest" (was: "application:123")' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_command(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-c', 'application', 'foobar'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed command of container "application" to: "foobar" (was: "run")' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@pytest.mark.parametrize(
'cmd_input, cmd_expected',
(
(
u'curl -f http://localhost/alive/',
u'curl -f http://localhost/alive/',
),
(
u'CMD-SHELL curl -f http://localhost/alive/ || 1',
u'CMD-SHELL curl -f http://localhost/alive/ || 1',
)
)
)
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_health_check(get_client, cmd_input, cmd_expected, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-h', 'application', cmd_input, 30, 5, 3, 0))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
expected_health_check = {
u'command': cmd_expected,
u'interval': 30,
u'timeout': 5,
u'retries': 3,
u'startPeriod': 0,
}
assert 'Changed healthCheck of container "application" to: ' in result.output
assert "'command': " in result.output
assert cmd_expected in result.output
assert "'interval': 30" in result.output
assert "'timeout': 5" in result.output
assert "'retries': 3" in result.output
assert "'startPeriod': 0" in result.output
assert '(was: "None")' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_environment_variable(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-e', 'application', 'foo', 'bar',
'-e', 'webserver', 'foo', 'baz'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "foo" of container "application" to: "bar"' in result.output
assert u'Changed environment "foo" of container "webserver" to: "baz"' in result.output
assert u'Changed environment "lorem" of container "webserver" to: "ipsum"' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_change_environment_variable_empty_string(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'application', 'foo', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "foo" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_new_empty_environment_variable(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'application', 'new', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "new" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_empty_environment_variable_again(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'webserver', 'empty', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_previously_empty_environment_variable_with_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'webserver', 'empty', 'not-empty'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "empty" of container "webserver" to: "not-empty"' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_s3_env_file_with_previous_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--s3-env-file', 'webserver', 'arn:aws:s3:::centerfun/.env', '--s3-env-file', 'webserver', 'arn:aws:s3:::stormzone/.env'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environmentFiles of container "webserver" to: "{\'arn:aws:s3:::stormzone/.env\', \'arn:aws:s3:::coolBuckets/dev/.env\', \'arn:aws:s3:::myS3bucket/myApp/.env\', \'arn:aws:s3:::centerfun/.env\'}" (was: "{\'arn:aws:s3:::coolBuckets/dev/.env\', \'arn:aws:s3:::myS3bucket/myApp/.env\'}")'
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_runtime_platform_with_previous_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--runtime-platform', 'ARM64', 'WINDOWS'))
expected_runtime_platform = {
u'cpuArchitecture': u'ARM64',
u'operatingSystemFamily': u'WINDOWS'
}
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert str(expected_runtime_platform) in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_previously_empty_runtime_platform_with_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_2, '--runtime-platform', 'ARM64', 'WINDOWS'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:2" in result.output
assert u"Updating task definition" in result.output
expected_runtime_platform = {
u'cpuArchitecture': u'ARM64',
u'operatingSystemFamily': u'WINDOWS'
}
assert str(expected_runtime_platform) in result.output
assert u"Creating new task definition revision" in result.output
assert u"Successfully created revision: 2" in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_exclusive_environment(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'webserver', 'new-env', 'new-value', '--exclusive-env'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "new-env" of container "webserver" to: "new-value"' in result.output
assert u'Removed environment "foo" of container "webserver"' in result.output
assert u'Removed environment "lorem" of container "webserver"' in result.output
assert u'Removed secret' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_docker_laberl(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-d', 'application', 'foo', 'bar',
'-d', 'webserver', 'foo', 'baz'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "foo" of container "application" to: "bar"' in result.output
assert u'Changed dockerLabel "foo" of container "webserver" to: "baz"' in result.output
assert u'Changed dockerLabel "lorem" of container "webserver" to: "ipsum"' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_change_docker_label_empty_string(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-d', 'application', 'foo', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "foo" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_new_empty_docker_label(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-d', 'application', 'new', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "new" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_empty_docker_label_again(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-d', 'webserver', 'empty', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed dockerLabel' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_previously_empty_docker_label_with_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-d', 'webserver', 'empty', 'not-empty'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "empty" of container "webserver" to: "not-empty"' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_exclusive_docker_label(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-d', 'webserver', 'new-label', 'new-value', '--exclusive-docker-labels'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "new-label" of container "webserver" to: "new-value"' in result.output
assert u'Removed dockerLabel "foo" of container "webserver"' in result.output
assert u'Removed dockerLabel "lorem" of container "webserver"' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_exclusive_secret(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-s', 'webserver', 'new-secret', 'new-place', '--exclusive-secrets'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed secret "new-secret" of container "webserver" to: "new-place"' in result.output
assert u'Removed secret "baz" of container "webserver"' in result.output
assert u'Removed secret "dolor" of container "webserver"' in result.output
assert u'Removed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_one_new_secret_variable(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-s', 'application', 'baz', 'qux',
'-s', 'webserver', 'baz', 'quux'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed secret "baz" of container "application" to: "qux"' in result.output
assert u'Changed secret "baz" of container "webserver" to: "quux"' in result.output
assert u'Changed secret "dolor" of container "webserver" to: "sit"' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_changing_environment_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-e', 'webserver', 'foo', 'bar'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_changing_docker_labels(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-d', 'webserver', 'foo', 'bar'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed dockerLabel' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_changing_secrets_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-s', 'webserver', 'baz', 'qux'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed secrets' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_diff(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '-t', 'latest', '-e', 'webserver', 'foo', 'barz', '--no-diff'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_errors(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', deployment_errors=True)
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME))
assert result.exit_code == 1
assert u"Deployment failed" in result.output
assert u"ERROR: Service was unable to Lorem Ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_client_errors(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', client_errors=True)
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME))
assert result.exit_code == 1
assert u"Something went wrong" in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_ignore_warnings(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', deployment_errors=True)
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--ignore-warnings'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u"WARNING: Service was unable to Lorem Ipsum" in result.output
assert u"Continuing." in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.newrelic.Deployment.deploy')
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_newrelic_tag(get_client, newrelic, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-t', 'my-tag',
'--newrelic-apikey', 'test',
'--newrelic-appid', 'test',
'--comment', 'Lorem Ipsum'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Recording deployment in New Relic" in result.output
newrelic.assert_called_once_with(
'my-tag',
'',
'Lorem Ipsum'
)
@patch('ecs_deploy.newrelic.Deployment.deploy')
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_newrelic_revision(get_client, newrelic, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-i', 'application', 'application:latest',
'--newrelic-apikey', 'test',
'--newrelic-appid', 'test',
'--newrelic-revision', '1.0.0',
'--comment', 'Lorem Ipsum'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Recording deployment in New Relic" in result.output
newrelic.assert_called_once_with(
'1.0.0',
'',
'Lorem Ipsum'
)
@patch('ecs_deploy.newrelic.Deployment.deploy')
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_newrelic_tag_revision(get_client, newrelic, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-t', 'my-tag',
'--newrelic-apikey', 'test',
'--newrelic-appid', 'test',
'--newrelic-revision', '1.0.0',
'--comment', 'Lorem Ipsum'))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Successfully deregistered revision: 1' in result.output
assert u'Successfully changed task definition to: test-task:2' in result.output
assert u'Deployment successful' in result.output
assert u"Recording deployment in New Relic" in result.output
newrelic.assert_called_once_with(
'1.0.0',
'',
'Lorem Ipsum'
)
@patch('ecs_deploy.newrelic.Deployment.deploy')
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_newrelic_errors(get_client, deploy, runner):
e = NewRelicDeploymentException('Recording deployment failed')
deploy.side_effect = e
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME,
'-t', 'test',
'--newrelic-apikey', 'test',
'--newrelic-appid', 'test'))
assert result.exit_code == 1
assert u"Recording deployment failed" in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_task_definition_arn(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--task', TASK_DEFINITION_ARN_2))
assert result.exit_code == 0
assert not result.exception
assert u"Deploying based on task definition: test-task:2" in result.output
assert u'Successfully deregistered revision: 2' in result.output
assert u'Deployment successful' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_timeout(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', wait=2)
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--timeout', '1'))
assert result.exit_code == 1
assert u"Deployment failed due to timeout. Please see: " \
u"https://github.com/fabfuel/ecs-deploy#timeout" in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_with_wait_within_timeout(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', wait=2)
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--timeout', '10'))
assert result.exit_code == 0
assert u'Deploying new task definition' in result.output
assert u'...' in result.output
@patch('ecs_deploy.cli.get_client')
def test_deploy_without_timeout(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', wait=2)
start_time = datetime.now()
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--timeout', '-1'))
end_time = datetime.now()
assert result.exit_code == 0
# assert task is not waiting for deployment
assert u'Deploying new task definition\n' in result.output
assert u'...' not in result.output
assert (end_time - start_time).total_seconds() < 1
@patch('ecs_deploy.cli.get_client')
def test_deploy_unknown_task_definition_arn(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.deploy, (CLUSTER_NAME, SERVICE_NAME, '--task', u'arn:aws:ecs:eu-central-1:123456789012:task-definition/foobar:55'))
assert result.exit_code == 1
assert u"Unknown task definition arn: arn:aws:ecs:eu-central-1:123456789012:task-definition/foobar:55" in result.output
@patch('ecs_deploy.cli.get_client')
def test_scale_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2'))
assert result.exit_code == 1
assert result.output == u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n'
@patch('ecs_deploy.cli.get_client')
def test_scale_with_invalid_cluster(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.scale, ('unknown-cluster', SERVICE_NAME, '2'))
assert result.exit_code == 1
assert result.output == u'An error occurred (ClusterNotFoundException) when calling the DescribeServices ' \
u'operation: Cluster not found.\n\n'
@patch('ecs_deploy.cli.get_client')
def test_scale_with_invalid_service(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.scale, (CLUSTER_NAME, 'unknown-service', '2'))
assert result.exit_code == 1
assert result.output == u'An error occurred when calling the DescribeServices operation: Service not found.\n\n'
@patch('ecs_deploy.cli.get_client')
def test_scale(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2'))
assert not result.exception
assert result.exit_code == 0
assert u"Successfully changed desired count to: 2" in result.output
assert u"Scaling successful" in result.output
@patch('ecs_deploy.cli.get_client')
def test_scale_with_errors(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', deployment_errors=True)
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2'))
assert result.exit_code == 1
assert u"Scaling failed" in result.output
assert u"ERROR: Service was unable to Lorem Ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_scale_with_client_errors(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', client_errors=True)
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2'))
assert result.exit_code == 1
assert u"Something went wrong" in result.output
@patch('ecs_deploy.cli.get_client')
def test_scale_ignore_warnings(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', deployment_errors=True)
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2', '--ignore-warnings'))
assert not result.exception
assert result.exit_code == 0
assert u"Successfully changed desired count to: 2" in result.output
assert u"WARNING: Service was unable to Lorem Ipsum" in result.output
assert u"Continuing." in result.output
assert u"Scaling successful" in result.output
@patch('ecs_deploy.cli.get_client')
def test_scale_with_timeout(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', wait=2)
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2', '--timeout', '1'))
assert result.exit_code == 1
assert u"Scaling failed due to timeout. Please see: " \
u"https://github.com/fabfuel/ecs-deploy#timeout" in result.output
@patch('ecs_deploy.cli.get_client')
def test_scale_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.scale, (CLUSTER_NAME, SERVICE_NAME, '2'))
assert result.exit_code == 1
assert result.output == u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n'
@patch('ecs_deploy.cli.get_client')
def test_run_task(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task'))
assert not result.exception
assert result.exit_code == 0
assert u"Successfully started 2 instances of task: test-task:2" in result.output
assert u"- arn:foo:bar" in result.output
assert u"- arn:lorem:ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_run_task_with_command(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task', '2', '-c', 'webserver', 'date'))
assert not result.exception
assert result.exit_code == 0
assert u"Using task definition: test-task" in result.output
assert u'Changed command of container "webserver" to: "date" (was: "run")' in result.output
assert u"Successfully started 2 instances of task: test-task:2" in result.output
assert u"- arn:foo:bar" in result.output
assert u"- arn:lorem:ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_run_task_with_environment_var(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task', '2', '-e', 'application', 'foo', 'bar'))
assert not result.exception
assert result.exit_code == 0
assert u"Using task definition: test-task" in result.output
assert u'Changed environment "foo" of container "application" to: "bar"' in result.output
assert u"Successfully started 2 instances of task: test-task:2" in result.output
assert u"- arn:foo:bar" in result.output
assert u"- arn:lorem:ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_run_task_with_docker_label(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task', '2', '-d', 'application', 'foo', 'bar'))
assert not result.exception
assert result.exit_code == 0
assert u"Using task definition: test-task" in result.output
assert u'Changed dockerLabel "foo" of container "application" to: "bar"' in result.output
assert u"Successfully started 2 instances of task: test-task:2" in result.output
assert u"- arn:foo:bar" in result.output
assert u"- arn:lorem:ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_run_task_without_diff(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task', '2', '-e', 'application', 'foo', 'bar', '--no-diff'))
assert not result.exception
assert result.exit_code == 0
assert u"Using task definition: test-task" not in result.output
assert u'Changed environment' not in result.output
assert u"Successfully started 2 instances of task: test-task:2" in result.output
assert u"- arn:foo:bar" in result.output
assert u"- arn:lorem:ipsum" in result.output
@patch('ecs_deploy.cli.get_client')
def test_run_task_with_errors(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key', deployment_errors=True)
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task'))
assert result.exception
assert result.exit_code == 1
assert u"An error occurred (123) when calling the fake_error operation: Something went wrong" in result.output
@patch('ecs_deploy.cli.get_client')
def test_run_task_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.run, (CLUSTER_NAME, 'test-task'))
assert result.exit_code == 1
assert result.output == u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n'
@patch('ecs_deploy.cli.get_client')
def test_run_task_with_invalid_cluster(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.run, ('unknown-cluster', 'test-task'))
assert result.exit_code == 1
assert result.output == u'An error occurred (ClusterNotFoundException) when calling the RunTask operation: Cluster not found.\n\n'
@patch('ecs_deploy.newrelic.Deployment')
def test_record_deployment_without_revision(Deployment):
result = record_deployment(None, None, None, None, None, None, None)
assert result is False
@patch('ecs_deploy.newrelic.Deployment')
def test_record_deployment_without_apikey(Deployment):
result = record_deployment('1.2.3', None, None, None, None, None, None)
assert result is False
@patch('click.secho')
@patch('ecs_deploy.newrelic.Deployment')
def test_record_deployment_without_appid(Deployment, secho):
result = record_deployment('1.2.3', 'APIKEY',None, None, None, None, None)
secho.assert_any_call('Missing required parameters for recording New Relic deployment.Please see https://github.com/fabfuel/ecs-deploy#new-relic')
assert result is False
@patch('click.secho')
@patch.object(Deployment, 'deploy')
@patch.object(Deployment, '__init__')
def test_record_deployment_tag(deployment_init, deployment_deploy, secho):
deployment_init.return_value = None
result = record_deployment('1.2.3', 'APIKEY', 'APPID', 'EU', None, 'Comment', 'user')
deployment_init.assert_called_once_with('APIKEY', 'APPID', 'user', 'EU')
deployment_deploy.assert_called_once_with('1.2.3', '', 'Comment')
secho.assert_any_call('Recording deployment in New Relic', nl=False)
secho.assert_any_call('\nDone\n', fg='green')
assert result is True
@patch('ecs_deploy.cli.get_client')
def test_update_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1,))
assert result.exit_code == 1
assert u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_creates_new_revision(get_client, runner):
get_client.return_value = EcsTestClient('access_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1,))
assert result.exit_code == 0
assert u"Creating new task definition revision" in result.output
assert u"Successfully created revision: 2" in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1,))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_with_role_arn(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-r', 'arn:new:role'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed role_arn to: "arn:new:role" (was: "arn:test:role:1")' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_new_tag(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-t', 'latest'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed image of container "webserver" to: "webserver:latest" (was: "webserver:123")' in result.output
assert u'Changed image of container "application" to: "application:latest" (was: "application:123")' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_one_new_image(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-i', 'application', 'application:latest'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed image of container "application" to: "application:latest" (was: "application:123")' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_two_new_images(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-i', 'application', 'application:latest',
'-i', 'webserver', 'webserver:latest'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed image of container "webserver" to: "webserver:latest" (was: "webserver:123")' in result.output
assert u'Changed image of container "application" to: "application:latest" (was: "application:123")' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_one_new_command(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-c', 'application', 'foobar'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed command of container "application" to: "foobar" (was: "run")' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_one_new_environment_variable(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1,
'-e', 'application', 'foo', 'bar',
'-e', 'webserver', 'foo', 'baz'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "foo" of container "application" to: "bar"' in result.output
assert u'Changed environment "foo" of container "webserver" to: "baz"' in result.output
assert u'Changed environment "lorem" of container "webserver" to: "ipsum"' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_change_environment_variable_empty_string(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-e', 'application', 'foo', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "foo" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_new_empty_environment_variable(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-e', 'application', 'new', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "new" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_empty_environment_variable_again(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-e', 'webserver', 'empty', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_previously_empty_environment_variable_with_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-e', 'webserver', 'empty', 'not-empty'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "empty" of container "webserver" to: "not-empty"' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_exclusive_environment(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-e', 'webserver', 'new-env', 'new-value', '--exclusive-env'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed environment "new-env" of container "webserver" to: "new-value"' in result.output
assert u'Removed environment "foo" of container "webserver"' in result.output
assert u'Removed environment "lorem" of container "webserver"' in result.output
assert u'Removed secret' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_one_new_docker_label(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1,
'-d', 'application', 'foo', 'bar',
'-d', 'webserver', 'foo', 'baz'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "foo" of container "application" to: "bar"' in result.output
assert u'Changed dockerLabel "foo" of container "webserver" to: "baz"' in result.output
assert u'Changed dockerLabel "lorem" of container "webserver" to: "ipsum"' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_change_docker_label_empty_string(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-d', 'application', 'foo', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "foo" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_new_empty_docker_label(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-d', 'application', 'new', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "new" of container "application" to: ""' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_empty_docker_label_again(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-d', 'webserver', 'empty', ''))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed dockerLabel' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_previously_empty_docker_label_with_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-d', 'webserver', 'empty', 'not-empty'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "empty" of container "webserver" to: "not-empty"' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_exclusive_docker_labels(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-d', 'webserver', 'new-label', 'new-value', '--exclusive-docker-labels'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed dockerLabel "new-label" of container "webserver" to: "new-value"' in result.output
assert u'Removed dockerLabel "foo" of container "webserver"' in result.output
assert u'Removed dockerLabel "lorem" of container "webserver"' in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_exclusive_secret(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-s', 'webserver', 'new-secret', 'new-place', '--exclusive-secrets'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed secret "new-secret" of container "webserver" to: "new-place"' in result.output
assert u'Removed secret "baz" of container "webserver"' in result.output
assert u'Removed secret "dolor" of container "webserver"' in result.output
assert u'Removed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_one_new_secret_variable(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1,
'-s', 'application', 'baz', 'qux',
'-s', 'webserver', 'baz', 'quux'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" in result.output
assert u'Changed secret "baz" of container "application" to: "qux"' in result.output
assert u'Changed secret "baz" of container "webserver" to: "quux"' in result.output
assert u'Changed secret "dolor" of container "webserver" to: "sit"' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_without_changing_environment_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-e', 'webserver', 'foo', 'bar'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_without_changing_docker_labels(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-d', 'webserver', 'foo', 'bar'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed dockerLabel' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_without_changing_secrets_value(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-s', 'webserver', 'baz', 'qux'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed secrets' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_update_task_without_diff(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.update, (TASK_DEFINITION_ARN_1, '-t', 'latest', '-e', 'webserver', 'foo', 'barz', '--no-diff'))
assert result.exit_code == 0
assert not result.exception
assert u"Update task definition based on: test-task:1" in result.output
assert u"Updating task definition" not in result.output
assert u'Changed environment' not in result.output
assert u'Successfully created revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_cron_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.cron, (CLUSTER_NAME, TASK_DEFINITION_FAMILY_1, 'rule'))
assert result.exit_code == 1
assert u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n' in result.output
@patch('ecs_deploy.cli.get_client')
def test_cron(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.cron, (CLUSTER_NAME, TASK_DEFINITION_FAMILY_1, 'rule'))
assert not result.exception
assert result.exit_code == 0
assert u'Update task definition based on: test-task:2' in result.output
assert u'Creating new task definition revision' in result.output
assert u'Successfully created revision: 2' in result.output
assert u'Updating scheduled task' in result.output
assert u'Deregister task definition revision' in result.output
assert u'Successfully deregistered revision: 2' in result.output
@patch('ecs_deploy.cli.get_client')
def test_diff(get_client, runner):
get_client.return_value = EcsTestClient('acces_key', 'secret_key')
result = runner.invoke(cli.diff, (TASK_DEFINITION_FAMILY_1, str(TASK_DEFINITION_REVISION_1), str(TASK_DEFINITION_REVISION_3)))
assert not result.exception
assert result.exit_code == 0
assert 'change: containers.webserver.image' in result.output
assert '- "webserver:123"' in result.output
assert '+ "webserver:456"' in result.output
assert 'change: containers.webserver.command' in result.output
assert '- "run"' in result.output
assert '+ "execute"' in result.output
assert 'change: containers.webserver.environment.foo' in result.output
assert '- "bar"' in result.output
assert '+ "foobar"' in result.output
assert 'remove: containers.webserver.environment' in result.output
assert '- empty: ' in result.output
assert 'change: containers.webserver.dockerLabels.foo' in result.output
assert '- "bar"' in result.output
assert '+ "foobar"' in result.output
assert 'remove: containers.webserver.dockerLabels' in result.output
assert '- empty: ' in result.output
assert 'change: containers.webserver.secrets.baz' in result.output
assert '- "qux"' in result.output
assert '+ "foobaz"' in result.output
assert 'change: containers.webserver.secrets.dolor' in result.output
assert '- "sit"' in result.output
assert '+ "loremdolor"' in result.output
assert 'change: role_arn' in result.output
assert '- "arn:test:role:1"' in result.output
assert '+ "arn:test:another-role:1"' in result.output
assert 'change: execution_role_arn' in result.output
assert '- "arn:test:role:1"' in result.output
assert '+ "arn:test:another-role:1"' in result.output
assert 'add: containers.webserver.environment' in result.output
assert '+ newvar: "new value"' in result.output
assert 'add: containers.webserver.dockerLabel' in result.output
assert '+ newlabel: "new value"' in result.output
@patch('ecs_deploy.cli.get_client')
def test_diff_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.diff, (TASK_DEFINITION_FAMILY_1, str(TASK_DEFINITION_REVISION_1), str(TASK_DEFINITION_REVISION_3)))
assert result.exit_code == 1
assert u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n' in result.output
| 47.560926
| 304
| 0.730316
| 9,689
| 69,867
| 5.110435
| 0.028899
| 0.114147
| 0.130627
| 0.151873
| 0.948904
| 0.940624
| 0.935575
| 0.923861
| 0.917237
| 0.911057
| 0
| 0.008602
| 0.163053
| 69,867
| 1,468
| 305
| 47.593324
| 0.83817
| 0.000587
| 0
| 0.750218
| 0
| 0.013974
| 0.362254
| 0.047047
| 0
| 0
| 0
| 0
| 0.569432
| 1
| 0.087336
| false
| 0
| 0.00786
| 0.000873
| 0.09607
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1670045b61ce2ddad9292b2508c14d8ec7c3de5d
| 19,987
|
py
|
Python
|
robin_stocks/options.py
|
nkaliape/robin_stocks
|
36a100610b55ab9fad1a57f92a57bad6d9c1b835
|
[
"MIT"
] | null | null | null |
robin_stocks/options.py
|
nkaliape/robin_stocks
|
36a100610b55ab9fad1a57f92a57bad6d9c1b835
|
[
"MIT"
] | null | null | null |
robin_stocks/options.py
|
nkaliape/robin_stocks
|
36a100610b55ab9fad1a57f92a57bad6d9c1b835
|
[
"MIT"
] | null | null | null |
"""Contains functions for getting information about options."""
import robin_stocks.helper as helper
import robin_stocks.urls as urls
@helper.login_required
def get_aggregate_positions(info=None):
"""Collapses all option orders for a stock into a single dictionary.
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each order. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.aggregate()
data = helper.request_get(url,'pagination')
return(helper.filter(data,info))
@helper.login_required
def get_market_options(info=None):
"""Returns a list of all options.
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.option_orders()
data = helper.request_get(url,'pagination')
return(helper.filter(data,info))
@helper.login_required
def get_all_option_positions(info=None):
"""Returns all option positions ever held for the account.
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.option_positions()
data = helper.request_get(url,'pagination')
return(helper.filter(data,info))
@helper.login_required
def get_open_option_positions(info=None):
"""Returns all open option positions for the account.
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for each option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
url = urls.option_positions()
payload = { 'nonzero' : 'True' }
data = helper.request_get(url,'pagination',payload)
return(helper.filter(data,info))
def get_chains(symbol,info=None):
"""Returns the chain information of an option.
:param symbol: The ticker of the stock.
:type symbol: str
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the option. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
except AttributeError as message:
print(message)
return None
url = urls.chains(symbol)
data = helper.request_get(url)
return(helper.filter(data,info))
def find_tradable_options_for_stock(symbol,optionType='both',info=None):
"""Returns a list of all available options for a stock.
:param symbol: The ticker of the stock.
:type symbol: str
:param optionType: Can be either 'call' or 'put' or left blank to get both.
:type optionType: Optional[str]
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all calls of the stock. If info parameter is provided, \
a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
url = urls.option_instruments()
if (optionType == 'call' or optionType == 'put'):
payload = { 'chain_id' : helper.id_for_chain(symbol),
'state' : 'active',
'tradability' : 'tradable',
'type' : optionType}
else:
payload = { 'chain_id' : helper.id_for_chain(symbol),
'state' : 'active',
'tradability' : 'tradable'}
data = helper.request_get(url,'pagination',payload)
return(helper.filter(data,info))
def find_options_for_stock_by_expiration(symbol,expirationDate,optionType='both',info=None):
"""Returns a list of all the option orders that match the seach parameters
:param symbol: The ticker of the stock.
:type symbol: str
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param optionType: Can be either 'call' or 'put' or leave blank to get both.
:type optionType: Optional[str]
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
allOptions = find_tradable_options_for_stock(symbol,optionType)
filteredOptions = [item for item in allOptions if item["expiration_date"] == expirationDate
and item['rhs_tradability'] == 'tradable']
for item in filteredOptions:
marketData = get_option_market_data_by_id(item['id'])
item.update(marketData)
return(helper.filter(filteredOptions,info))
def find_options_for_stock_by_strike(symbol,strike,optionType='both',info=None):
"""Returns a list of all the option orders that match the seach parameters
:param symbol: The ticker of the stock.
:type symbol: str
:param strike: Represents the price of the option.
:type strike: str
:param optionType: Can be either 'call' or 'put' or leave blank to get both.
:type optionType: Optional[str]
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
allOptions = find_tradable_options_for_stock(symbol,optionType)
filteredOptions = [item for item in allOptions if float(item["strike_price"]) == float(strike)
and item['rhs_tradability'] == 'tradable']
for item in filteredOptions:
marketData = get_option_market_data_by_id(item['id'])
item.update(marketData)
return(helper.filter(filteredOptions,info))
def find_options_for_stock_by_expiration_and_strike(symbol,expirationDate,strike,optionType='both',info=None):
"""Returns a list of all the option orders that match the seach parameters
:param symbol: The ticker of the stock.
:type symbol: str
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param strike: Represents the price of the option.
:type strike: str
:param optionType: Can be either 'call' or 'put' or leave blank to get both.
:type optionType: Optional[str]
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
allOptions = find_tradable_options_for_stock(symbol,optionType)
filteredOptions = [item for item in allOptions if item["expiration_date"] == expirationDate and float(item["strike_price"]) == float(strike)
and item['rhs_tradability'] == 'tradable']
for item in filteredOptions:
marketData = get_option_market_data_by_id(item['id'])
item.update(marketData)
return(helper.filter(filteredOptions,info))
def find_options_for_list_of_stocks_by_expiration_date(inputSymbols,expirationDate,optionType='both',info=None):
"""Returns a list of all the option orders that match the seach parameters
:param inputSymbols: May be a single stock ticker or a list of stock tickers.
:type inputSymbols: str or list
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param optionType: Can be either 'call' or 'put' or leave blank to get both.
:type optionType: Optional[str]
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
symbols = helper.inputs_to_set(inputSymbols)
try:
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
data = []
url = urls.option_instruments()
for symbol in symbols:
if (optionType == 'put' or optionType == 'call' ):
payload = { 'chain_id' : helper.id_for_chain(symbol),
'expiration_date' : expirationDate,
'state' : 'active',
'tradability' : 'tradable',
'rhs_tradability' : 'tradable',
'type' : optionType}
else:
payload = { 'chain_id' : helper.id_for_chain(symbol),
'expiration_date' : expirationDate,
'state' : 'active',
'tradability' : 'tradable',
'rhs_tradability' : 'tradable'}
otherData = helper.request_get(url,'pagination',payload)
for item in otherData:
if (item['expiration_date'] == expirationDate and item['rhs_tradability'] == 'tradable'):
data.append(item)
for item in data:
marketData = get_option_market_data_by_id(item['id'])
item.update(marketData)
return(helper.filter(data,info))
def get_list_market_data(inputSymbols,expirationDate,info=None):
"""Returns a list of option market data for several stock tickers.
:param inputSymbols: May be a single stock ticker or a list of stock tickers.
:type inputSymbols: str or list
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all stock option market data. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
symbols = helper.inputs_to_set(inputSymbols)
ids = []
data = []
url = urls.option_instruments()
for symbol in symbols:
payload = { 'chain_id' : helper.id_for_chain(symbol),
'expiration_date' : expirationDate,
'state' : 'active',
'tradability' : 'tradable',
'rhs_tradability' : 'tradable'}
otherData = helper.request_get(url,'pagination',payload)
for item in otherData:
if (item['expiration_date'] == expirationDate and item['rhs_tradability'] == 'tradable'):
ids.append(item['id'])
for id in ids:
url = urls.marketdata_options(id)
otherData = helper.request_get(url)
data.append(otherData)
return(helper.filter(data,info))
def get_list_options_of_specific_profitability(inputSymbols,expirationDate,typeProfit="chance_of_profit_short",profitFloor=0.0, profitCeiling=1.0,info=None):
"""Returns a list of option market data for several stock tickers that match a range of profitability.
:param inputSymbols: May be a single stock ticker or a list of stock tickers.
:type inputSymbols: str or list
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param typeProfit: Will either be "chance_of_profit_short" or "chance_of_profit_long".
:type typeProfit: str
:param profitFloor: The lower percentage on scale 0 to 1.
:type profitFloor: int
:param profitCeiling: The higher percentage on scale 0 to 1.
:type profitCeiling: int
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a list of dictionaries of key/value pairs for all stock option market data. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
"""
symbols = helper.inputs_to_set(inputSymbols)
ids = []
data = []
returnData = []
url = urls.option_instruments()
if (typeProfit != "chance_of_profit_short" and typeProfit != "chance_of_profit_long"):
print("Invalid string for 'typeProfit'. Defaulting to 'chance_of_profit_short'.")
typeProfit = "chance_of_profit_short"
for symbol in symbols:
payload = { 'chain_id' : helper.id_for_chain(symbol),
'expiration_date' : expirationDate,
'state' : 'active',
'tradability' : 'tradable',
'rhs_tradability' : 'tradable'}
otherData = helper.request_get(url,'pagination',payload)
for item in otherData:
if (item['rhs_tradability'] == 'tradable'):
ids.append(item['id'])
for id in ids:
url = urls.marketdata_options(id)
otherData = helper.request_get(url)
data.append(otherData)
for item in data:
try:
floatValue = float(item[typeProfit])
if (floatValue > profitFloor and floatValue < profitCeiling):
returnData.append(item)
except:
pass
return(helper.filter(returnData,info))
def get_option_market_data_by_id(id,info=None):
"""Returns the option market data for a stock, including the greeks,
open interest, change of profit, and adjusted mark price.
:param id: The id of the stock.
:type id: str
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
url = urls.marketdata_options(id)
data = helper.request_get(url)
return(helper.filter(data,info))
def get_option_market_data(symbol,expirationDate,strike,optionType,info=None):
"""Returns the option market data for the stock option, including the greeks,
open interest, change of profit, and adjusted mark price.
:param symbol: The ticker of the stock.
:type symbol: str
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param strike: Represents the price of the option.
:type strike: str
:param optionType: Can be either 'call' or 'put'.
:type optionType: str
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
optionID= helper.id_for_option(symbol,expirationDate,strike,optionType)
url = urls.marketdata_options(optionID)
data = helper.request_get(url)
return(helper.filter(data,info))
def get_option_instrument_data_by_id(id,info=None):
"""Returns the option instrument information.
:param id: The id of the stock.
:type id: str
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
url = urls.option_instruments(id)
data = helper.request_get(url)
return(helper.filter(data,info))
def get_option_instrument_data(symbol,expirationDate,strike,optionType,info=None):
"""Returns the option instrument data for the stock option.
:param symbol: The ticker of the stock.
:type symbol: str
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param strike: Represents the price of the option.
:type strike: str
:param optionType: Can be either 'call' or 'put'.
:type optionType: str
:param info: Will filter the results to get a specific value.
:type info: Optional[str]
:returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
optionID= helper.id_for_option(symbol,expirationDate,strike,optionType)
url = urls.option_instruments(optionID)
data = helper.request_get(url)
return(helper.filter(data,info))
def get_option_historicals(symbol,expirationDate,strike,optionType,span='week'):
"""Returns the data that is used to make the graphs.
:param symbol: The ticker of the stock.
:type symbol: str
:param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
:type expirationDate: str
:param strike: Represents the price of the option.
:type strike: str
:param optionType: Can be either 'call' or 'put'.
:type optionType: str
:param span: Sets the range of the data to be either 'day', 'week', 'year', or '5year'. Default is 'week'.
:type span: Optional[str]
:returns: Returns a list that contains a list for each symbol. \
Each list contains a dictionary where each dictionary is for a different time.
"""
try:
symbol = symbol.upper().strip()
optionType = optionType.lower().strip()
except AttributeError as message:
print(message)
return [None]
span_check = ['day','week','year','5year']
if span not in span_check:
print('ERROR: Span must be "day","week","year",or "5year"')
return([None])
if span == 'day':
interval = '5minute'
elif span == 'week':
interval = '10minute'
elif span == 'year':
interval = 'day'
else:
interval = 'week'
optionID = helper.id_for_option(symbol,expirationDate,strike,optionType)
url = urls.option_historicals(optionID)
payload = { 'span' : span,
'interval' : interval}
data = helper.request_get(url,'regular',payload)
return(data)
| 40.296371
| 157
| 0.67749
| 2,645
| 19,987
| 5.04121
| 0.072968
| 0.013874
| 0.017849
| 0.019949
| 0.873931
| 0.854282
| 0.843483
| 0.834558
| 0.825484
| 0.812509
| 0
| 0.000915
| 0.234653
| 19,987
| 495
| 158
| 40.377778
| 0.870759
| 0.465953
| 0
| 0.730435
| 0
| 0
| 0.103431
| 0.013482
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073913
| false
| 0.004348
| 0.008696
| 0
| 0.121739
| 0.047826
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16e24d97ddee0751e0b808b89080074c1b4baba7
| 37,023
|
py
|
Python
|
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
|
ryorda/tensorflow-viennacl
|
054b515feec0a3fca4cfb1f29adbf423c9027c3a
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
|
ryorda/tensorflow-viennacl
|
054b515feec0a3fca4cfb1f29adbf423c9027c3a
|
[
"Apache-2.0"
] | 48
|
2016-07-26T00:11:55.000Z
|
2022-02-23T13:36:33.000Z
|
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch_test.py
|
ryorda/tensorflow-viennacl
|
054b515feec0a3fca4cfb1f29adbf423c9027c3a
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT train function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _squared_loss(label, unused_weights, predictions):
"""Unweighted loss implementation."""
loss = math_ops.reduce_sum(
math_ops.square(predictions - label), 1, keep_dims=True)
return loss
class GbdtTest(test_util.TensorFlowTestCase):
def setUp(self):
super(GbdtTest, self).setUp()
def testExtractFeatures(self):
"""Tests feature extraction."""
with self.test_session():
features = {}
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.zeros([2], dtypes.int64))
features["sparse_int"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.int64),
array_ops.zeros([2], dtypes.int64))
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (gbdt_batch.extract_features(features, None))
self.assertEqual(len(fc_names), 3)
self.assertAllEqual(fc_names,
["dense_float", "sparse_float", "sparse_int"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_float_indices), 1)
self.assertEqual(len(sparse_float_values), 1)
self.assertEqual(len(sparse_float_shapes), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_float_indices[0].eval(),
features["sparse_float"].indices.eval())
self.assertAllEqual(sparse_float_values[0].eval(),
features["sparse_float"].values.eval())
self.assertAllEqual(sparse_float_shapes[0].eval(),
features["sparse_float"].dense_shape.eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_int"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(),
features["sparse_int"].values.eval())
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_int"].dense_shape.eval())
def testExtractFeaturesWithTransformation(self):
"""Tests feature extraction."""
with self.test_session():
features = {}
features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
features["sparse_float"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros([2], dtypes.float32),
array_ops.zeros([2], dtypes.int64))
features["sparse_categorical"] = sparse_tensor.SparseTensor(
array_ops.zeros([2, 2], dtypes.int64),
array_ops.zeros(
[2], dtypes.string), array_ops.zeros([2], dtypes.int64))
feature_columns = set()
feature_columns.add(layers.real_valued_column("dense_float"))
feature_columns.add(
layers.feature_column._real_valued_var_len_column(
"sparse_float", is_sparse=True))
feature_columns.add(
feature_column_lib.sparse_column_with_hash_bucket(
"sparse_categorical", hash_bucket_size=1000000))
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes) = (gbdt_batch.extract_features(
features, feature_columns))
self.assertEqual(len(fc_names), 3)
self.assertAllEqual(fc_names,
["dense_float", "sparse_float", "sparse_categorical"])
self.assertEqual(len(dense_floats), 1)
self.assertEqual(len(sparse_float_indices), 1)
self.assertEqual(len(sparse_float_values), 1)
self.assertEqual(len(sparse_float_shapes), 1)
self.assertEqual(len(sparse_int_indices), 1)
self.assertEqual(len(sparse_int_values), 1)
self.assertEqual(len(sparse_int_shapes), 1)
self.assertAllEqual(dense_floats[0].eval(),
features["dense_float"].eval())
self.assertAllEqual(sparse_float_indices[0].eval(),
features["sparse_float"].indices.eval())
self.assertAllEqual(sparse_float_values[0].eval(),
features["sparse_float"].values.eval())
self.assertAllEqual(sparse_float_shapes[0].eval(),
features["sparse_float"].dense_shape.eval())
self.assertAllEqual(sparse_int_indices[0].eval(),
features["sparse_categorical"].indices.eval())
self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
self.assertAllEqual(sparse_int_shapes[0].eval(),
features["sparse_categorical"].dense_shape.eval())
def testTrainFnChiefNoBiasCentering(self):
"""Tests the train function running on chief without bias centering."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1, features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefScalingNumberOfExamples(self):
"""Tests the train function running on chief without bias centering."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
num_examples_fn = (
lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=num_examples_fn,
learner_config=learner_config,
logits_dimension=1, features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [0.1])
self.assertEquals(stamp_token.eval(), 2)
expected_tree = """
nodes {
dense_float_binary_split {
threshold: 1.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 0
}
}
nodes {
leaf {
vector {
value: 0.25
}
}
}
nodes {
leaf {
vector {
value: 0.0
}
}
}"""
self.assertProtoEquals(expected_tree, output.trees[0])
def testTrainFnChiefWithBiasCentering(self):
"""Tests the train function running on chief with bias centering."""
with self.test_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1, features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 12,
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect bias to be centered.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
expected_tree = """
nodes {
leaf {
vector {
value: 0.25
}
}
}"""
self.assertEquals(len(output.trees), 1)
self.assertAllEqual(output.tree_weights, [1.0])
self.assertProtoEquals(expected_tree, output.trees[0])
self.assertEquals(stamp_token.eval(), 1)
def testTrainFnNonChiefNoBiasCentering(self):
"""Tests the train function running on worker without bias centering."""
with self.test_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1, features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Regardless of how many times the train op is run, a non-chief worker
# can only accumulate stats so the tree ensemble never changes.
for _ in range(5):
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 0)
def testTrainFnNonChiefWithCentering(self):
"""Tests the train function running on worker with bias centering."""
with self.test_session():
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1, features=features)
predictions = array_ops.constant(
[[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
partition_ids = array_ops.zeros([4], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp
}
labels = array_ops.ones([4, 1], dtypes.float32)
weights = array_ops.ones([4, 1], dtypes.float32)
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
_squared_loss(labels, weights, predictions)),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# Regardless of how many times the train op is run, a non-chief worker
# can only accumulate stats so the tree ensemble never changes.
for _ in range(5):
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 0)
def testPredictFn(self):
"""Tests the predict function."""
with self.test_session() as sess:
# Create ensemble with one bias node.
ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
leaf {
vector {
value: 0.25
}
}
}
}
tree_weights: 1.0
tree_metadata {
num_tree_weight_updates: 1
num_layers_grown: 1
is_finalized: true
}""", ensemble_config)
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=3,
tree_ensemble_config=ensemble_config.SerializeToString(),
name="tree_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
learner_config.num_classes = 2
learner_config.regularization.l1 = 0
learner_config.regularization.l2 = 0
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=False,
num_ps_replicas=0,
center_bias=True,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=1, features=features)
# Create predict op.
mode = model_fn.ModeKeys.EVAL
predictions_dict = sess.run(gbdt_model.predict(mode))
self.assertEquals(predictions_dict["ensemble_stamp"], 3)
self.assertAllClose(predictions_dict["predictions"], [[0.25], [0.25],
[0.25], [0.25]])
self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
def testTrainFnMulticlassFullHessian(self):
"""Tests the GBDT train for multiclass full hessian."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
batch_size = 3
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]
expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassDiagonalHessian(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassTreePerClass(self):
"""Tests the GBDT train for multiclass tree per class strategy."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {
"dense_float": array_ops.constant(
[[1.0], [1.5], [2.0]], dtypes.float32),
}
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
batch_size = 3
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 2.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
# This should result in a tree built for a class 2.
"num_trees": 13,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# One node for a split, two children nodes.
self.assertEqual(3, len(output.trees[0].nodes))
# Leafs will have a sparse vector for class 3.
self.assertEqual(1,
len(output.trees[0].nodes[1].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
-1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])
self.assertEqual(1,
len(output.trees[0].nodes[2].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
0.893284678459, output.trees[0].nodes[2].leaf.sparse_vector.value[0])
if __name__ == "__main__":
googletest.main()
| 40.198697
| 88
| 0.650434
| 4,309
| 37,023
| 5.346716
| 0.084706
| 0.049091
| 0.004167
| 0.019098
| 0.874213
| 0.863319
| 0.849039
| 0.832805
| 0.821043
| 0.805243
| 0
| 0.030509
| 0.246603
| 37,023
| 920
| 89
| 40.242391
| 0.795468
| 0.09251
| 0
| 0.820717
| 0
| 0
| 0.080709
| 0.007411
| 0
| 0
| 0
| 0
| 0.119522
| 1
| 0.017264
| false
| 0
| 0.02656
| 0
| 0.046481
| 0.001328
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc48eefc4f53768b5f08abd5a68be6409cfff066
| 103,893
|
py
|
Python
|
telethon/tl/types/secret.py
|
yande-eghosa/Telegram-Click-Bot
|
d2905373b93475ea3b4562128f84a66aee0eb7a0
|
[
"MIT"
] | 1
|
2020-11-22T20:30:27.000Z
|
2020-11-22T20:30:27.000Z
|
telethon/tl/types/secret.py
|
yande-eghosa/Telegram-Click-Bot
|
d2905373b93475ea3b4562128f84a66aee0eb7a0
|
[
"MIT"
] | null | null | null |
telethon/tl/types/secret.py
|
yande-eghosa/Telegram-Click-Bot
|
d2905373b93475ea3b4562128f84a66aee0eb7a0
|
[
"MIT"
] | null | null | null |
"""File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLObject
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeDecryptedMessage, TypeDecryptedMessageAction, TypeDecryptedMessageMedia, TypeDocumentAttribute, TypeFileLocation, TypeInputStickerSet, TypeMessageEntity, TypePhotoSize, TypeSendMessageAction
class DecryptedMessage(TLObject):
CONSTRUCTOR_ID = 0x91cc4674
SUBCLASS_OF_ID = 0x5182c3e8
def __init__(self, ttl: int, message: str, random_id: int=None, media: Optional['TypeDecryptedMessageMedia']=None, entities: Optional[List['TypeMessageEntity']]=None, via_bot_name: Optional[str]=None, reply_to_random_id: Optional[int]=None, grouped_id: Optional[int]=None):
"""
Constructor for secret.DecryptedMessage: Instance of either DecryptedMessage8, DecryptedMessageService8, DecryptedMessage23, DecryptedMessageService, DecryptedMessage46, DecryptedMessage.
"""
self.ttl = ttl
self.message = message
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
self.media = media
self.entities = entities
self.via_bot_name = via_bot_name
self.reply_to_random_id = reply_to_random_id
self.grouped_id = grouped_id
def to_dict(self):
return {
'_': 'DecryptedMessage',
'ttl': self.ttl,
'message': self.message,
'random_id': self.random_id,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
'via_bot_name': self.via_bot_name,
'reply_to_random_id': self.reply_to_random_id,
'grouped_id': self.grouped_id
}
def __bytes__(self):
return b''.join((
b'tF\xcc\x91',
struct.pack('<I', (0 if self.media is None or self.media is False else 512) | (0 if self.entities is None or self.entities is False else 128) | (0 if self.via_bot_name is None or self.via_bot_name is False else 2048) | (0 if self.reply_to_random_id is None or self.reply_to_random_id is False else 8) | (0 if self.grouped_id is None or self.grouped_id is False else 131072)),
struct.pack('<q', self.random_id),
struct.pack('<i', self.ttl),
self.serialize_bytes(self.message),
b'' if self.media is None or self.media is False else (bytes(self.media)),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(bytes(x) for x in self.entities))),
b'' if self.via_bot_name is None or self.via_bot_name is False else (self.serialize_bytes(self.via_bot_name)),
b'' if self.reply_to_random_id is None or self.reply_to_random_id is False else (struct.pack('<q', self.reply_to_random_id)),
b'' if self.grouped_id is None or self.grouped_id is False else (struct.pack('<q', self.grouped_id)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_random_id = reader.read_long()
_ttl = reader.read_int()
_message = reader.tgread_string()
if flags & 512:
_media = reader.tgread_object()
else:
_media = None
if flags & 128:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
if flags & 2048:
_via_bot_name = reader.tgread_string()
else:
_via_bot_name = None
if flags & 8:
_reply_to_random_id = reader.read_long()
else:
_reply_to_random_id = None
if flags & 131072:
_grouped_id = reader.read_long()
else:
_grouped_id = None
return cls(ttl=_ttl, message=_message, random_id=_random_id, media=_media, entities=_entities, via_bot_name=_via_bot_name, reply_to_random_id=_reply_to_random_id, grouped_id=_grouped_id)
class DecryptedMessage23(TLObject):
CONSTRUCTOR_ID = 0x204d3878
SUBCLASS_OF_ID = 0x5182c3e8
def __init__(self, ttl: int, message: str, media: 'TypeDecryptedMessageMedia', random_id: int=None):
"""
Constructor for secret.DecryptedMessage: Instance of either DecryptedMessage8, DecryptedMessageService8, DecryptedMessage23, DecryptedMessageService, DecryptedMessage46, DecryptedMessage.
"""
self.ttl = ttl
self.message = message
self.media = media
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'DecryptedMessage23',
'ttl': self.ttl,
'message': self.message,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'random_id': self.random_id
}
def __bytes__(self):
return b''.join((
b'x8M ',
struct.pack('<q', self.random_id),
struct.pack('<i', self.ttl),
self.serialize_bytes(self.message),
bytes(self.media),
))
@classmethod
def from_reader(cls, reader):
_random_id = reader.read_long()
_ttl = reader.read_int()
_message = reader.tgread_string()
_media = reader.tgread_object()
return cls(ttl=_ttl, message=_message, media=_media, random_id=_random_id)
class DecryptedMessage46(TLObject):
CONSTRUCTOR_ID = 0x36b091de
SUBCLASS_OF_ID = 0x5182c3e8
def __init__(self, ttl: int, message: str, random_id: int=None, media: Optional['TypeDecryptedMessageMedia']=None, entities: Optional[List['TypeMessageEntity']]=None, via_bot_name: Optional[str]=None, reply_to_random_id: Optional[int]=None):
"""
Constructor for secret.DecryptedMessage: Instance of either DecryptedMessage8, DecryptedMessageService8, DecryptedMessage23, DecryptedMessageService, DecryptedMessage46, DecryptedMessage.
"""
self.ttl = ttl
self.message = message
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
self.media = media
self.entities = entities
self.via_bot_name = via_bot_name
self.reply_to_random_id = reply_to_random_id
def to_dict(self):
return {
'_': 'DecryptedMessage46',
'ttl': self.ttl,
'message': self.message,
'random_id': self.random_id,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
'via_bot_name': self.via_bot_name,
'reply_to_random_id': self.reply_to_random_id
}
def __bytes__(self):
return b''.join((
b'\xde\x91\xb06',
struct.pack('<I', (0 if self.media is None or self.media is False else 512) | (0 if self.entities is None or self.entities is False else 128) | (0 if self.via_bot_name is None or self.via_bot_name is False else 2048) | (0 if self.reply_to_random_id is None or self.reply_to_random_id is False else 8)),
struct.pack('<q', self.random_id),
struct.pack('<i', self.ttl),
self.serialize_bytes(self.message),
b'' if self.media is None or self.media is False else (bytes(self.media)),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(bytes(x) for x in self.entities))),
b'' if self.via_bot_name is None or self.via_bot_name is False else (self.serialize_bytes(self.via_bot_name)),
b'' if self.reply_to_random_id is None or self.reply_to_random_id is False else (struct.pack('<q', self.reply_to_random_id)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_random_id = reader.read_long()
_ttl = reader.read_int()
_message = reader.tgread_string()
if flags & 512:
_media = reader.tgread_object()
else:
_media = None
if flags & 128:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
if flags & 2048:
_via_bot_name = reader.tgread_string()
else:
_via_bot_name = None
if flags & 8:
_reply_to_random_id = reader.read_long()
else:
_reply_to_random_id = None
return cls(ttl=_ttl, message=_message, random_id=_random_id, media=_media, entities=_entities, via_bot_name=_via_bot_name, reply_to_random_id=_reply_to_random_id)
class DecryptedMessage8(TLObject):
CONSTRUCTOR_ID = 0x1f814f1f
SUBCLASS_OF_ID = 0x5182c3e8
def __init__(self, random_bytes: bytes, message: str, media: 'TypeDecryptedMessageMedia', random_id: int=None):
"""
Constructor for secret.DecryptedMessage: Instance of either DecryptedMessage8, DecryptedMessageService8, DecryptedMessage23, DecryptedMessageService, DecryptedMessage46, DecryptedMessage.
"""
self.random_bytes = random_bytes
self.message = message
self.media = media
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'DecryptedMessage8',
'random_bytes': self.random_bytes,
'message': self.message,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'random_id': self.random_id
}
def __bytes__(self):
return b''.join((
b'\x1fO\x81\x1f',
struct.pack('<q', self.random_id),
self.serialize_bytes(self.random_bytes),
self.serialize_bytes(self.message),
bytes(self.media),
))
@classmethod
def from_reader(cls, reader):
_random_id = reader.read_long()
_random_bytes = reader.tgread_bytes()
_message = reader.tgread_string()
_media = reader.tgread_object()
return cls(random_bytes=_random_bytes, message=_message, media=_media, random_id=_random_id)
class DecryptedMessageActionAbortKey(TLObject):
CONSTRUCTOR_ID = 0xdd05ec6b
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, exchange_id: int):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.exchange_id = exchange_id
def to_dict(self):
return {
'_': 'DecryptedMessageActionAbortKey',
'exchange_id': self.exchange_id
}
def __bytes__(self):
return b''.join((
b'k\xec\x05\xdd',
struct.pack('<q', self.exchange_id),
))
@classmethod
def from_reader(cls, reader):
_exchange_id = reader.read_long()
return cls(exchange_id=_exchange_id)
class DecryptedMessageActionAcceptKey(TLObject):
CONSTRUCTOR_ID = 0x6fe1735b
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, exchange_id: int, g_b: bytes, key_fingerprint: int):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.exchange_id = exchange_id
self.g_b = g_b
self.key_fingerprint = key_fingerprint
def to_dict(self):
return {
'_': 'DecryptedMessageActionAcceptKey',
'exchange_id': self.exchange_id,
'g_b': self.g_b,
'key_fingerprint': self.key_fingerprint
}
def __bytes__(self):
return b''.join((
b'[s\xe1o',
struct.pack('<q', self.exchange_id),
self.serialize_bytes(self.g_b),
struct.pack('<q', self.key_fingerprint),
))
@classmethod
def from_reader(cls, reader):
_exchange_id = reader.read_long()
_g_b = reader.tgread_bytes()
_key_fingerprint = reader.read_long()
return cls(exchange_id=_exchange_id, g_b=_g_b, key_fingerprint=_key_fingerprint)
class DecryptedMessageActionCommitKey(TLObject):
CONSTRUCTOR_ID = 0xec2e0b9b
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, exchange_id: int, key_fingerprint: int):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.exchange_id = exchange_id
self.key_fingerprint = key_fingerprint
def to_dict(self):
return {
'_': 'DecryptedMessageActionCommitKey',
'exchange_id': self.exchange_id,
'key_fingerprint': self.key_fingerprint
}
def __bytes__(self):
return b''.join((
b'\x9b\x0b.\xec',
struct.pack('<q', self.exchange_id),
struct.pack('<q', self.key_fingerprint),
))
@classmethod
def from_reader(cls, reader):
_exchange_id = reader.read_long()
_key_fingerprint = reader.read_long()
return cls(exchange_id=_exchange_id, key_fingerprint=_key_fingerprint)
class DecryptedMessageActionDeleteMessages(TLObject):
CONSTRUCTOR_ID = 0x65614304
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, random_ids: List[int]):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.random_ids = random_ids
def to_dict(self):
return {
'_': 'DecryptedMessageActionDeleteMessages',
'random_ids': [] if self.random_ids is None else self.random_ids[:]
}
def __bytes__(self):
return b''.join((
b'\x04Cae',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.random_ids)),b''.join(struct.pack('<q', x) for x in self.random_ids),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_random_ids = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_random_ids.append(_x)
return cls(random_ids=_random_ids)
class DecryptedMessageActionFlushHistory(TLObject):
CONSTRUCTOR_ID = 0x6719e45c
SUBCLASS_OF_ID = 0x3eecb877
def to_dict(self):
return {
'_': 'DecryptedMessageActionFlushHistory'
}
def __bytes__(self):
return b''.join((
b'\\\xe4\x19g',
))
@classmethod
def from_reader(cls, reader):
return cls()
class DecryptedMessageActionNoop(TLObject):
CONSTRUCTOR_ID = 0xa82fdd63
SUBCLASS_OF_ID = 0x3eecb877
def to_dict(self):
return {
'_': 'DecryptedMessageActionNoop'
}
def __bytes__(self):
return b''.join((
b'c\xdd/\xa8',
))
@classmethod
def from_reader(cls, reader):
return cls()
class DecryptedMessageActionNotifyLayer(TLObject):
CONSTRUCTOR_ID = 0xf3048883
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, layer: int):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.layer = layer
def to_dict(self):
return {
'_': 'DecryptedMessageActionNotifyLayer',
'layer': self.layer
}
def __bytes__(self):
return b''.join((
b'\x83\x88\x04\xf3',
struct.pack('<i', self.layer),
))
@classmethod
def from_reader(cls, reader):
_layer = reader.read_int()
return cls(layer=_layer)
class DecryptedMessageActionReadMessages(TLObject):
CONSTRUCTOR_ID = 0xc4f40be
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, random_ids: List[int]):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.random_ids = random_ids
def to_dict(self):
return {
'_': 'DecryptedMessageActionReadMessages',
'random_ids': [] if self.random_ids is None else self.random_ids[:]
}
def __bytes__(self):
return b''.join((
b'\xbe@O\x0c',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.random_ids)),b''.join(struct.pack('<q', x) for x in self.random_ids),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_random_ids = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_random_ids.append(_x)
return cls(random_ids=_random_ids)
class DecryptedMessageActionRequestKey(TLObject):
CONSTRUCTOR_ID = 0xf3c9611b
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, exchange_id: int, g_a: bytes):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.exchange_id = exchange_id
self.g_a = g_a
def to_dict(self):
return {
'_': 'DecryptedMessageActionRequestKey',
'exchange_id': self.exchange_id,
'g_a': self.g_a
}
def __bytes__(self):
return b''.join((
b'\x1ba\xc9\xf3',
struct.pack('<q', self.exchange_id),
self.serialize_bytes(self.g_a),
))
@classmethod
def from_reader(cls, reader):
_exchange_id = reader.read_long()
_g_a = reader.tgread_bytes()
return cls(exchange_id=_exchange_id, g_a=_g_a)
class DecryptedMessageActionResend(TLObject):
CONSTRUCTOR_ID = 0x511110b0
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, start_seq_no: int, end_seq_no: int):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.start_seq_no = start_seq_no
self.end_seq_no = end_seq_no
def to_dict(self):
return {
'_': 'DecryptedMessageActionResend',
'start_seq_no': self.start_seq_no,
'end_seq_no': self.end_seq_no
}
def __bytes__(self):
return b''.join((
b'\xb0\x10\x11Q',
struct.pack('<i', self.start_seq_no),
struct.pack('<i', self.end_seq_no),
))
@classmethod
def from_reader(cls, reader):
_start_seq_no = reader.read_int()
_end_seq_no = reader.read_int()
return cls(start_seq_no=_start_seq_no, end_seq_no=_end_seq_no)
class DecryptedMessageActionScreenshotMessages(TLObject):
CONSTRUCTOR_ID = 0x8ac1f475
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, random_ids: List[int]):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.random_ids = random_ids
def to_dict(self):
return {
'_': 'DecryptedMessageActionScreenshotMessages',
'random_ids': [] if self.random_ids is None else self.random_ids[:]
}
def __bytes__(self):
return b''.join((
b'u\xf4\xc1\x8a',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.random_ids)),b''.join(struct.pack('<q', x) for x in self.random_ids),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_random_ids = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_random_ids.append(_x)
return cls(random_ids=_random_ids)
class DecryptedMessageActionSetMessageTTL(TLObject):
CONSTRUCTOR_ID = 0xa1733aec
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, ttl_seconds: int):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.ttl_seconds = ttl_seconds
def to_dict(self):
return {
'_': 'DecryptedMessageActionSetMessageTTL',
'ttl_seconds': self.ttl_seconds
}
def __bytes__(self):
return b''.join((
b'\xec:s\xa1',
struct.pack('<i', self.ttl_seconds),
))
@classmethod
def from_reader(cls, reader):
_ttl_seconds = reader.read_int()
return cls(ttl_seconds=_ttl_seconds)
class DecryptedMessageActionTyping(TLObject):
CONSTRUCTOR_ID = 0xccb27641
SUBCLASS_OF_ID = 0x3eecb877
def __init__(self, action: 'TypeSendMessageAction'):
"""
Constructor for secret.DecryptedMessageAction: Instance of either DecryptedMessageActionSetMessageTTL, DecryptedMessageActionReadMessages, DecryptedMessageActionDeleteMessages, DecryptedMessageActionScreenshotMessages, DecryptedMessageActionFlushHistory, DecryptedMessageActionResend, DecryptedMessageActionNotifyLayer, DecryptedMessageActionTyping, DecryptedMessageActionRequestKey, DecryptedMessageActionAcceptKey, DecryptedMessageActionAbortKey, DecryptedMessageActionCommitKey, DecryptedMessageActionNoop.
"""
self.action = action
def to_dict(self):
return {
'_': 'DecryptedMessageActionTyping',
'action': self.action.to_dict() if isinstance(self.action, TLObject) else self.action
}
def __bytes__(self):
return b''.join((
b'Av\xb2\xcc',
bytes(self.action),
))
@classmethod
def from_reader(cls, reader):
_action = reader.tgread_object()
return cls(action=_action)
class DecryptedMessageLayer(TLObject):
CONSTRUCTOR_ID = 0x1be31789
SUBCLASS_OF_ID = 0x18576013
def __init__(self, random_bytes: bytes, layer: int, in_seq_no: int, out_seq_no: int, message: 'TypeDecryptedMessage'):
"""
Constructor for secret.DecryptedMessageLayer: Instance of DecryptedMessageLayer.
"""
self.random_bytes = random_bytes
self.layer = layer
self.in_seq_no = in_seq_no
self.out_seq_no = out_seq_no
self.message = message
def to_dict(self):
return {
'_': 'DecryptedMessageLayer',
'random_bytes': self.random_bytes,
'layer': self.layer,
'in_seq_no': self.in_seq_no,
'out_seq_no': self.out_seq_no,
'message': self.message.to_dict() if isinstance(self.message, TLObject) else self.message
}
def __bytes__(self):
return b''.join((
b'\x89\x17\xe3\x1b',
self.serialize_bytes(self.random_bytes),
struct.pack('<i', self.layer),
struct.pack('<i', self.in_seq_no),
struct.pack('<i', self.out_seq_no),
bytes(self.message),
))
@classmethod
def from_reader(cls, reader):
_random_bytes = reader.tgread_bytes()
_layer = reader.read_int()
_in_seq_no = reader.read_int()
_out_seq_no = reader.read_int()
_message = reader.tgread_object()
return cls(random_bytes=_random_bytes, layer=_layer, in_seq_no=_in_seq_no, out_seq_no=_out_seq_no, message=_message)
class DecryptedMessageMediaAudio(TLObject):
CONSTRUCTOR_ID = 0x57e0a9cb
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, duration: int, mime_type: str, size: int, key: bytes, iv: bytes):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.duration = duration
self.mime_type = mime_type
self.size = size
self.key = key
self.iv = iv
def to_dict(self):
return {
'_': 'DecryptedMessageMediaAudio',
'duration': self.duration,
'mime_type': self.mime_type,
'size': self.size,
'key': self.key,
'iv': self.iv
}
def __bytes__(self):
return b''.join((
b'\xcb\xa9\xe0W',
struct.pack('<i', self.duration),
self.serialize_bytes(self.mime_type),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
))
@classmethod
def from_reader(cls, reader):
_duration = reader.read_int()
_mime_type = reader.tgread_string()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
return cls(duration=_duration, mime_type=_mime_type, size=_size, key=_key, iv=_iv)
class DecryptedMessageMediaAudio8(TLObject):
CONSTRUCTOR_ID = 0x6080758f
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, duration: int, size: int, key: bytes, iv: bytes):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.duration = duration
self.size = size
self.key = key
self.iv = iv
def to_dict(self):
return {
'_': 'DecryptedMessageMediaAudio8',
'duration': self.duration,
'size': self.size,
'key': self.key,
'iv': self.iv
}
def __bytes__(self):
return b''.join((
b'\x8fu\x80`',
struct.pack('<i', self.duration),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
))
@classmethod
def from_reader(cls, reader):
_duration = reader.read_int()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
return cls(duration=_duration, size=_size, key=_key, iv=_iv)
class DecryptedMessageMediaContact(TLObject):
CONSTRUCTOR_ID = 0x588a0a97
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, phone_number: str, first_name: str, last_name: str, user_id: int):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.phone_number = phone_number
self.first_name = first_name
self.last_name = last_name
self.user_id = user_id
def to_dict(self):
return {
'_': 'DecryptedMessageMediaContact',
'phone_number': self.phone_number,
'first_name': self.first_name,
'last_name': self.last_name,
'user_id': self.user_id
}
def __bytes__(self):
return b''.join((
b'\x97\n\x8aX',
self.serialize_bytes(self.phone_number),
self.serialize_bytes(self.first_name),
self.serialize_bytes(self.last_name),
struct.pack('<i', self.user_id),
))
@classmethod
def from_reader(cls, reader):
_phone_number = reader.tgread_string()
_first_name = reader.tgread_string()
_last_name = reader.tgread_string()
_user_id = reader.read_int()
return cls(phone_number=_phone_number, first_name=_first_name, last_name=_last_name, user_id=_user_id)
class DecryptedMessageMediaDocument(TLObject):
CONSTRUCTOR_ID = 0x7afe8ae2
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, mime_type: str, size: int, key: bytes, iv: bytes, attributes: List['TypeDocumentAttribute'], caption: str):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.mime_type = mime_type
self.size = size
self.key = key
self.iv = iv
self.attributes = attributes
self.caption = caption
def to_dict(self):
return {
'_': 'DecryptedMessageMediaDocument',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'mime_type': self.mime_type,
'size': self.size,
'key': self.key,
'iv': self.iv,
'attributes': [] if self.attributes is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.attributes],
'caption': self.caption
}
def __bytes__(self):
return b''.join((
b'\xe2\x8a\xfez',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
self.serialize_bytes(self.mime_type),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.attributes)),b''.join(bytes(x) for x in self.attributes),
self.serialize_bytes(self.caption),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_mime_type = reader.tgread_string()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
reader.read_int()
_attributes = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_attributes.append(_x)
_caption = reader.tgread_string()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, mime_type=_mime_type, size=_size, key=_key, iv=_iv, attributes=_attributes, caption=_caption)
class DecryptedMessageMediaDocument23(TLObject):
CONSTRUCTOR_ID = 0xb095434b
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, file_name: str, mime_type: str, size: int, key: bytes, iv: bytes):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.file_name = file_name
self.mime_type = mime_type
self.size = size
self.key = key
self.iv = iv
def to_dict(self):
return {
'_': 'DecryptedMessageMediaDocument23',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'file_name': self.file_name,
'mime_type': self.mime_type,
'size': self.size,
'key': self.key,
'iv': self.iv
}
def __bytes__(self):
return b''.join((
b'KC\x95\xb0',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
self.serialize_bytes(self.file_name),
self.serialize_bytes(self.mime_type),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_file_name = reader.tgread_string()
_mime_type = reader.tgread_string()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, file_name=_file_name, mime_type=_mime_type, size=_size, key=_key, iv=_iv)
class DecryptedMessageMediaEmpty(TLObject):
CONSTRUCTOR_ID = 0x89f5c4a
SUBCLASS_OF_ID = 0x96a0e005
def to_dict(self):
return {
'_': 'DecryptedMessageMediaEmpty'
}
def __bytes__(self):
return b''.join((
b'J\\\x9f\x08',
))
@classmethod
def from_reader(cls, reader):
return cls()
class DecryptedMessageMediaExternalDocument(TLObject):
CONSTRUCTOR_ID = 0xfa95b0dd
SUBCLASS_OF_ID = 0x96a0e005
# noinspection PyShadowingBuiltins
def __init__(self, id: int, access_hash: int, date: Optional[datetime], mime_type: str, size: int, thumb: 'TypePhotoSize', dc_id: int, attributes: List['TypeDocumentAttribute']):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.id = id
self.access_hash = access_hash
self.date = date
self.mime_type = mime_type
self.size = size
self.thumb = thumb
self.dc_id = dc_id
self.attributes = attributes
def to_dict(self):
return {
'_': 'DecryptedMessageMediaExternalDocument',
'id': self.id,
'access_hash': self.access_hash,
'date': self.date,
'mime_type': self.mime_type,
'size': self.size,
'thumb': self.thumb.to_dict() if isinstance(self.thumb, TLObject) else self.thumb,
'dc_id': self.dc_id,
'attributes': [] if self.attributes is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.attributes]
}
def __bytes__(self):
return b''.join((
b'\xdd\xb0\x95\xfa',
struct.pack('<q', self.id),
struct.pack('<q', self.access_hash),
self.serialize_datetime(self.date),
self.serialize_bytes(self.mime_type),
struct.pack('<i', self.size),
bytes(self.thumb),
struct.pack('<i', self.dc_id),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.attributes)),b''.join(bytes(x) for x in self.attributes),
))
@classmethod
def from_reader(cls, reader):
_id = reader.read_long()
_access_hash = reader.read_long()
_date = reader.tgread_date()
_mime_type = reader.tgread_string()
_size = reader.read_int()
_thumb = reader.tgread_object()
_dc_id = reader.read_int()
reader.read_int()
_attributes = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_attributes.append(_x)
return cls(id=_id, access_hash=_access_hash, date=_date, mime_type=_mime_type, size=_size, thumb=_thumb, dc_id=_dc_id, attributes=_attributes)
class DecryptedMessageMediaGeoPoint(TLObject):
CONSTRUCTOR_ID = 0x35480a59
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, lat: float, long: float):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.lat = lat
self.long = long
def to_dict(self):
return {
'_': 'DecryptedMessageMediaGeoPoint',
'lat': self.lat,
'long': self.long
}
def __bytes__(self):
return b''.join((
b'Y\nH5',
struct.pack('<d', self.lat),
struct.pack('<d', self.long),
))
@classmethod
def from_reader(cls, reader):
_lat = reader.read_double()
_long = reader.read_double()
return cls(lat=_lat, long=_long)
class DecryptedMessageMediaPhoto(TLObject):
CONSTRUCTOR_ID = 0xf1fa8d78
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, w: int, h: int, size: int, key: bytes, iv: bytes, caption: str):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.w = w
self.h = h
self.size = size
self.key = key
self.iv = iv
self.caption = caption
def to_dict(self):
return {
'_': 'DecryptedMessageMediaPhoto',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'w': self.w,
'h': self.h,
'size': self.size,
'key': self.key,
'iv': self.iv,
'caption': self.caption
}
def __bytes__(self):
return b''.join((
b'x\x8d\xfa\xf1',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
self.serialize_bytes(self.caption),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_w = reader.read_int()
_h = reader.read_int()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
_caption = reader.tgread_string()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, w=_w, h=_h, size=_size, key=_key, iv=_iv, caption=_caption)
class DecryptedMessageMediaPhoto23(TLObject):
CONSTRUCTOR_ID = 0x32798a8c
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, w: int, h: int, size: int, key: bytes, iv: bytes):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.w = w
self.h = h
self.size = size
self.key = key
self.iv = iv
def to_dict(self):
return {
'_': 'DecryptedMessageMediaPhoto23',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'w': self.w,
'h': self.h,
'size': self.size,
'key': self.key,
'iv': self.iv
}
def __bytes__(self):
return b''.join((
b'\x8c\x8ay2',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_w = reader.read_int()
_h = reader.read_int()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, w=_w, h=_h, size=_size, key=_key, iv=_iv)
class DecryptedMessageMediaVenue(TLObject):
CONSTRUCTOR_ID = 0x8a0df56f
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, lat: float, long: float, title: str, address: str, provider: str, venue_id: str):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.lat = lat
self.long = long
self.title = title
self.address = address
self.provider = provider
self.venue_id = venue_id
def to_dict(self):
return {
'_': 'DecryptedMessageMediaVenue',
'lat': self.lat,
'long': self.long,
'title': self.title,
'address': self.address,
'provider': self.provider,
'venue_id': self.venue_id
}
def __bytes__(self):
return b''.join((
b'o\xf5\r\x8a',
struct.pack('<d', self.lat),
struct.pack('<d', self.long),
self.serialize_bytes(self.title),
self.serialize_bytes(self.address),
self.serialize_bytes(self.provider),
self.serialize_bytes(self.venue_id),
))
@classmethod
def from_reader(cls, reader):
_lat = reader.read_double()
_long = reader.read_double()
_title = reader.tgread_string()
_address = reader.tgread_string()
_provider = reader.tgread_string()
_venue_id = reader.tgread_string()
return cls(lat=_lat, long=_long, title=_title, address=_address, provider=_provider, venue_id=_venue_id)
class DecryptedMessageMediaVideo(TLObject):
CONSTRUCTOR_ID = 0x970c8c0e
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, duration: int, mime_type: str, w: int, h: int, size: int, key: bytes, iv: bytes, caption: str):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.duration = duration
self.mime_type = mime_type
self.w = w
self.h = h
self.size = size
self.key = key
self.iv = iv
self.caption = caption
def to_dict(self):
return {
'_': 'DecryptedMessageMediaVideo',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'duration': self.duration,
'mime_type': self.mime_type,
'w': self.w,
'h': self.h,
'size': self.size,
'key': self.key,
'iv': self.iv,
'caption': self.caption
}
def __bytes__(self):
return b''.join((
b'\x0e\x8c\x0c\x97',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
struct.pack('<i', self.duration),
self.serialize_bytes(self.mime_type),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
self.serialize_bytes(self.caption),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_duration = reader.read_int()
_mime_type = reader.tgread_string()
_w = reader.read_int()
_h = reader.read_int()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
_caption = reader.tgread_string()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, duration=_duration, mime_type=_mime_type, w=_w, h=_h, size=_size, key=_key, iv=_iv, caption=_caption)
class DecryptedMessageMediaVideo23(TLObject):
CONSTRUCTOR_ID = 0x524a415d
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, duration: int, mime_type: str, w: int, h: int, size: int, key: bytes, iv: bytes):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.duration = duration
self.mime_type = mime_type
self.w = w
self.h = h
self.size = size
self.key = key
self.iv = iv
def to_dict(self):
return {
'_': 'DecryptedMessageMediaVideo23',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'duration': self.duration,
'mime_type': self.mime_type,
'w': self.w,
'h': self.h,
'size': self.size,
'key': self.key,
'iv': self.iv
}
def __bytes__(self):
return b''.join((
b']AJR',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
struct.pack('<i', self.duration),
self.serialize_bytes(self.mime_type),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_duration = reader.read_int()
_mime_type = reader.tgread_string()
_w = reader.read_int()
_h = reader.read_int()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, duration=_duration, mime_type=_mime_type, w=_w, h=_h, size=_size, key=_key, iv=_iv)
class DecryptedMessageMediaVideo8(TLObject):
CONSTRUCTOR_ID = 0x4cee6ef3
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, thumb: bytes, thumb_w: int, thumb_h: int, duration: int, w: int, h: int, size: int, key: bytes, iv: bytes):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.thumb = thumb
self.thumb_w = thumb_w
self.thumb_h = thumb_h
self.duration = duration
self.w = w
self.h = h
self.size = size
self.key = key
self.iv = iv
def to_dict(self):
return {
'_': 'DecryptedMessageMediaVideo8',
'thumb': self.thumb,
'thumb_w': self.thumb_w,
'thumb_h': self.thumb_h,
'duration': self.duration,
'w': self.w,
'h': self.h,
'size': self.size,
'key': self.key,
'iv': self.iv
}
def __bytes__(self):
return b''.join((
b'\xf3n\xeeL',
self.serialize_bytes(self.thumb),
struct.pack('<i', self.thumb_w),
struct.pack('<i', self.thumb_h),
struct.pack('<i', self.duration),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
struct.pack('<i', self.size),
self.serialize_bytes(self.key),
self.serialize_bytes(self.iv),
))
@classmethod
def from_reader(cls, reader):
_thumb = reader.tgread_bytes()
_thumb_w = reader.read_int()
_thumb_h = reader.read_int()
_duration = reader.read_int()
_w = reader.read_int()
_h = reader.read_int()
_size = reader.read_int()
_key = reader.tgread_bytes()
_iv = reader.tgread_bytes()
return cls(thumb=_thumb, thumb_w=_thumb_w, thumb_h=_thumb_h, duration=_duration, w=_w, h=_h, size=_size, key=_key, iv=_iv)
class DecryptedMessageMediaWebPage(TLObject):
CONSTRUCTOR_ID = 0xe50511d8
SUBCLASS_OF_ID = 0x96a0e005
def __init__(self, url: str):
"""
Constructor for secret.DecryptedMessageMedia: Instance of either DecryptedMessageMediaEmpty, DecryptedMessageMediaPhoto23, DecryptedMessageMediaVideo8, DecryptedMessageMediaGeoPoint, DecryptedMessageMediaContact, DecryptedMessageMediaDocument23, DecryptedMessageMediaAudio8, DecryptedMessageMediaVideo23, DecryptedMessageMediaAudio, DecryptedMessageMediaExternalDocument, DecryptedMessageMediaPhoto, DecryptedMessageMediaVideo, DecryptedMessageMediaDocument, DecryptedMessageMediaVenue, DecryptedMessageMediaWebPage.
"""
self.url = url
def to_dict(self):
return {
'_': 'DecryptedMessageMediaWebPage',
'url': self.url
}
def __bytes__(self):
return b''.join((
b'\xd8\x11\x05\xe5',
self.serialize_bytes(self.url),
))
@classmethod
def from_reader(cls, reader):
_url = reader.tgread_string()
return cls(url=_url)
class DecryptedMessageService(TLObject):
CONSTRUCTOR_ID = 0x73164160
SUBCLASS_OF_ID = 0x5182c3e8
def __init__(self, action: 'TypeDecryptedMessageAction', random_id: int=None):
"""
Constructor for secret.DecryptedMessage: Instance of either DecryptedMessage8, DecryptedMessageService8, DecryptedMessage23, DecryptedMessageService, DecryptedMessage46, DecryptedMessage.
"""
self.action = action
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'DecryptedMessageService',
'action': self.action.to_dict() if isinstance(self.action, TLObject) else self.action,
'random_id': self.random_id
}
def __bytes__(self):
return b''.join((
b'`A\x16s',
struct.pack('<q', self.random_id),
bytes(self.action),
))
@classmethod
def from_reader(cls, reader):
_random_id = reader.read_long()
_action = reader.tgread_object()
return cls(action=_action, random_id=_random_id)
class DecryptedMessageService8(TLObject):
CONSTRUCTOR_ID = 0xaa48327d
SUBCLASS_OF_ID = 0x5182c3e8
def __init__(self, random_bytes: bytes, action: 'TypeDecryptedMessageAction', random_id: int=None):
"""
Constructor for secret.DecryptedMessage: Instance of either DecryptedMessage8, DecryptedMessageService8, DecryptedMessage23, DecryptedMessageService, DecryptedMessage46, DecryptedMessage.
"""
self.random_bytes = random_bytes
self.action = action
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'DecryptedMessageService8',
'random_bytes': self.random_bytes,
'action': self.action.to_dict() if isinstance(self.action, TLObject) else self.action,
'random_id': self.random_id
}
def __bytes__(self):
return b''.join((
b'}2H\xaa',
struct.pack('<q', self.random_id),
self.serialize_bytes(self.random_bytes),
bytes(self.action),
))
@classmethod
def from_reader(cls, reader):
_random_id = reader.read_long()
_random_bytes = reader.tgread_bytes()
_action = reader.tgread_object()
return cls(random_bytes=_random_bytes, action=_action, random_id=_random_id)
class DocumentAttributeAnimated(TLObject):
CONSTRUCTOR_ID = 0x11b58939
SUBCLASS_OF_ID = 0x989b1da0
def to_dict(self):
return {
'_': 'DocumentAttributeAnimated'
}
def __bytes__(self):
return b''.join((
b'9\x89\xb5\x11',
))
@classmethod
def from_reader(cls, reader):
return cls()
class DocumentAttributeAudio(TLObject):
CONSTRUCTOR_ID = 0x9852f9c6
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, duration: int, voice: Optional[bool]=None, title: Optional[str]=None, performer: Optional[str]=None, waveform: Optional[bytes]=None):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.duration = duration
self.voice = voice
self.title = title
self.performer = performer
self.waveform = waveform
def to_dict(self):
return {
'_': 'DocumentAttributeAudio',
'duration': self.duration,
'voice': self.voice,
'title': self.title,
'performer': self.performer,
'waveform': self.waveform
}
def __bytes__(self):
return b''.join((
b'\xc6\xf9R\x98',
struct.pack('<I', (0 if self.voice is None or self.voice is False else 1024) | (0 if self.title is None or self.title is False else 1) | (0 if self.performer is None or self.performer is False else 2) | (0 if self.waveform is None or self.waveform is False else 4)),
struct.pack('<i', self.duration),
b'' if self.title is None or self.title is False else (self.serialize_bytes(self.title)),
b'' if self.performer is None or self.performer is False else (self.serialize_bytes(self.performer)),
b'' if self.waveform is None or self.waveform is False else (self.serialize_bytes(self.waveform)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_voice = bool(flags & 1024)
_duration = reader.read_int()
if flags & 1:
_title = reader.tgread_string()
else:
_title = None
if flags & 2:
_performer = reader.tgread_string()
else:
_performer = None
if flags & 4:
_waveform = reader.tgread_bytes()
else:
_waveform = None
return cls(duration=_duration, voice=_voice, title=_title, performer=_performer, waveform=_waveform)
class DocumentAttributeAudio23(TLObject):
CONSTRUCTOR_ID = 0x51448e5
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, duration: int):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.duration = duration
def to_dict(self):
return {
'_': 'DocumentAttributeAudio23',
'duration': self.duration
}
def __bytes__(self):
return b''.join((
b'\xe5H\x14\x05',
struct.pack('<i', self.duration),
))
@classmethod
def from_reader(cls, reader):
_duration = reader.read_int()
return cls(duration=_duration)
class DocumentAttributeAudio45(TLObject):
CONSTRUCTOR_ID = 0xded218e0
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, duration: int, title: str, performer: str):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.duration = duration
self.title = title
self.performer = performer
def to_dict(self):
return {
'_': 'DocumentAttributeAudio45',
'duration': self.duration,
'title': self.title,
'performer': self.performer
}
def __bytes__(self):
return b''.join((
b'\xe0\x18\xd2\xde',
struct.pack('<i', self.duration),
self.serialize_bytes(self.title),
self.serialize_bytes(self.performer),
))
@classmethod
def from_reader(cls, reader):
_duration = reader.read_int()
_title = reader.tgread_string()
_performer = reader.tgread_string()
return cls(duration=_duration, title=_title, performer=_performer)
class DocumentAttributeFilename(TLObject):
CONSTRUCTOR_ID = 0x15590068
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, file_name: str):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.file_name = file_name
def to_dict(self):
return {
'_': 'DocumentAttributeFilename',
'file_name': self.file_name
}
def __bytes__(self):
return b''.join((
b'h\x00Y\x15',
self.serialize_bytes(self.file_name),
))
@classmethod
def from_reader(cls, reader):
_file_name = reader.tgread_string()
return cls(file_name=_file_name)
class DocumentAttributeImageSize(TLObject):
CONSTRUCTOR_ID = 0x6c37c15c
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, w: int, h: int):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.w = w
self.h = h
def to_dict(self):
return {
'_': 'DocumentAttributeImageSize',
'w': self.w,
'h': self.h
}
def __bytes__(self):
return b''.join((
b'\\\xc17l',
struct.pack('<i', self.w),
struct.pack('<i', self.h),
))
@classmethod
def from_reader(cls, reader):
_w = reader.read_int()
_h = reader.read_int()
return cls(w=_w, h=_h)
class DocumentAttributeSticker(TLObject):
CONSTRUCTOR_ID = 0x3a556302
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, alt: str, stickerset: 'TypeInputStickerSet'):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.alt = alt
self.stickerset = stickerset
def to_dict(self):
return {
'_': 'DocumentAttributeSticker',
'alt': self.alt,
'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset
}
def __bytes__(self):
return b''.join((
b'\x02cU:',
self.serialize_bytes(self.alt),
bytes(self.stickerset),
))
@classmethod
def from_reader(cls, reader):
_alt = reader.tgread_string()
_stickerset = reader.tgread_object()
return cls(alt=_alt, stickerset=_stickerset)
class DocumentAttributeSticker23(TLObject):
CONSTRUCTOR_ID = 0xfb0a5727
SUBCLASS_OF_ID = 0x989b1da0
def to_dict(self):
return {
'_': 'DocumentAttributeSticker23'
}
def __bytes__(self):
return b''.join((
b"'W\n\xfb",
))
@classmethod
def from_reader(cls, reader):
return cls()
class DocumentAttributeVideo(TLObject):
CONSTRUCTOR_ID = 0x5910cccb
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, duration: int, w: int, h: int):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.duration = duration
self.w = w
self.h = h
def to_dict(self):
return {
'_': 'DocumentAttributeVideo',
'duration': self.duration,
'w': self.w,
'h': self.h
}
def __bytes__(self):
return b''.join((
b'\xcb\xcc\x10Y',
struct.pack('<i', self.duration),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
))
@classmethod
def from_reader(cls, reader):
_duration = reader.read_int()
_w = reader.read_int()
_h = reader.read_int()
return cls(duration=_duration, w=_w, h=_h)
class DocumentAttributeVideo66(TLObject):
CONSTRUCTOR_ID = 0xef02ce6
SUBCLASS_OF_ID = 0x989b1da0
def __init__(self, duration: int, w: int, h: int, round_message: Optional[bool]=None):
"""
Constructor for secret.DocumentAttribute: Instance of either DocumentAttributeImageSize, DocumentAttributeAnimated, DocumentAttributeSticker23, DocumentAttributeVideo, DocumentAttributeAudio23, DocumentAttributeFilename, DocumentAttributeAudio45, DocumentAttributeSticker, DocumentAttributeAudio, DocumentAttributeVideo66.
"""
self.duration = duration
self.w = w
self.h = h
self.round_message = round_message
def to_dict(self):
return {
'_': 'DocumentAttributeVideo66',
'duration': self.duration,
'w': self.w,
'h': self.h,
'round_message': self.round_message
}
def __bytes__(self):
return b''.join((
b'\xe6,\xf0\x0e',
struct.pack('<I', (0 if self.round_message is None or self.round_message is False else 1)),
struct.pack('<i', self.duration),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_round_message = bool(flags & 1)
_duration = reader.read_int()
_w = reader.read_int()
_h = reader.read_int()
return cls(duration=_duration, w=_w, h=_h, round_message=_round_message)
class FileLocation(TLObject):
CONSTRUCTOR_ID = 0x53d69076
SUBCLASS_OF_ID = 0x5ad8f388
def __init__(self, dc_id: int, volume_id: int, local_id: int, secret: int):
"""
Constructor for secret.FileLocation: Instance of either FileLocationUnavailable, FileLocation.
"""
self.dc_id = dc_id
self.volume_id = volume_id
self.local_id = local_id
self.secret = secret
def to_dict(self):
return {
'_': 'FileLocation',
'dc_id': self.dc_id,
'volume_id': self.volume_id,
'local_id': self.local_id,
'secret': self.secret
}
def __bytes__(self):
return b''.join((
b'v\x90\xd6S',
struct.pack('<i', self.dc_id),
struct.pack('<q', self.volume_id),
struct.pack('<i', self.local_id),
struct.pack('<q', self.secret),
))
@classmethod
def from_reader(cls, reader):
_dc_id = reader.read_int()
_volume_id = reader.read_long()
_local_id = reader.read_int()
_secret = reader.read_long()
return cls(dc_id=_dc_id, volume_id=_volume_id, local_id=_local_id, secret=_secret)
class FileLocationUnavailable(TLObject):
CONSTRUCTOR_ID = 0x7c596b46
SUBCLASS_OF_ID = 0x5ad8f388
def __init__(self, volume_id: int, local_id: int, secret: int):
"""
Constructor for secret.FileLocation: Instance of either FileLocationUnavailable, FileLocation.
"""
self.volume_id = volume_id
self.local_id = local_id
self.secret = secret
def to_dict(self):
return {
'_': 'FileLocationUnavailable',
'volume_id': self.volume_id,
'local_id': self.local_id,
'secret': self.secret
}
def __bytes__(self):
return b''.join((
b'FkY|',
struct.pack('<q', self.volume_id),
struct.pack('<i', self.local_id),
struct.pack('<q', self.secret),
))
@classmethod
def from_reader(cls, reader):
_volume_id = reader.read_long()
_local_id = reader.read_int()
_secret = reader.read_long()
return cls(volume_id=_volume_id, local_id=_local_id, secret=_secret)
class InputStickerSetEmpty(TLObject):
CONSTRUCTOR_ID = 0xffb62b95
SUBCLASS_OF_ID = 0xd1ea5569
def to_dict(self):
return {
'_': 'InputStickerSetEmpty'
}
def __bytes__(self):
return b''.join((
b'\x95+\xb6\xff',
))
@classmethod
def from_reader(cls, reader):
return cls()
class InputStickerSetShortName(TLObject):
CONSTRUCTOR_ID = 0x861cc8a0
SUBCLASS_OF_ID = 0xd1ea5569
def __init__(self, short_name: str):
"""
Constructor for secret.InputStickerSet: Instance of either InputStickerSetShortName, InputStickerSetEmpty.
"""
self.short_name = short_name
def to_dict(self):
return {
'_': 'InputStickerSetShortName',
'short_name': self.short_name
}
def __bytes__(self):
return b''.join((
b'\xa0\xc8\x1c\x86',
self.serialize_bytes(self.short_name),
))
@classmethod
def from_reader(cls, reader):
_short_name = reader.tgread_string()
return cls(short_name=_short_name)
class MessageEntityBlockquote(TLObject):
CONSTRUCTOR_ID = 0x20df5d0
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityBlockquote',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\xd0\xf5\r\x02',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityBold(TLObject):
CONSTRUCTOR_ID = 0xbd610bc9
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityBold',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\xc9\x0ba\xbd',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityBotCommand(TLObject):
CONSTRUCTOR_ID = 0x6cef8ac7
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityBotCommand',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\xc7\x8a\xefl',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityCashtag(TLObject):
CONSTRUCTOR_ID = 0x4c4e743f
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityCashtag',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'?tNL',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityCode(TLObject):
CONSTRUCTOR_ID = 0x28a20571
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityCode',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'q\x05\xa2(',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityEmail(TLObject):
CONSTRUCTOR_ID = 0x64e475c2
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityEmail',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\xc2u\xe4d',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityHashtag(TLObject):
CONSTRUCTOR_ID = 0x6f635b0d
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityHashtag',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\r[co',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityItalic(TLObject):
CONSTRUCTOR_ID = 0x826f8b60
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityItalic',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'`\x8bo\x82',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityMention(TLObject):
CONSTRUCTOR_ID = 0xfa04579d
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityMention',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\x9dW\x04\xfa',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityMentionName(TLObject):
CONSTRUCTOR_ID = 0x352dca58
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int, user_id: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
self.user_id = user_id
def to_dict(self):
return {
'_': 'MessageEntityMentionName',
'offset': self.offset,
'length': self.length,
'user_id': self.user_id
}
def __bytes__(self):
return b''.join((
b'X\xca-5',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
struct.pack('<i', self.user_id),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
_user_id = reader.read_int()
return cls(offset=_offset, length=_length, user_id=_user_id)
class MessageEntityPhone(TLObject):
CONSTRUCTOR_ID = 0x9b69e34b
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityPhone',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'K\xe3i\x9b',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityPre(TLObject):
CONSTRUCTOR_ID = 0x73924be0
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int, language: str):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
self.language = language
def to_dict(self):
return {
'_': 'MessageEntityPre',
'offset': self.offset,
'length': self.length,
'language': self.language
}
def __bytes__(self):
return b''.join((
b'\xe0K\x92s',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
self.serialize_bytes(self.language),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
_language = reader.tgread_string()
return cls(offset=_offset, length=_length, language=_language)
class MessageEntityStrike(TLObject):
CONSTRUCTOR_ID = 0xbf0693d4
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityStrike',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\xd4\x93\x06\xbf',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityTextUrl(TLObject):
CONSTRUCTOR_ID = 0x76a6d327
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int, url: str):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
self.url = url
def to_dict(self):
return {
'_': 'MessageEntityTextUrl',
'offset': self.offset,
'length': self.length,
'url': self.url
}
def __bytes__(self):
return b''.join((
b"'\xd3\xa6v",
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
self.serialize_bytes(self.url),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
_url = reader.tgread_string()
return cls(offset=_offset, length=_length, url=_url)
class MessageEntityUnderline(TLObject):
CONSTRUCTOR_ID = 0x9c4e7e8b
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityUnderline',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\x8b~N\x9c',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityUnknown(TLObject):
CONSTRUCTOR_ID = 0xbb92ba95
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityUnknown',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'\x95\xba\x92\xbb',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class MessageEntityUrl(TLObject):
CONSTRUCTOR_ID = 0x6ed02538
SUBCLASS_OF_ID = 0x8eaa4c27
def __init__(self, offset: int, length: int):
"""
Constructor for secret.MessageEntity: Instance of either MessageEntityUnknown, MessageEntityMention, MessageEntityHashtag, MessageEntityBotCommand, MessageEntityUrl, MessageEntityEmail, MessageEntityBold, MessageEntityItalic, MessageEntityCode, MessageEntityPre, MessageEntityTextUrl, MessageEntityMentionName, MessageEntityPhone, MessageEntityCashtag, MessageEntityUnderline, MessageEntityStrike, MessageEntityBlockquote.
"""
self.offset = offset
self.length = length
def to_dict(self):
return {
'_': 'MessageEntityUrl',
'offset': self.offset,
'length': self.length
}
def __bytes__(self):
return b''.join((
b'8%\xd0n',
struct.pack('<i', self.offset),
struct.pack('<i', self.length),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_length = reader.read_int()
return cls(offset=_offset, length=_length)
class PhotoCachedSize(TLObject):
CONSTRUCTOR_ID = 0xe9a734fa
SUBCLASS_OF_ID = 0x1fe3e096
# noinspection PyShadowingBuiltins
def __init__(self, type: str, location: 'TypeFileLocation', w: int, h: int, bytes: bytes):
"""
Constructor for secret.PhotoSize: Instance of either PhotoSizeEmpty, PhotoSize, PhotoCachedSize.
"""
self.type = type
self.location = location
self.w = w
self.h = h
self.bytes = bytes
def to_dict(self):
return {
'_': 'PhotoCachedSize',
'type': self.type,
'location': self.location.to_dict() if isinstance(self.location, TLObject) else self.location,
'w': self.w,
'h': self.h,
'bytes': self.bytes
}
def __bytes__(self):
return b''.join((
b'\xfa4\xa7\xe9',
self.serialize_bytes(self.type),
bytes(self.location),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
self.serialize_bytes(self.bytes),
))
@classmethod
def from_reader(cls, reader):
_type = reader.tgread_string()
_location = reader.tgread_object()
_w = reader.read_int()
_h = reader.read_int()
_bytes = reader.tgread_bytes()
return cls(type=_type, location=_location, w=_w, h=_h, bytes=_bytes)
class PhotoSize(TLObject):
CONSTRUCTOR_ID = 0x77bfb61b
SUBCLASS_OF_ID = 0x1fe3e096
# noinspection PyShadowingBuiltins
def __init__(self, type: str, location: 'TypeFileLocation', w: int, h: int, size: int):
"""
Constructor for secret.PhotoSize: Instance of either PhotoSizeEmpty, PhotoSize, PhotoCachedSize.
"""
self.type = type
self.location = location
self.w = w
self.h = h
self.size = size
def to_dict(self):
return {
'_': 'PhotoSize',
'type': self.type,
'location': self.location.to_dict() if isinstance(self.location, TLObject) else self.location,
'w': self.w,
'h': self.h,
'size': self.size
}
def __bytes__(self):
return b''.join((
b'\x1b\xb6\xbfw',
self.serialize_bytes(self.type),
bytes(self.location),
struct.pack('<i', self.w),
struct.pack('<i', self.h),
struct.pack('<i', self.size),
))
@classmethod
def from_reader(cls, reader):
_type = reader.tgread_string()
_location = reader.tgread_object()
_w = reader.read_int()
_h = reader.read_int()
_size = reader.read_int()
return cls(type=_type, location=_location, w=_w, h=_h, size=_size)
class PhotoSizeEmpty(TLObject):
CONSTRUCTOR_ID = 0xe17e23c
SUBCLASS_OF_ID = 0x1fe3e096
# noinspection PyShadowingBuiltins
def __init__(self, type: str):
"""
Constructor for secret.PhotoSize: Instance of either PhotoSizeEmpty, PhotoSize, PhotoCachedSize.
"""
self.type = type
def to_dict(self):
return {
'_': 'PhotoSizeEmpty',
'type': self.type
}
def __bytes__(self):
return b''.join((
b'<\xe2\x17\x0e',
self.serialize_bytes(self.type),
))
@classmethod
def from_reader(cls, reader):
_type = reader.tgread_string()
return cls(type=_type)
class SendMessageCancelAction(TLObject):
CONSTRUCTOR_ID = 0xfd5ec8f5
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageCancelAction'
}
def __bytes__(self):
return b''.join((
b'\xf5\xc8^\xfd',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageChooseContactAction(TLObject):
CONSTRUCTOR_ID = 0x628cbc6f
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageChooseContactAction'
}
def __bytes__(self):
return b''.join((
b'o\xbc\x8cb',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageGeoLocationAction(TLObject):
CONSTRUCTOR_ID = 0x176f8ba1
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageGeoLocationAction'
}
def __bytes__(self):
return b''.join((
b'\xa1\x8bo\x17',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageRecordAudioAction(TLObject):
CONSTRUCTOR_ID = 0xd52f73f7
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageRecordAudioAction'
}
def __bytes__(self):
return b''.join((
b'\xf7s/\xd5',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageRecordRoundAction(TLObject):
CONSTRUCTOR_ID = 0x88f27fbc
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageRecordRoundAction'
}
def __bytes__(self):
return b''.join((
b'\xbc\x7f\xf2\x88',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageRecordVideoAction(TLObject):
CONSTRUCTOR_ID = 0xa187d66f
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageRecordVideoAction'
}
def __bytes__(self):
return b''.join((
b'o\xd6\x87\xa1',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageTypingAction(TLObject):
CONSTRUCTOR_ID = 0x16bf744e
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageTypingAction'
}
def __bytes__(self):
return b''.join((
b'Nt\xbf\x16',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageUploadAudioAction(TLObject):
CONSTRUCTOR_ID = 0xe6ac8a6f
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageUploadAudioAction'
}
def __bytes__(self):
return b''.join((
b'o\x8a\xac\xe6',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageUploadDocumentAction(TLObject):
CONSTRUCTOR_ID = 0x8faee98e
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageUploadDocumentAction'
}
def __bytes__(self):
return b''.join((
b'\x8e\xe9\xae\x8f',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageUploadPhotoAction(TLObject):
CONSTRUCTOR_ID = 0x990a3c1a
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageUploadPhotoAction'
}
def __bytes__(self):
return b''.join((
b'\x1a<\n\x99',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageUploadRoundAction(TLObject):
CONSTRUCTOR_ID = 0xbb718624
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageUploadRoundAction'
}
def __bytes__(self):
return b''.join((
b'$\x86q\xbb',
))
@classmethod
def from_reader(cls, reader):
return cls()
class SendMessageUploadVideoAction(TLObject):
CONSTRUCTOR_ID = 0x92042ff7
SUBCLASS_OF_ID = 0x4f003a1a
def to_dict(self):
return {
'_': 'SendMessageUploadVideoAction'
}
def __bytes__(self):
return b''.join((
b'\xf7/\x04\x92',
))
@classmethod
def from_reader(cls, reader):
return cls()
| 36.711307
| 524
| 0.651295
| 10,085
| 103,893
| 6.44819
| 0.049479
| 0.023389
| 0.024589
| 0.02422
| 0.828725
| 0.806643
| 0.784392
| 0.762141
| 0.752483
| 0.71527
| 0
| 0.020166
| 0.250152
| 103,893
| 2,829
| 525
| 36.724284
| 0.814579
| 0.236965
| 0
| 0.702414
| 1
| 0
| 0.065489
| 0.023582
| 0
| 0
| 0.020742
| 0
| 0
| 1
| 0.142061
| false
| 0
| 0.002786
| 0.083565
| 0.370474
| 0.005571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bc688874f99f4edde0c266aff62239ceeb104df7
| 123
|
py
|
Python
|
app/aicos_rca/__init__.py
|
muhiza/digital.cooperative
|
f57a749e10796b6e00920b21809ab56b9274d944
|
[
"Unlicense"
] | null | null | null |
app/aicos_rca/__init__.py
|
muhiza/digital.cooperative
|
f57a749e10796b6e00920b21809ab56b9274d944
|
[
"Unlicense"
] | null | null | null |
app/aicos_rca/__init__.py
|
muhiza/digital.cooperative
|
f57a749e10796b6e00920b21809ab56b9274d944
|
[
"Unlicense"
] | null | null | null |
from flask import Blueprint
aicos_rca = Blueprint('aicos_rca', __name__, template_folder='templates')
from . import views
| 24.6
| 73
| 0.796748
| 16
| 123
| 5.6875
| 0.6875
| 0.307692
| 0.373626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 5
| 74
| 24.6
| 0.834862
| 0
| 0
| 0
| 0
| 0
| 0.145161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
bcbc0bf92903522ece56bc587a945e41c23613ac
| 11,683
|
py
|
Python
|
Web/fluid_properties/Validation/HAValidation.py
|
BENGAL-TIGER/CoolPropMDA
|
5a384c2863363b415c13f444bb183cc22232afe1
|
[
"MIT"
] | null | null | null |
Web/fluid_properties/Validation/HAValidation.py
|
BENGAL-TIGER/CoolPropMDA
|
5a384c2863363b415c13f444bb183cc22232afe1
|
[
"MIT"
] | null | null | null |
Web/fluid_properties/Validation/HAValidation.py
|
BENGAL-TIGER/CoolPropMDA
|
5a384c2863363b415c13f444bb183cc22232afe1
|
[
"MIT"
] | null | null | null |
from CoolProp.HumidAirProp import HAPropsSI
import numpy as np
print ' Replicating the tables from ASHRAE RP-1485'
print ' '
print 'A.6.1 Psychrometric Properties of Moist Air at 0C and Below'
print 'Saturated air at 101.325 kPa'
s5=' '*5
print '===================================================='
print "{T:8s}{W:10s}{v:10s}{h:10s}{s:10s}".format(W=s5+' Ws',v=s5+' v',h=s5+'h',s=s5+' s',T=' T')
print "{T:8s}{W:10s}{v:10s}{h:10s}{s:10s}".format(W=' kgw/kg_da',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',T=' C')
print '----------------------------------------------------'
for T in np.linspace(-60,0,13)+273.15:
h = HAPropsSI('H','T',T,'R',1.0,'P',101325)/1000
Twb = HAPropsSI('Twb','T',T,'R',1.0,'P',101325)-273.15
W = HAPropsSI('W','T',T,'R',1.0,'P',101325)
v = HAPropsSI('V','T',T,'R',1.0,'P',101325)
s = HAPropsSI('S','T',T,'R',1.0,'P',101325)/1000
print "{T:8.0f}{W:10.7f}{v:10.4f}{h:10.3f}{s:10.4f}".format(W=W,T=T-273.15,v=v,h=h,s=s)
print '===================================================='
print ' '
print 'A.6.2 Psychrometric Properties of Moist Air at 0C and Above'
print 'Saturated air at 101.325 kPa'
s5=' '*5
print '===================================================='
print "{T:8s}{W:10s}{v:10s}{h:10s}{s:10s}".format(W=s5+' Ws',v=s5+' v',h=s5+'h',s=s5+' s',T=' T')
print "{T:8s}{W:10s}{v:10s}{h:10s}{s:10s}".format(W=' kgw/kg_da',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',T=' C')
print '----------------------------------------------------'
for T in np.linspace(0,90,19)+273.15:
h=HAPropsSI('H','T',T,'R',1.0,'P',101325)/1000
Twb=HAPropsSI('Twb','T',T,'R',1.0,'P',101325)-273.15
W=HAPropsSI('W','T',T,'R',1.0,'P',101325)
v=HAPropsSI('V','T',T,'R',1.0,'P',101325)
s=HAPropsSI('S','T',T,'R',1.0,'P',101325)/1000
print "{T:8.0f}{W:10.7f}{v:10.3f}{h:10.2f}{s:10.4f}".format(W=W,T=T-273.15,v=v,h=h,s=s)
print '===================================================='
print ' '
def HotAir(num):
from CoolProp.HumidAirProp import HAPropsSI
if num=='8':
Temp=str(200)
T=200+273.15
elif num=='9':
Temp=str(320)
T=320+273.15
print 'A.'+num+'.1 Psychrometric Properties of Moist Air at 101.325 kPa '
print 'Dry Bulb temperature of '+Temp+'C'
s5=' '*5
print '================================================================'
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=s5+' W',Twb=s5+'Twb',v=s5+' v',h=s5+'h',s=s5+' s',R=s5+'RH')
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=' kgw/kg_da',Twb=' C',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',R=' %')
print '----------------------------------------------------------------'
for W in [0.0,0.05,0.1,0.20,0.30,0.40,0.50,0.60,0.70,0.80,0.90,1.0]:
h=HAPropsSI('H','T',T,'W',W,'P',101325)/1000
Twb=HAPropsSI('Twb','T',T,'W',W,'P',101325)-273.15
R=HAPropsSI('R','T',T,'W',W,'P',101325)*100
v=HAPropsSI('V','T',T,'W',W,'P',101325)
s=HAPropsSI('S','T',T,'W',W,'P',101325)/1000
print "{W:10.2f}{Twb:10.2f}{v:10.3f}{h:10.2f}{s:10.4f}{R:10.4f}".format(W=W,Twb=Twb,v=v,h=h,s=s,R=R)
print '================================================================'
print ' '
print 'A.'+num+'.2 Psychrometric Properties of Moist Air at 1000 kPa '
print 'Dry Bulb temperature of '+Temp+'C'
print '================================================================'
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=s5+' W',Twb=s5+'Twb',v=s5+' v',h=s5+'h',s=s5+' s',R=s5+'RH')
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=' kgw/kg_da',Twb=' C',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',R=' %')
print '----------------------------------------------------------------'
for W in [0.0,0.05,0.1,0.20,0.30,0.40,0.50,0.60,0.70,0.80,0.90,1.0]:
h=HAPropsSI('H','T',T,'W',W,'P',1000e3)/1000
Twb=HAPropsSI('Twb','T',T,'W',W,'P',1000e3)-273.15
R=HAPropsSI('R','T',T,'W',W,'P',1000e3)*100
v=HAPropsSI('V','T',T,'W',W,'P',1000e3)
s=HAPropsSI('S','T',T,'W',W,'P',1000e3)/1000
print "{W:10.2f}{Twb:10.2f}{v:10.3f}{h:10.2f}{s:10.4f}{R:10.4f}".format(W=W,Twb=Twb,v=v,h=h,s=s,R=R)
print '================================================================'
print ' '
s5=' '*5
print 'A.'+num+'.3 Psychrometric Properties of Moist Air at 2000 kPa '
print 'Dry Bulb temperature of '+Temp+'C'
print '================================================================'
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=s5+' W',Twb=s5+'Twb',v=s5+' v',h=s5+'h',s=s5+' s',R=s5+'RH')
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=' kgw/kg_da',Twb=' C',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',R=' %')
print '----------------------------------------------------------------'
for W in [0.0,0.05,0.1,0.20,0.30,0.40,0.50,0.60,0.70,0.80,0.90,1.0]:
h=HAPropsSI('H','T',T,'W',W,'P',2000e3)/1000
Twb=HAPropsSI('Twb','T',T,'W',W,'P',2000e3)-273.15
R=HAPropsSI('R','T',T,'W',W,'P',2000e3)*100
v=HAPropsSI('V','T',T,'W',W,'P',2000e3)
s=HAPropsSI('S','T',T,'W',W,'P',2000e3)/1000
print "{W:10.2f}{Twb:10.2f}{v:10.3f}{h:10.2f}{s:10.4f}{R:10.4f}".format(W=W,Twb=Twb,v=v,h=h,s=s,R=R)
print '================================================================'
print ' '
s5=' '*5
print 'A.'+num+'.4 Psychrometric Properties of Moist Air at 5000 kPa '
print 'Dry Bulb temperature of '+Temp+'C'
print '================================================================'
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=s5+' W',Twb=s5+'Twb',v=s5+' v',h=s5+'h',s=s5+' s',R=s5+'RH')
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=' kgw/kg_da',Twb=' C',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',R=' %')
print '----------------------------------------------------------------'
if Temp=='200':
Wrange = [0.0,0.05,0.1,0.15,0.20,0.25,0.30]
else:
Wrange = [0.0,0.05,0.1,0.15,0.20,0.25,0.30,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
for W in Wrange:
h=HAPropsSI('H','T',T,'W',W,'P',5000e3)/1000
Twb=HAPropsSI('Twb','T',T,'W',W,'P',5000e3)-273.15
R=HAPropsSI('R','T',T,'W',W,'P',5000e3)*100
v=HAPropsSI('V','T',T,'W',W,'P',5000e3)
s=HAPropsSI('S','T',T,'W',W,'P',5000e3)/1000
print "{W:10.2f}{Twb:10.2f}{v:10.3f}{h:10.2f}{s:10.4f}{R:10.4f}".format(W=W,Twb=Twb,v=v,h=h,s=s,R=R)
print '================================================================'
print ' '
s5=' '*5
print 'A.'+num+'.5 Psychrometric Properties of Moist Air at 10,000 kPa '
print 'Dry Bulb temperature of '+Temp+'C'
print '================================================================'
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=s5+' W',Twb=s5+'Twb',v=s5+' v',h=s5+'h',s=s5+' s',R=s5+'RH')
print "{W:10s}{Twb:10s}{v:10s}{h:10s}{s:10s}{R:10s}".format(W=' kgw/kg_da',Twb=' C',v=' m3/kgda',h=' kJ/kgda',s=' kJ/kgda/K',R=' %')
print '----------------------------------------------------------------'
if Temp=='200':
Wrange = [0.0,0.05,0.1]
else:
Wrange = [0.0,0.05,0.1,0.15,0.20,0.25,0.30,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
for W in Wrange:
h=HAPropsSI('H','T',T,'W',W,'P',10000e3)/1000
Twb=HAPropsSI('Twb','T',T,'W',W,'P',10000e3)-273.15
R=HAPropsSI('R','T',T,'W',W,'P',10000e3)*100
v=HAPropsSI('V','T',T,'W',W,'P',10000e3)
s=HAPropsSI('S','T',T,'W',W,'P',10000e3)/1000
print "{W:10.2f}{Twb:10.2f}{v:10.3f}{h:10.2f}{s:10.4f}{R:10.4f}".format(W=W,Twb=Twb,v=v,h=h,s=s,R=R)
print '================================================================'
HotAir('8')
print ' '
HotAir('9')
##############################
#### Virial Coefficients #####
##############################
def Virials(variables):
from CoolProp.HumidAirProp import HAProps_Aux
import numpy as np
varString="%-10s"%('T')
units="%-10s"%('C')
#Build the header
for var in variables:
varString+="%-20s"%(var)
units+="%-20s" %(HAProps_Aux(var,300,100,0.0)[1])
print varString
print units
#Build the table
for T in np.linspace(-60,200,27)+273.15:
values="%-10.1f" %(T-273.15)
for var in variables:
values+="%-20.10e" %(HAProps_Aux(var,T,100,0.0)[0])
print values
print ""
print "Pure fluid Virial Coefficients"
print "------------------------------"
Virials(['Baa','Caaa','Bww','Cwww'])
Virials(['Baw','Caaw','Caww'])
print ""
print "Pure fluid Virial Coefficients Derivatives"
print "------------------------------------------"
Virials(['dBaa','dCaaa','dBww','dCwww'])
Virials(['dBaw','dCaaw','dCaww'])
##############################
####### Water Saturation #####
##############################
print ""
print "Water saturation pressure p_ws [kPa]"
from CoolProp.HumidAirProp import HAProps_Aux
import numpy as np
Tv=np.linspace(-60,300,13)+273.15
print "%-10s %-20s"%('T','p_ws')
print "%-10s %-20s"%('C',HAProps_Aux('p_ws',Tv[-1],100,0.0)[1])
#Build the table
for T in Tv:
values="%-10.2f" %(T-273.15)
values+="%-20.10e" %(HAProps_Aux('p_ws',T,100,0.0)[0])
print values
##############################
####### Henry Constant #######
##############################
print ""
print "Henry Constant (zero for T < 273.15 K)"
from CoolProp.HumidAirProp import HAProps_Aux
import numpy as np
Tv=np.linspace(0,300,11)+273.16
print "%-10s %-20s"%('T','beta_H')
print "%-10s %-20s"%('C',HAProps_Aux('beta_H',Tv[-1],100,0.0)[1])
#Build the table
for T in Tv:
values="%-10.2f" %(T-273.15)
values+="%-20.10e" %(HAProps_Aux('beta_H',T,100,0.0)[0])
print values
##########################################
####### Isothermal Compressibility #######
##########################################
print ""
print "Isothermal Compressibility of water (kT) [1/Pa]"
from CoolProp.HumidAirProp import HAProps_Aux
import numpy as np
Tv=np.linspace(-60,300,13)+273.15
Pv=[101325,200000,500000,1000000]
variables="%-10s"%('T')
for p in Pv:
variables+="%-20s"%("p = %-0.3f Pa "%(p))
print variables
#Build the actual table
for T in Tv:
values="%-10.2f" %(T-273.15)
for p in Pv:
values+="%-20.10e" %(HAProps_Aux('kT',T,p,0.0)[0])
print values
##########################################
####### Saturated Molar Volume Water #####
##########################################
print ""
print "Molar volume of saturated liquid water or ice (vbar_ws) [m^3/mol_H2O]"
from CoolProp.HumidAirProp import HAProps_Aux
import numpy as np
Tv=np.linspace(-60,300,13)+273.15
Pv=[101325,200000,500000,1000000]
variables="%-10s"%('T')
for p in Pv:
variables+="%-20s"%("p = %-0.3f Pa "%(p))
print variables
#Build the actual table
for T in Tv:
values="%-10.2f" %(T-273.15)
for p in Pv:
values+="%-20.10e" %(HAProps_Aux('vbar_ws',T,p,0.0)[0])
print values
##########################################
########### Enhancement Factor ###########
##########################################
print ""
print "Enhancement factor (f) [no units]"
from CoolProp.HumidAirProp import HAProps_Aux
import numpy as np
Tv=np.array([-60,-40,-20,0,40,80,120,160,200,250,300,350])+273.15
Pv=[101325,200000,500000,1000000,10000000]
variables="%-10s"%(u'T')
for p in Pv:
variables+="%-20s"%("p = %-0.3f Pa "%(p))
print variables
#Build the actual table
for T in Tv:
values="%-10.2f" %(T-273.15)
for p in Pv:
values+="%-20.10e" %(HAProps_Aux('f',T,p,0.0)[0])
print values
| 43.431227
| 148
| 0.47659
| 1,951
| 11,683
| 2.837519
| 0.093798
| 0.01409
| 0.013548
| 0.018064
| 0.824241
| 0.805275
| 0.743316
| 0.694545
| 0.658598
| 0.618497
| 0
| 0.129742
| 0.140375
| 11,683
| 269
| 149
| 43.431227
| 0.421488
| 0.022169
| 0
| 0.626667
| 0
| 0.093333
| 0.381123
| 0.20987
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.066667
| null | null | 0.426667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
bce2d3cc793ecef6a164d63394bbfc3b98ec2d4f
| 100
|
py
|
Python
|
SDWLE/cards_copy/__init__.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
SDWLE/cards_copy/__init__.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
SDWLE/cards_copy/__init__.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
from SDWLE.cards.minions import *
from SDWLE.cards.spells import *
from SDWLE.cards.weapons import *
| 33.333333
| 33
| 0.8
| 15
| 100
| 5.333333
| 0.466667
| 0.3375
| 0.525
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11
| 100
| 3
| 34
| 33.333333
| 0.898876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
bce459cb395f7349527639e3ea73252af3601729
| 33,671
|
py
|
Python
|
rest_api/tests/unit/test_batch_requests.py
|
mealchain/beta
|
7dc1a1aea175bfb3f1008939f098a1d58bb455a6
|
[
"Apache-2.0"
] | null | null | null |
rest_api/tests/unit/test_batch_requests.py
|
mealchain/beta
|
7dc1a1aea175bfb3f1008939f098a1d58bb455a6
|
[
"Apache-2.0"
] | null | null | null |
rest_api/tests/unit/test_batch_requests.py
|
mealchain/beta
|
7dc1a1aea175bfb3f1008939f098a1d58bb455a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from aiohttp.test_utils import unittest_run_loop
from components import Mocks, BaseApiTest
from sawtooth_rest_api.protobuf.validator_pb2 import Message
from sawtooth_rest_api.protobuf import client_pb2
class BatchListTests(BaseApiTest):
async def get_application(self, loop):
self.set_status_and_connection(
Message.CLIENT_BATCH_LIST_REQUEST,
client_pb2.ClientBatchListRequest,
client_pb2.ClientBatchListResponse)
handlers = self.build_handlers(loop, self.connection)
return self.build_app(loop, '/batches', handlers.list_batches)
@unittest_run_loop
async def test_batch_list(self):
"""Verifies a GET /batches without parameters works properly.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids of '2', '1', and '0'
It should send a Protobuf request with:
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '2'
- a link property that ends in '/batches?head=2'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '2', '1', and '0'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('2', '1', '0')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response, '/batches?head=2')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '2', '1', '0')
@unittest_run_loop
async def test_batch_list_with_validator_error(self):
"""Verifies a GET /batches with a validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
self.connection.preset_response(self.status.INTERNAL_ERROR)
response = await self.get_assert_status('/batches', 500)
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_batch_list_with_no_genesis(self):
"""Verifies a GET /batches with validator not ready breaks properly.
It will receive a Protobuf response with:
- a status of NOT_READY
It should send back a JSON response with:
- a status of 503
- an error property with a code of 15
"""
self.connection.preset_response(self.status.NOT_READY)
response = await self.get_assert_status('/batches', 503)
self.assert_has_valid_error(response, 15)
@unittest_run_loop
async def test_batch_list_with_head(self):
"""Verifies a GET /batches with a head parameter works properly.
It will receive a Protobuf response with:
- a head id of '1'
- a paging response with a start of 0, and 2 total resources
- two batches with ids of 1' and '0'
It should send a Protobuf request with:
- a head_id property of '1'
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '1'
- a link property that ends in '/batches?head=1'
- a paging property that matches the paging response
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids '1' and '0'
"""
paging = Mocks.make_paging_response(0, 2)
batches = Mocks.make_batches('1', '0')
self.connection.preset_response(head_id='1', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?head=1')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(head_id='1', paging=controls)
self.assert_has_valid_head(response, '1')
self.assert_has_valid_link(response, '/batches?head=1')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], '1', '0')
@unittest_run_loop
async def test_batch_list_with_bad_head(self):
"""Verifies a GET /batches with a bad head breaks properly.
It will receive a Protobuf response with:
- a status of NO_ROOT
It should send back a JSON response with:
- a response status of 404
- an error property with a code of 50
"""
self.connection.preset_response(self.status.NO_ROOT)
response = await self.get_assert_status('/batches?head=bad', 404)
self.assert_has_valid_error(response, 50)
@unittest_run_loop
async def test_batch_list_with_ids(self):
"""Verifies GET /batches with an id filter works properly.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 2 total resources
- two batches with ids of '0' and '2'
It should send a Protobuf request with:
- a batch_ids property of ['0', '2']
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '2', the latest
- a link property that ends in '/batches?head=2&id=0,2'
- a paging property that matches the paging response
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids '0' and '2'
"""
paging = Mocks.make_paging_response(0, 2)
batches = Mocks.make_batches('0', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?id=0,2')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(batch_ids=['0', '2'], paging=controls)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response, '/batches?head=2&id=0,2')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], '0', '2')
@unittest_run_loop
async def test_batch_list_with_bad_ids(self):
"""Verifies GET /batches with a bad id filter breaks properly.
It will receive a Protobuf response with:
- a status of NO_RESOURCE
- a head id of '2'
It should send back a JSON response with:
- a response status of 200
- a head property of '2', the latest
- a link property that ends in '/batches?head=2&id=bad,notgood'
- a paging property with only a total_count of 0
- a data property that is an empty list
"""
paging = Mocks.make_paging_response(None, 0)
self.connection.preset_response(
self.status.NO_RESOURCE,
head_id='2',
paging=paging)
response = await self.get_assert_200('/batches?id=bad,notgood')
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response, '/batches?head=2&id=bad,notgood')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 0)
@unittest_run_loop
async def test_batch_list_with_head_and_ids(self):
"""Verifies GET /batches with head and id parameters work properly.
It should send a Protobuf request with:
- a head_id property of '1'
- a paging response with a start of 0, and 1 total resource
- a batch_ids property of ['0']
It will receive a Protobuf response with:
- a head id of '1'
- one batch with an id of '0'
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '1'
- a link property that ends in '/batches?head=1&id=0'
- a paging property that matches the paging response
- a data property that is a list of 1 dict
- and that dict is a full batch with an id of '0'
"""
paging = Mocks.make_paging_response(0, 1)
batches = Mocks.make_batches('0')
self.connection.preset_response(head_id='1', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?id=0&head=1')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(
head_id='1',
batch_ids=['0'],
paging=controls)
self.assert_has_valid_head(response, '1')
self.assert_has_valid_link(response, '/batches?head=1&id=0')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 1)
self.assert_batches_well_formed(response['data'], '0')
@unittest_run_loop
async def test_batch_list_paginated(self):
"""Verifies GET /batches paginated by min id works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 1, and 4 total resources
- one batch with the id 'c'
It should send a Protobuf request with:
- paging controls with a count of 1, and a start_index of 1
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=1&count=1'
- paging that matches the response, with next and previous links
- a data property that is a list of 1 dict
- and that dict is a full batch with the id 'c'
"""
paging = Mocks.make_paging_response(1, 4)
batches = Mocks.make_batches('c')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?min=1&count=1')
controls = Mocks.make_paging_controls(1, start_index=1)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&min=1&count=1')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=2&count=1',
'/batches?head=d&min=0&count=1')
self.assert_has_valid_data_list(response, 1)
self.assert_batches_well_formed(response['data'], 'c')
@unittest_run_loop
async def test_batch_list_with_zero_count(self):
"""Verifies a GET /batches with a count of zero breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 53
"""
response = await self.get_assert_status('/batches?min=2&count=0', 400)
self.assert_has_valid_error(response, 53)
@unittest_run_loop
async def test_batch_list_with_bad_paging(self):
"""Verifies a GET /batches with a bad paging breaks properly.
It will receive a Protobuf response with:
- a status of INVALID_PAGING
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 54
"""
self.connection.preset_response(self.status.INVALID_PAGING)
response = await self.get_assert_status('/batches?min=-1', 400)
self.assert_has_valid_error(response, 54)
@unittest_run_loop
async def test_batch_list_paginated_with_just_count(self):
"""Verifies GET /batches paginated just by count works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 0, and 4 total resources
- two batches with the ids 'd' and 'c'
It should send a Protobuf request with:
- paging controls with a count of 2
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&count=2'
- paging that matches the response with a next link
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids 'd' and 'c'
"""
paging = Mocks.make_paging_response(0, 4)
batches = Mocks.make_batches('d', 'c')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?count=2')
controls = Mocks.make_paging_controls(2)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&count=2')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=2&count=2')
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], 'd', 'c')
@unittest_run_loop
async def test_batch_list_paginated_without_count(self):
"""Verifies GET /batches paginated without count works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 2, and 4 total resources
- two batches with the ids 'b' and 'a'
It should send a Protobuf request with:
- paging controls with a start_index of 2
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=2'
- paging that matches the response, with a previous link
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids 'd' and 'c'
"""
paging = Mocks.make_paging_response(2, 4)
batches = Mocks.make_batches('b', 'a')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?min=2')
controls = Mocks.make_paging_controls(None, start_index=2)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&min=2')
self.assert_has_valid_paging(response, paging,
previous_link='/batches?head=d&min=0&count=2')
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], 'b', 'a')
@unittest_run_loop
async def test_batch_list_paginated_by_min_id(self):
"""Verifies GET /batches paginated by a min id works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with:
* a start_index of 1
* total_resources of 4
* a previous_id of 'd'
- three batches with the ids 'c', 'b' and 'a'
It should send a Protobuf request with:
- paging controls with a count of 5, and a start_id of 'c'
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=c&count=5'
- paging that matches the response, with a previous link
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids 'c', 'b', and 'a'
"""
paging = Mocks.make_paging_response(1, 4, previous_id='d')
batches = Mocks.make_batches('c', 'b', 'a')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?min=c&count=5')
controls = Mocks.make_paging_controls(5, start_id='c')
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&min=c&count=5')
self.assert_has_valid_paging(response, paging,
previous_link='/batches?head=d&max=d&count=5')
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], 'c', 'b', 'a')
@unittest_run_loop
async def test_batch_list_paginated_by_max_id(self):
"""Verifies GET /batches paginated by a max id works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with:
* a start_index of 1
* a total_resources of 4
* a previous_id of 'd'
* a next_id of 'a'
- two batches with the ids 'c' and 'b'
It should send a Protobuf request with:
- paging controls with a count of 2, and an end_id of 'b'
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&max=b&count=2'
- paging that matches the response, with next and previous links
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids 'c' and 'b'
"""
paging = Mocks.make_paging_response(1, 4, previous_id='d', next_id='a')
batches = Mocks.make_batches('c', 'b')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?max=b&count=2')
controls = Mocks.make_paging_controls(2, end_id='b')
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&max=b&count=2')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=a&count=2',
'/batches?head=d&max=d&count=2')
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], 'c', 'b')
@unittest_run_loop
async def test_batch_list_paginated_by_max_index(self):
"""Verifies GET /batches paginated by a max index works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 0, and 4 total resources
- three batches with the ids 'd', 'c' and 'b'
It should send a Protobuf request with:
- paging controls with a count of 3, and an start_index of 0
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=3&count=7'
- paging that matches the response, with a next link
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids 'd', 'c', and 'b'
"""
paging = Mocks.make_paging_response(0, 4)
batches = Mocks.make_batches('d', 'c', 'b')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?max=2&count=7')
controls = Mocks.make_paging_controls(3, start_index=0)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&max=2&count=7')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=3&count=7')
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], 'd', 'c', 'b')
@unittest_run_loop
async def test_batch_list_sorted(self):
"""Verifies GET /batches can send proper sort controls.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '0', '1', and '2'
It should send a Protobuf request with:
- empty paging controls
- sort controls with a key of 'header_signature'
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link property ending in '/batches?head=2&sort=header_signature'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '0', '1', and '2'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('0', '1', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?sort=header_signature')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls('header_signature')
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=header_signature')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '0', '1', '2')
@unittest_run_loop
async def test_batch_list_with_bad_sort(self):
"""Verifies a GET /batches with a bad sort breaks properly.
It will receive a Protobuf response with:
- a status of INVALID_PAGING
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 57
"""
self.connection.preset_response(self.status.INVALID_SORT)
response = await self.get_assert_status('/batches?sort=bad', 400)
self.assert_has_valid_error(response, 57)
@unittest_run_loop
async def test_batch_list_sorted_with_nested_keys(self):
"""Verifies GET /batches can send proper sort controls with nested keys.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '0', '1', and '2'
It should send a Protobuf request with:
- empty paging controls
- sort controls with keys of 'header' and 'signer_pubkey'
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link ending in '/batches?head=2&sort=header.signer_pubkey'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '0', '1', and '2'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('0', '1', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200(
'/batches?sort=header.signer_pubkey')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls('header', 'signer_pubkey')
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=header.signer_pubkey')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '0', '1', '2')
@unittest_run_loop
async def test_batch_list_sorted_in_reverse(self):
"""Verifies a GET /batches can send proper sort parameters.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '2', '1', and '0'
It should send a Protobuf request with:
- empty paging controls
- sort controls with a key of 'header_signature' that is reversed
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link property ending in '/batches?head=2&sort=-header_signature'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '2', '1', and '0'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('2', '1', '0')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?sort=-header_signature')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls(
'header_signature', reverse=True)
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=-header_signature')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '2', '1', '0')
@unittest_run_loop
async def test_batch_list_sorted_by_length(self):
"""Verifies a GET /batches can send proper sort parameters.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '0', '1', and '2'
It should send a Protobuf request with:
- empty paging controls
- sort controls with a key of 'transactions' sorted by length
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link property ending in '/batches?head=2&sort=transactions.length'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '0', '1', and '2'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('0', '1', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?sort=transactions.length')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls('transactions', compare_length=True)
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=transactions.length')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '0', '1', '2')
@unittest_run_loop
async def test_batch_list_sorted_by_many_keys(self):
"""Verifies a GET /batches can send proper sort parameters.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '2', '1', and '0'
It should send a Protobuf request with:
- empty paging controls
- multiple sort controls with:
* a key of 'header_signature' that is reversed
* a key of 'transactions' that is sorted by length
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- link with '/batches?head=2&sort=-header_signature,transactions.length'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '2', '1', and '0'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('2', '1', '0')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200(
'/batches?sort=-header_signature,transactions.length')
page_controls = Mocks.make_paging_controls()
sorting = (Mocks.make_sort_controls('header_signature', reverse=True) +
Mocks.make_sort_controls('transactions', compare_length=True))
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=-header_signature,transactions.length')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '2', '1', '0')
class BatchGetTests(BaseApiTest):
async def get_application(self, loop):
self.set_status_and_connection(
Message.CLIENT_BATCH_GET_REQUEST,
client_pb2.ClientBatchGetRequest,
client_pb2.ClientBatchGetResponse)
handlers = self.build_handlers(loop, self.connection)
return self.build_app(loop, '/batches/{batch_id}', handlers.fetch_batch)
@unittest_run_loop
async def test_batch_get(self):
"""Verifies a GET /batches/{batch_id} works properly.
It should send a Protobuf request with:
- a batch_id property of '1'
It will receive a Protobuf response with:
- a batch with an id of '1'
It should send back a JSON response with:
- a response status of 200
- no head property
- a link property that ends in '/batches/1'
- a data property that is a full batch with an id of '1'
"""
self.connection.preset_response(batch=Mocks.make_batches('1')[0])
response = await self.get_assert_200('/batches/1')
self.connection.assert_valid_request_sent(batch_id='1')
self.assertNotIn('head', response)
self.assert_has_valid_link(response, '/batches/1')
self.assertIn('data', response)
self.assert_batches_well_formed(response['data'], '1')
@unittest_run_loop
async def test_batch_get_with_validator_error(self):
"""Verifies GET /batches/{batch_id} w/ validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
self.connection.preset_response(self.status.INTERNAL_ERROR)
response = await self.get_assert_status('/batches/1', 500)
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_batch_get_with_bad_id(self):
"""Verifies a GET /batches/{batch_id} with unfound id breaks properly.
It will receive a Protobuf response with:
- a status of NO_RESOURCE
It should send back a JSON response with:
- a response status of 404
- an error property with a code of 71
"""
self.connection.preset_response(self.status.NO_RESOURCE)
response = await self.get_assert_status('/batches/bad', 404)
self.assert_has_valid_error(response, 71)
| 42.947704
| 88
| 0.63378
| 4,625
| 33,671
| 4.441297
| 0.048649
| 0.023611
| 0.0462
| 0.06397
| 0.920062
| 0.897717
| 0.870308
| 0.828246
| 0.787401
| 0.755319
| 0
| 0.021798
| 0.279261
| 33,671
| 783
| 89
| 43.002554
| 0.824625
| 0.0188
| 0
| 0.509677
| 0
| 0
| 0.087896
| 0.056018
| 0
| 0
| 0
| 0
| 0.425806
| 1
| 0
| false
| 0
| 0.012903
| 0
| 0.025806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c0552ff9a7e2dd3772ef8d801d301edb2c274a5
| 9,292
|
py
|
Python
|
tests/integrations/test_events_views.py
|
satyaakam/fossevents.in
|
b5f1dcae56d3af35beea9e25fbcdaf4735ce0192
|
[
"MIT"
] | 17
|
2015-07-08T10:41:59.000Z
|
2020-04-01T00:25:50.000Z
|
tests/integrations/test_events_views.py
|
OmairK/fossevents.in
|
db73b08d2f058a94054184150198bfbaeb1f21a9
|
[
"MIT"
] | 58
|
2015-04-06T12:32:54.000Z
|
2021-06-10T20:38:18.000Z
|
tests/integrations/test_events_views.py
|
OmairK/fossevents.in
|
db73b08d2f058a94054184150198bfbaeb1f21a9
|
[
"MIT"
] | 34
|
2015-04-28T09:40:12.000Z
|
2021-03-29T04:25:55.000Z
|
import pytest
from django.core.urlresolvers import reverse
from django.utils import timezone
from fossevents.events.services import get_event_review_url
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_homepage(client):
event = f.EventFactory(is_published=False)
event2 = f.EventFactory(is_published=False, start_date=timezone.now()-timezone.timedelta(days=9),
end_date=timezone.now()-timezone.timedelta(days=8))
url = reverse('home')
response = client.get(url)
assert response.status_code == 200
# should have 'events' in the template context
assert 'events' in response.context
assert 'upcoming_events' in response.context
assert 'past_events' in response.context
# should not display any event, if none are published
assert len(response.context['events']) == 0
assert len(response.context['upcoming_events']) == 0
assert len(response.context['past_events']) == 0
# should now contain one event, after it's published
event.is_published = True
event.save()
event2.is_published = True
event2.save()
response = client.get(url)
assert len(response.context['events']) == 0
assert len(response.context['upcoming_events']) == 1
assert len(response.context['past_events']) == 1
assert response.context['upcoming_events'][0].id == event.id
assert response.context['past_events'][0].id == event2.id
def test_homepage_search(client):
event = f.EventFactory(is_published=True, name='test_event')
f.EventFactory(is_published=True, start_date=timezone.now()-timezone.timedelta(days=9),
end_date=timezone.now()-timezone.timedelta(days=8))
url = reverse('home')
response = client.get(url, {'q': 'test'})
assert response.status_code == 200
# should have 'events' in the template context
assert 'events' in response.context
assert 'upcoming_events' in response.context
assert 'past_events' in response.context
assert len(response.context['events']) == 1
assert len(response.context['upcoming_events']) == 0
assert len(response.context['past_events']) == 0
assert response.context['events'][0].id == event.id
def test_event_create(client, mocker):
url = reverse('events:create')
data = {
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '2016-08-13',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}
response = client.post(url, data)
assert response.status_code == 302
def test_event_create_without_url_scheme(client, mocker):
url = reverse('events:create')
data = {
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12 21:00',
'end_date': '2016-08-13 18:00',
'homepage': 'example.com',
'owner_email': 'test@example.com'
}
response = client.post(url, data)
assert response.status_code == 302
EventErrorCasesData = [
({}, 'name'),
({
# Name required field
'name': '',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '2016-08-13',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'name'),
({
# Description required field
'name': 'Event01',
'description': '',
'start_date': '2016-08-12',
'end_date': '2016-08-13',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'description'),
({
# Start date required field
'name': 'Event01',
'description': 'Event01 description',
'start_date': '',
'end_date': '2016-08-13',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'start_date'),
({
# End date required field
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'end_date'),
({
# Format of start date
'name': 'Event01',
'description': 'Event01 description',
'start_date': '12-08-2016',
'end_date': '2016-08-13',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'start_date'),
({
# Format of end date
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '13-08-2016',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'end_date'),
({
# End date should be greater than start date
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '2016-08-11',
'homepage': 'http://example.com',
'owner_email': 'test@example.com'
}, 'end_date'),
({
# Invalid url
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '2016-08-11',
'homepage': 'example',
'owner_email': 'test@example.com'
}, 'homepage'),
({
# Owner email required field
'name': 'Event01',
'description': 'Event01 description',
'start_date': '2016-08-12',
'end_date': '2016-08-13',
'homepage': 'http://example.com',
'owner_email': ''
}, 'owner_email'),
]
@pytest.mark.parametrize("test_data,error_field", EventErrorCasesData)
def test_event_create_error(test_data, error_field, client):
url = reverse('events:create')
response = client.post(url, test_data)
assert response.status_code == 200
assert len(response.context['form'][error_field].errors)
def test_event_detail_anonymous_user(client):
event = f.EventFactory(is_published=False)
url = event.get_absolute_url()
response = client.get(url)
assert response.status_code == 200
assert not response.context[0].get('form', None)
def test_event_detail_user(client):
event = f.EventFactory(is_published=False)
user = f.UserFactory()
client.login(user=user)
url = event.get_absolute_url()
response = client.get(url)
assert response.status_code == 200
assert not response.context[0].get('form', None)
def test_event_detail_moderator(client):
event = f.EventFactory(is_published=False)
user = f.UserFactory(is_moderator=True)
client.login(user=user)
url = event.get_absolute_url()
response = client.get(url)
assert response.status_code == 200
assert response.context[0].get('form', None)
def test_event_detail_staff(client):
event = f.EventFactory(is_published=False)
user = f.UserFactory(is_staff=True)
client.login(user=user)
url = event.get_absolute_url()
response = client.get(url)
assert response.status_code == 200
assert not response.context[0].get('form', None)
def test_event_detail_admin(client):
event = f.EventFactory(is_published=False)
user = f.UserFactory(is_superuser=True)
client.login(user=user)
url = event.get_absolute_url()
response = client.get(url)
assert response.status_code == 200
assert not response.context[0].get('form', None)
def test_event_review(client):
event = f.EventFactory(is_published=False)
user = f.UserFactory(is_moderator=True)
client.login(user=user)
home_url = reverse('home')
response = client.get(home_url)
# should not display any event, if none are published
assert len(response.context['events']) == 0
assert len(response.context['upcoming_events']) == 0
assert len(response.context['past_events']) == 0
url = get_event_review_url(event)
data = {
'is_approved': 'true',
'comment': 'Approving event'
}
response = client.post(url, data)
assert response.status_code == 302
# Event get visible after review
response = client.get(home_url)
assert len(response.context['events']) == 0
assert len(response.context['upcoming_events']) == 1
assert len(response.context['past_events']) == 0
assert response.context['upcoming_events'][0].id == event.id
def test_event_review_reject(client):
event = f.EventFactory(is_published=True)
user = f.UserFactory(is_moderator=True)
client.login(user=user)
home_url = reverse('home')
response = client.get(home_url)
assert len(response.context['events']) == 0
assert len(response.context['upcoming_events']) == 1
assert len(response.context['past_events']) == 0
assert response.context['upcoming_events'][0].id == event.id
url = get_event_review_url(event)
data = {
'is_approved': 'false',
'comment': 'Rejecting event'
}
response = client.post(url, data)
assert response.status_code == 302
# Event get visible after review
response = client.get(home_url)
assert len(response.context['events']) == 0
assert len(response.context['upcoming_events']) == 0
assert len(response.context['past_events']) == 0
| 32.152249
| 101
| 0.634417
| 1,126
| 9,292
| 5.089698
| 0.110124
| 0.099459
| 0.065259
| 0.092131
| 0.842436
| 0.815215
| 0.802478
| 0.781888
| 0.766533
| 0.734078
| 0
| 0.039099
| 0.22105
| 9,292
| 288
| 102
| 32.263889
| 0.752694
| 0.0565
| 0
| 0.719298
| 0
| 0
| 0.229257
| 0.0024
| 0
| 0
| 0
| 0
| 0.219298
| 1
| 0.052632
| false
| 0
| 0.02193
| 0
| 0.074561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c10cbba72a486eb062b5c6c1fa15e4c132fef27
| 101
|
py
|
Python
|
04_pytest/test_hello.py
|
covrebo/python100
|
758233f9a52b2ffae8cd5c44e6794aceb1fd1614
|
[
"MIT"
] | null | null | null |
04_pytest/test_hello.py
|
covrebo/python100
|
758233f9a52b2ffae8cd5c44e6794aceb1fd1614
|
[
"MIT"
] | 1
|
2021-05-11T02:03:56.000Z
|
2021-05-11T02:03:56.000Z
|
04_pytest/test_hello.py
|
covrebo/python100
|
758233f9a52b2ffae8cd5c44e6794aceb1fd1614
|
[
"MIT"
] | null | null | null |
from hello import hello_name
def test_hello_name():
assert hello_name('clark') == 'hello, clark'
| 25.25
| 48
| 0.732673
| 15
| 101
| 4.666667
| 0.533333
| 0.385714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148515
| 101
| 4
| 48
| 25.25
| 0.813953
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c22152568c51a356b5f4a4a4e0df344f06f43f5
| 26,722
|
py
|
Python
|
tests/st/ops/cpu/test_resize_bilinear_op.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/st/ops/cpu/test_resize_bilinear_op.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/st/ops/cpu/test_resize_bilinear_op.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context, Tensor
from mindspore.ops import operations as P
from mindspore import nn
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class NetResizeBilinear(nn.Cell):
def __init__(self, size=None, align_corner=False):
super(NetResizeBilinear, self).__init__()
self.op = P.ResizeBilinear(size=size, align_corners=align_corner)
def construct(self, inputs):
return self.op(inputs)
def test_resize_nn_grayscale_integer_ratio_half(datatype=np.float16):
input_tensor = Tensor(np.array(
[[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((9, 9))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1333, 0.1666, 0.2, 0.2333, 0.2666, 0.3, 0.3, 0.3],
[0.2, 0.2333, 0.2666, 0.2998, 0.3333, 0.3667, 0.4, 0.4, 0.4],
[0.2998, 0.3333, 0.3665, 0.4, 0.433, 0.4668, 0.5, 0.5, 0.5],
[0.4, 0.4333, 0.4666, 0.5, 0.533, 0.567, 0.6, 0.6, 0.6],
[0.5, 0.533, 0.5664, 0.6, 0.6333, 0.667, 0.7, 0.7, 0.7],
[0.6, 0.6333, 0.6665, 0.6997, 0.733, 0.7666, 0.8, 0.8, 0.8],
[0.7, 0.7334, 0.7666, 0.8, 0.833, 0.8667, 0.9, 0.9, 0.9],
[0.7, 0.7334, 0.7666, 0.8, 0.833, 0.8667, 0.9, 0.9, 0.9],
[0.7, 0.7334, 0.7666, 0.8, 0.833, 0.8667, 0.9, 0.9, 0.9]]]]
).astype(np.float16))
error = np.ones(shape=[9, 9]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((1, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1]]]]).astype(np.float16))
error = np.ones(shape=[1, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((1, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.1499, 0.2, 0.25, 0.3, 0.3]]]]).astype(np.float16))
error = np.ones(shape=[1, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((6, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1],
[0.2499],
[0.4],
[0.55],
[0.7],
[0.7]]]]).astype(np.float16))
error = np.ones(shape=[6, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((1, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.2, 0.3]]]]).astype(np.float16))
error = np.ones(shape=[1, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((6, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3],
[0.2499, 0.35, 0.4502],
[0.4, 0.5, 0.6],
[0.55, 0.65, 0.75],
[0.7, 0.8, 0.9],
[0.7, 0.8, 0.9]]]]).astype(np.float16))
error = np.ones(shape=[6, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, smaller w
resize_nn = NetResizeBilinear((3, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1],
[0.4],
[0.7]]]]).astype(np.float16))
error = np.ones(shape=[3, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, larger w
resize_nn = NetResizeBilinear((3, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1499, 0.2, 0.25, 0.3, 0.3],
[0.4, 0.45, 0.5, 0.55, 0.6, 0.6],
[0.7, 0.75, 0.8, 0.8496, 0.9, 0.9]]]]).astype(np.float16))
error = np.ones(shape=[3, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same w, same h (identity)
resize_nn = NetResizeBilinear((3, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array(
[[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9]]]]).astype(np.float16))
error = np.ones(shape=[3, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_integer_ratio_float(datatype=np.float32):
input_tensor = Tensor(np.array(
[[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((9, 9))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.13333334, 0.16666667, 0.2, 0.23333335, 0.26666668, 0.3, 0.3, 0.3],
[0.20000002, 0.23333335, 0.26666668, 0.3, 0.33333337, 0.3666667, 0.40000004,
0.40000004, 0.40000004],
[0.3, 0.33333337, 0.36666667, 0.40000004, 0.43333337, 0.4666667, 0.5, 0.5,
0.5],
[0.4, 0.43333334, 0.46666667, 0.5, 0.53333336, 0.5666667, 0.6, 0.6, 0.6],
[0.5, 0.53333336, 0.56666666, 0.6, 0.6333333, 0.66666675, 0.70000005,
0.70000005, 0.70000005],
[0.6, 0.6333334, 0.6666667, 0.70000005, 0.73333335, 0.7666667, 0.8, 0.8, 0.8],
[0.7, 0.73333335, 0.76666665, 0.8, 0.8333333, 0.8666667, 0.9, 0.9, 0.9],
[0.7, 0.73333335, 0.76666665, 0.8, 0.8333333, 0.8666667, 0.9, 0.9, 0.9],
[0.7, 0.73333335, 0.76666665, 0.8, 0.8333333, 0.8666667, 0.9, 0.9,
0.9]]]]).astype(np.float32))
error = np.ones(shape=[9, 9]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((1, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1]]]]).astype(np.float32))
error = np.ones(shape=[1, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((1, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.15, 0.2, 0.25, 0.3, 0.3]]]]).astype(np.float32))
error = np.ones(shape=[1, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((6, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1], [0.25], [0.4], [0.55], [0.7], [0.7]]]]).astype(np.float32))
error = np.ones(shape=[6, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((1, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.2, 0.3]]]]).astype(np.float32))
error = np.ones(shape=[1, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((6, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3],
[0.25, 0.35000002, 0.45000002],
[0.4, 0.5, 0.6],
[0.55, 0.65, 0.75],
[0.7, 0.8, 0.9],
[0.7, 0.8, 0.9]]]]).astype(np.float32))
error = np.ones(shape=[6, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, smaller w
resize_nn = NetResizeBilinear((3, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1], [0.4], [0.7]]]]).astype(np.float32))
error = np.ones(shape=[3, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, larger w
resize_nn = NetResizeBilinear((3, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.15, 0.2, 0.25, 0.3, 0.3],
[0.4, 0.45, 0.5, 0.55, 0.6, 0.6],
[0.7, 0.75, 0.8, 0.85, 0.9, 0.9]]]]).astype(np.float32))
error = np.ones(shape=[3, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same w, same h (identity)
resize_nn = NetResizeBilinear((3, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array(
[[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(np.float32))
error = np.ones(shape=[3, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_not_integer_ratio_half(datatype=np.float16):
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.9, 0.0, 0.1, 0.2]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((7, 7))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1571, 0.2142, 0.2715, 0.3286, 0.3857, 0.4],
[0.2715, 0.3286, 0.3857, 0.4429, 0.5, 0.557, 0.5713],
[0.4429, 0.5, 0.557, 0.6143, 0.6714, 0.7285, 0.7427],
[0.6143, 0.5083, 0.4429, 0.5005, 0.557, 0.6143, 0.6284],
[0.7856, 0.4346, 0.1855, 0.2429, 0.2998, 0.357, 0.3716],
[0.9, 0.3857, 0.01428, 0.0714, 0.1285, 0.1857, 0.2],
[0.9, 0.3857, 0.01428, 0.0714, 0.1285, 0.1857, 0.2]]]]).astype(np.float16))
error = np.ones(shape=[7, 7]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((2, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.2333, 0.3667],
[0.7, 0.3333, 0.4666]]]]).astype(np.float16))
error = np.ones(shape=[2, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((2, 7))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1571, 0.2142, 0.2715, 0.3286, 0.3857, 0.4],
[0.7, 0.4714, 0.3142, 0.3716, 0.4285, 0.4856, 0.5]]]]).astype(np.float16))
error = np.ones(shape=[2, 7]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((5, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2333, 0.3667],
[0.3398, 0.4731, 0.6064],
[0.58, 0.513, 0.6465],
[0.82, 0.1533, 0.2866],
[0.9, 0.03333, 0.1666]]]]).astype(np.float16))
error = np.ones(shape=[5, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((2, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.7, 0.3, 0.4001, 0.5]]]]).astype(np.float16))
error = np.ones(shape=[2, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((8, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.2499, 0.35, 0.4502, 0.55],
[0.4, 0.5, 0.6, 0.6997],
[0.55, 0.525, 0.625, 0.7246],
[0.7, 0.3, 0.4001, 0.5],
[0.8496, 0.0752, 0.1753, 0.2754],
[0.9, 0., 0.1, 0.2],
[0.9, 0., 0.1, 0.2]]]]).astype(np.float16))
error = np.ones(shape=[8, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, smaller w
resize_nn = NetResizeBilinear((3, 2))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.3],
[0.5, 0.7],
[0.9, 0.1]]]]).astype(np.float16))
error = np.ones(shape=[3, 2]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, larger w
resize_nn = NetResizeBilinear((3, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1666, 0.2333, 0.3, 0.3667, 0.4],
[0.5, 0.567, 0.6333, 0.7, 0.7666, 0.8],
[0.9, 0.3003, 0.03333, 0.1, 0.1666, 0.2]]]]).astype(np.float16))
error = np.ones(shape=[3, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same w, same h (identity)
resize_nn = NetResizeBilinear((3, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.9, 0., 0.1, 0.2]]]]).astype(np.float16))
error = np.ones(shape=[3, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_not_integer_ratio_float(datatype=np.float32):
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.9, 0.0, 0.1, 0.2]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((7, 7))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.15714286, 0.21428573, 0.27142859, 0.32857144, 0.3857143, 0.4],
[0.27142859, 0.32857144, 0.38571432, 0.44285715, 0.5, 0.55714285, 0.5714286],
[0.44285715, 0.5, 0.5571429, 0.6142857, 0.67142856, 0.7285714, 0.74285716],
[0.6142857, 0.5081633, 0.4428572, 0.5, 0.55714285, 0.6142857, 0.62857145],
[0.78571427, 0.43469384, 0.1857143, 0.24285716, 0.3, 0.35714287, 0.37142855],
[0.9, 0.38571423, 0.01428572, 0.07142859, 0.12857144, 0.1857143, 0.2],
[0.9, 0.38571423, 0.01428572, 0.07142859, 0.12857144, 0.1857143,
0.2]]]]).astype(np.float32))
error = np.ones(shape=[7, 7]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((2, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.23333335, 0.36666667],
[0.7, 0.33333334, 0.46666667]]]]).astype(np.float32))
error = np.ones(shape=[2, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((2, 7))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.15714286, 0.21428573, 0.27142859, 0.32857144,
0.3857143, 0.4],
[0.7, 0.47142854, 0.31428576, 0.37142858, 0.42857143,
0.4857143, 0.5]]]]).astype(np.float32))
error = np.ones(shape=[2, 7]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((5, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.23333335, 0.36666667],
[0.34, 0.47333336, 0.6066667],
[0.58000004, 0.5133333, 0.64666665],
[0.82000005, 0.1533333, 0.28666663],
[0.9, 0.03333334, 0.16666669]]]]).astype(np.float32))
error = np.ones(shape=[5, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((2, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.7, 0.3, 0.4, 0.5]]]]).astype(np.float32))
error = np.ones(shape=[2, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((8, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.25, 0.35000002, 0.45, 0.55],
[0.4, 0.5, 0.6, 0.70000005],
[0.55, 0.52500004, 0.625, 0.725],
[0.7, 0.3, 0.4, 0.5],
[0.84999996, 0.07499999,
0.17500001, 0.27499998],
[0.9, 0., 0.1, 0.2],
[0.9, 0., 0.1, 0.2]]]]).astype(np.float32))
error = np.ones(shape=[8, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, smaller w
resize_nn = NetResizeBilinear((3, 2))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.3],
[0.5, 0.7],
[0.9, 0.1]]]]).astype(np.float32))
error = np.ones(shape=[3, 2]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, larger w
resize_nn = NetResizeBilinear((3, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.16666667, 0.23333335, 0.3, 0.36666667, 0.4],
[0.5, 0.56666666, 0.6333333, 0.7, 0.76666665, 0.8],
[0.9, 0.29999995, 0.03333334, 0.1, 0.16666669, 0.2]]]]).astype(np.float32))
error = np.ones(shape=[3, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same w, same h (identity)
resize_nn = NetResizeBilinear((3, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.9, 0., 0.1, 0.2]]]]).astype(np.float32))
error = np.ones(shape=[3, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_multiple_images_half(datatype=np.float16):
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]],
[[[0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [0.1, 0.2, 0.3]]],
[[[0.7, 0.8, 0.9], [0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]]]).astype(datatype))
resize_nn = NetResizeBilinear((2, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1499, 0.2, 0.25, 0.3, 0.3],
[0.55, 0.6, 0.65, 0.6997, 0.75, 0.75]]],
[[[0.4, 0.45, 0.5, 0.55, 0.6, 0.6],
[0.4001, 0.45, 0.5, 0.55, 0.6, 0.6]]],
[[[0.7, 0.75, 0.8, 0.8496, 0.9, 0.9],
[0.2499, 0.2998, 0.35, 0.4, 0.4502, 0.4502]]]]).astype(np.float16))
error = np.ones(shape=[3, 3, 2, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_multiple_images_float(datatype=np.float32):
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]],
[[[0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [0.1, 0.2, 0.3]]],
[[[0.7, 0.8, 0.9], [0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]]]).astype(datatype))
resize_nn = NetResizeBilinear((2, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.15, 0.2, 0.25, 0.3, 0.3],
[0.55, 0.6, 0.65, 0.70000005, 0.75, 0.75]]],
[[[0.4, 0.45, 0.5, 0.55, 0.6, 0.6],
[0.4, 0.45, 0.5, 0.55, 0.6, 0.6]]],
[[[0.7, 0.75, 0.8, 0.85, 0.9, 0.9],
[0.25, 0.3, 0.35000002, 0.4, 0.45000002, 0.45000002]]]]).astype(np.float32))
error = np.ones(shape=[3, 3, 2, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_align_corners_half(datatype=np.float16):
input_tensor = Tensor(
np.array([[[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]]]).astype(datatype))
resize_nn_corners_aligned = NetResizeBilinear(
size=(3, 7), align_corner=True)
output_corners_aligned = resize_nn_corners_aligned(input_tensor)
resize_nn = NetResizeBilinear((3, 7))
output = resize_nn(input_tensor)
expected_output_align = Tensor(np.array([[[[0.1, 0.1499, 0.2, 0.25, 0.3, 0.35, 0.4],
[0.2998, 0.3499, 0.4, 0.4502, 0.5, 0.55, 0.5996],
[0.5, 0.55, 0.6, 0.6504, 0.7, 0.75, 0.8]]]]).astype(np.float16))
expected_output = Tensor(np.array([[[[0.1, 0.1571, 0.2142, 0.2715, 0.3286, 0.3857, 0.4],
[0.3667, 0.4238, 0.481, 0.538, 0.595, 0.6523, 0.6665],
[0.5, 0.557, 0.6143, 0.672, 0.7285, 0.7856, 0.8]]]]).astype(np.float16))
error = np.ones(shape=[3, 7]) * 1.0e-6
diff_align = output_corners_aligned.asnumpy() - expected_output_align.asnumpy()
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
assert np.all(abs(diff_align) < error)
def test_resize_nn_grayscale_align_corners_float(datatype=np.float32):
input_tensor = Tensor(
np.array([[[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]]]).astype(datatype))
resize_nn_corners_aligned = NetResizeBilinear(
size=(3, 7), align_corner=True)
output_corners_aligned = resize_nn_corners_aligned(input_tensor)
resize_nn = NetResizeBilinear((3, 7))
output = resize_nn(input_tensor)
expected_output_align = Tensor(np.array([[[[0.1, 0.15, 0.2, 0.25, 0.3,
0.35000002, 0.4],
[0.3, 0.35000002, 0.40000004, 0.45, 0.5,
0.55, 0.6],
[0.5, 0.55, 0.6, 0.65, 0.7,
0.75, 0.8]]]]).astype(datatype))
expected_output = Tensor(np.array([[[[0.1, 0.15714286, 0.21428573, 0.27142859, 0.32857144,
0.3857143, 0.4],
[0.36666667, 0.42380953, 0.48095244, 0.53809524, 0.5952381,
0.65238094, 0.6666667],
[0.5, 0.55714285, 0.61428577, 0.67142856, 0.7285714,
0.78571427, 0.8]]]]).astype(datatype))
error = np.ones(shape=[3, 7]) * 1.0e-6
diff_align = output_corners_aligned.asnumpy() - expected_output_align.asnumpy()
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
assert np.all(abs(diff_align) < error)
| 48.762774
| 119
| 0.49652
| 3,855
| 26,722
| 3.361868
| 0.083009
| 0.05679
| 0.014352
| 0.054012
| 0.854012
| 0.821373
| 0.821373
| 0.801775
| 0.792747
| 0.77554
| 0
| 0.208147
| 0.331188
| 26,722
| 547
| 120
| 48.85192
| 0.51701
| 0.049061
| 0
| 0.693208
| 0
| 0
| 0.000118
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 1
| 0.023419
| false
| 0
| 0.009368
| 0.002342
| 0.037471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c3fa75233a258cb20bd25b72828f0cf7bf8051c
| 22,230
|
py
|
Python
|
sdk/python/pulumi_aws/mq/broker.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/mq/broker.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/mq/broker.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Broker(pulumi.CustomResource):
apply_immediately: pulumi.Output[bool]
"""
Specifies whether any broker modifications
are applied immediately, or during the next maintenance window. Default is `false`.
"""
arn: pulumi.Output[str]
"""
The ARN of the broker.
"""
auto_minor_version_upgrade: pulumi.Output[bool]
"""
Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions.
"""
broker_name: pulumi.Output[str]
"""
The name of the broker.
"""
configuration: pulumi.Output[dict]
"""
Configuration of the broker. See below.
* `id` (`str`) - The Configuration ID.
* `revision` (`float`) - Revision of the Configuration.
"""
deployment_mode: pulumi.Output[str]
"""
The deployment mode of the broker. Supported: `SINGLE_INSTANCE` and `ACTIVE_STANDBY_MULTI_AZ`. Defaults to `SINGLE_INSTANCE`.
"""
encryption_options: pulumi.Output[dict]
"""
Configuration block containing encryption options. See below.
* `kms_key_id` (`str`) - Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting `use_aws_owned_key` to `false`. To perform drift detection when AWS managed CMKs or customer managed CMKs are in use, this value must be configured.
* `useAwsOwnedKey` (`bool`) - Boolean to enable an AWS owned Key Management Service (KMS) Customer Master Key (CMK) that is not in your account. Defaults to `true`. Setting to `false` without configuring `kms_key_id` will create an AWS managed Customer Master Key (CMK) aliased to `aws/mq` in your account.
"""
engine_type: pulumi.Output[str]
"""
The type of broker engine. Currently, Amazon MQ supports only `ActiveMQ`.
"""
engine_version: pulumi.Output[str]
"""
The version of the broker engine. See the [AmazonMQ Broker Engine docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html) for supported versions.
"""
host_instance_type: pulumi.Output[str]
"""
The broker's instance type. e.g. `mq.t2.micro` or `mq.m4.large`
"""
instances: pulumi.Output[list]
"""
A list of information about allocated brokers (both active & standby).
* `instances.0.console_url` - The URL of the broker's [ActiveMQ Web Console](http://activemq.apache.org/web-console.html).
* `instances.0.ip_address` - The IP Address of the broker.
* `instances.0.endpoints` - The broker's wire-level protocol endpoints in the following order & format referenceable e.g. as `instances.0.endpoints.0` (SSL):
* `ssl://broker-id.mq.us-west-2.amazonaws.com:61617`
* `amqp+ssl://broker-id.mq.us-west-2.amazonaws.com:5671`
* `stomp+ssl://broker-id.mq.us-west-2.amazonaws.com:61614`
* `mqtt+ssl://broker-id.mq.us-west-2.amazonaws.com:8883`
* `wss://broker-id.mq.us-west-2.amazonaws.com:61619`
* `consoleUrl` (`str`)
* `endpoints` (`list`)
* `ip_address` (`str`)
"""
logs: pulumi.Output[dict]
"""
Logging configuration of the broker. See below.
* `audit` (`bool`) - Enables audit logging. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to `false`.
* `general` (`bool`) - Enables general logging via CloudWatch. Defaults to `false`.
"""
maintenance_window_start_time: pulumi.Output[dict]
"""
Maintenance window start time. See below.
* `dayOfWeek` (`str`) - The day of the week. e.g. `MONDAY`, `TUESDAY`, or `WEDNESDAY`
* `timeOfDay` (`str`) - The time, in 24-hour format. e.g. `02:00`
* `timeZone` (`str`) - The time zone, UTC by default, in either the Country/City format, or the UTC offset format. e.g. `CET`
"""
publicly_accessible: pulumi.Output[bool]
"""
Whether to enable connections from applications outside of the VPC that hosts the broker's subnets.
"""
security_groups: pulumi.Output[list]
"""
The list of security group IDs assigned to the broker.
"""
subnet_ids: pulumi.Output[list]
"""
The list of subnet IDs in which to launch the broker. A `SINGLE_INSTANCE` deployment requires one subnet. An `ACTIVE_STANDBY_MULTI_AZ` deployment requires two subnets.
"""
tags: pulumi.Output[dict]
"""
A map of tags to assign to the resource.
"""
users: pulumi.Output[list]
"""
The list of all ActiveMQ usernames for the specified broker. See below.
* `consoleAccess` (`bool`) - Whether to enable access to the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) for the user.
* `groups` (`list`) - The list of groups (20 maximum) to which the ActiveMQ user belongs.
* `password` (`str`) - The password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas.
* `username` (`str`) - The username of the user.
"""
def __init__(__self__, resource_name, opts=None, apply_immediately=None, auto_minor_version_upgrade=None, broker_name=None, configuration=None, deployment_mode=None, encryption_options=None, engine_type=None, engine_version=None, host_instance_type=None, logs=None, maintenance_window_start_time=None, publicly_accessible=None, security_groups=None, subnet_ids=None, tags=None, users=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an MQ Broker Resource. This resources also manages users for the broker.
For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html).
Changes to an MQ Broker can occur when you change a
parameter, such as `configuration` or `user`, and are reflected in the next maintenance
window. Because of this, this provider may report a difference in its planning
phase because a modification has not yet taken place. You can use the
`apply_immediately` flag to instruct the service to apply the change immediately
(see documentation below).
> **Note:** using `apply_immediately` can result in a
brief downtime as the broker reboots.
> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.mq.Broker("example",
broker_name="example",
configuration={
"id": aws_mq_configuration["test"]["id"],
"revision": aws_mq_configuration["test"]["latest_revision"],
},
engine_type="ActiveMQ",
engine_version="5.15.0",
host_instance_type="mq.t2.micro",
security_groups=[aws_security_group["test"]["id"]],
users=[{
"password": "MindTheGap",
"username": "ExampleUser",
}])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any broker modifications
are applied immediately, or during the next maintenance window. Default is `false`.
:param pulumi.Input[bool] auto_minor_version_upgrade: Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions.
:param pulumi.Input[str] broker_name: The name of the broker.
:param pulumi.Input[dict] configuration: Configuration of the broker. See below.
:param pulumi.Input[str] deployment_mode: The deployment mode of the broker. Supported: `SINGLE_INSTANCE` and `ACTIVE_STANDBY_MULTI_AZ`. Defaults to `SINGLE_INSTANCE`.
:param pulumi.Input[dict] encryption_options: Configuration block containing encryption options. See below.
:param pulumi.Input[str] engine_type: The type of broker engine. Currently, Amazon MQ supports only `ActiveMQ`.
:param pulumi.Input[str] engine_version: The version of the broker engine. See the [AmazonMQ Broker Engine docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html) for supported versions.
:param pulumi.Input[str] host_instance_type: The broker's instance type. e.g. `mq.t2.micro` or `mq.m4.large`
:param pulumi.Input[dict] logs: Logging configuration of the broker. See below.
:param pulumi.Input[dict] maintenance_window_start_time: Maintenance window start time. See below.
:param pulumi.Input[bool] publicly_accessible: Whether to enable connections from applications outside of the VPC that hosts the broker's subnets.
:param pulumi.Input[list] security_groups: The list of security group IDs assigned to the broker.
:param pulumi.Input[list] subnet_ids: The list of subnet IDs in which to launch the broker. A `SINGLE_INSTANCE` deployment requires one subnet. An `ACTIVE_STANDBY_MULTI_AZ` deployment requires two subnets.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
:param pulumi.Input[list] users: The list of all ActiveMQ usernames for the specified broker. See below.
The **configuration** object supports the following:
* `id` (`pulumi.Input[str]`) - The Configuration ID.
* `revision` (`pulumi.Input[float]`) - Revision of the Configuration.
The **encryption_options** object supports the following:
* `kms_key_id` (`pulumi.Input[str]`) - Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting `use_aws_owned_key` to `false`. To perform drift detection when AWS managed CMKs or customer managed CMKs are in use, this value must be configured.
* `useAwsOwnedKey` (`pulumi.Input[bool]`) - Boolean to enable an AWS owned Key Management Service (KMS) Customer Master Key (CMK) that is not in your account. Defaults to `true`. Setting to `false` without configuring `kms_key_id` will create an AWS managed Customer Master Key (CMK) aliased to `aws/mq` in your account.
The **logs** object supports the following:
* `audit` (`pulumi.Input[bool]`) - Enables audit logging. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to `false`.
* `general` (`pulumi.Input[bool]`) - Enables general logging via CloudWatch. Defaults to `false`.
The **maintenance_window_start_time** object supports the following:
* `dayOfWeek` (`pulumi.Input[str]`) - The day of the week. e.g. `MONDAY`, `TUESDAY`, or `WEDNESDAY`
* `timeOfDay` (`pulumi.Input[str]`) - The time, in 24-hour format. e.g. `02:00`
* `timeZone` (`pulumi.Input[str]`) - The time zone, UTC by default, in either the Country/City format, or the UTC offset format. e.g. `CET`
The **users** object supports the following:
* `consoleAccess` (`pulumi.Input[bool]`) - Whether to enable access to the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) for the user.
* `groups` (`pulumi.Input[list]`) - The list of groups (20 maximum) to which the ActiveMQ user belongs.
* `password` (`pulumi.Input[str]`) - The password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas.
* `username` (`pulumi.Input[str]`) - The username of the user.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['apply_immediately'] = apply_immediately
__props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade
if broker_name is None:
raise TypeError("Missing required property 'broker_name'")
__props__['broker_name'] = broker_name
__props__['configuration'] = configuration
__props__['deployment_mode'] = deployment_mode
__props__['encryption_options'] = encryption_options
if engine_type is None:
raise TypeError("Missing required property 'engine_type'")
__props__['engine_type'] = engine_type
if engine_version is None:
raise TypeError("Missing required property 'engine_version'")
__props__['engine_version'] = engine_version
if host_instance_type is None:
raise TypeError("Missing required property 'host_instance_type'")
__props__['host_instance_type'] = host_instance_type
__props__['logs'] = logs
__props__['maintenance_window_start_time'] = maintenance_window_start_time
__props__['publicly_accessible'] = publicly_accessible
if security_groups is None:
raise TypeError("Missing required property 'security_groups'")
__props__['security_groups'] = security_groups
__props__['subnet_ids'] = subnet_ids
__props__['tags'] = tags
if users is None:
raise TypeError("Missing required property 'users'")
__props__['users'] = users
__props__['arn'] = None
__props__['instances'] = None
super(Broker, __self__).__init__(
'aws:mq/broker:Broker',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, apply_immediately=None, arn=None, auto_minor_version_upgrade=None, broker_name=None, configuration=None, deployment_mode=None, encryption_options=None, engine_type=None, engine_version=None, host_instance_type=None, instances=None, logs=None, maintenance_window_start_time=None, publicly_accessible=None, security_groups=None, subnet_ids=None, tags=None, users=None):
"""
Get an existing Broker resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any broker modifications
are applied immediately, or during the next maintenance window. Default is `false`.
:param pulumi.Input[str] arn: The ARN of the broker.
:param pulumi.Input[bool] auto_minor_version_upgrade: Enables automatic upgrades to new minor versions for brokers, as Apache releases the versions.
:param pulumi.Input[str] broker_name: The name of the broker.
:param pulumi.Input[dict] configuration: Configuration of the broker. See below.
:param pulumi.Input[str] deployment_mode: The deployment mode of the broker. Supported: `SINGLE_INSTANCE` and `ACTIVE_STANDBY_MULTI_AZ`. Defaults to `SINGLE_INSTANCE`.
:param pulumi.Input[dict] encryption_options: Configuration block containing encryption options. See below.
:param pulumi.Input[str] engine_type: The type of broker engine. Currently, Amazon MQ supports only `ActiveMQ`.
:param pulumi.Input[str] engine_version: The version of the broker engine. See the [AmazonMQ Broker Engine docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html) for supported versions.
:param pulumi.Input[str] host_instance_type: The broker's instance type. e.g. `mq.t2.micro` or `mq.m4.large`
:param pulumi.Input[list] instances: A list of information about allocated brokers (both active & standby).
* `instances.0.console_url` - The URL of the broker's [ActiveMQ Web Console](http://activemq.apache.org/web-console.html).
* `instances.0.ip_address` - The IP Address of the broker.
* `instances.0.endpoints` - The broker's wire-level protocol endpoints in the following order & format referenceable e.g. as `instances.0.endpoints.0` (SSL):
* `ssl://broker-id.mq.us-west-2.amazonaws.com:61617`
* `amqp+ssl://broker-id.mq.us-west-2.amazonaws.com:5671`
* `stomp+ssl://broker-id.mq.us-west-2.amazonaws.com:61614`
* `mqtt+ssl://broker-id.mq.us-west-2.amazonaws.com:8883`
* `wss://broker-id.mq.us-west-2.amazonaws.com:61619`
:param pulumi.Input[dict] logs: Logging configuration of the broker. See below.
:param pulumi.Input[dict] maintenance_window_start_time: Maintenance window start time. See below.
:param pulumi.Input[bool] publicly_accessible: Whether to enable connections from applications outside of the VPC that hosts the broker's subnets.
:param pulumi.Input[list] security_groups: The list of security group IDs assigned to the broker.
:param pulumi.Input[list] subnet_ids: The list of subnet IDs in which to launch the broker. A `SINGLE_INSTANCE` deployment requires one subnet. An `ACTIVE_STANDBY_MULTI_AZ` deployment requires two subnets.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
:param pulumi.Input[list] users: The list of all ActiveMQ usernames for the specified broker. See below.
The **configuration** object supports the following:
* `id` (`pulumi.Input[str]`) - The Configuration ID.
* `revision` (`pulumi.Input[float]`) - Revision of the Configuration.
The **encryption_options** object supports the following:
* `kms_key_id` (`pulumi.Input[str]`) - Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting `use_aws_owned_key` to `false`. To perform drift detection when AWS managed CMKs or customer managed CMKs are in use, this value must be configured.
* `useAwsOwnedKey` (`pulumi.Input[bool]`) - Boolean to enable an AWS owned Key Management Service (KMS) Customer Master Key (CMK) that is not in your account. Defaults to `true`. Setting to `false` without configuring `kms_key_id` will create an AWS managed Customer Master Key (CMK) aliased to `aws/mq` in your account.
The **instances** object supports the following:
* `consoleUrl` (`pulumi.Input[str]`)
* `endpoints` (`pulumi.Input[list]`)
* `ip_address` (`pulumi.Input[str]`)
The **logs** object supports the following:
* `audit` (`pulumi.Input[bool]`) - Enables audit logging. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to `false`.
* `general` (`pulumi.Input[bool]`) - Enables general logging via CloudWatch. Defaults to `false`.
The **maintenance_window_start_time** object supports the following:
* `dayOfWeek` (`pulumi.Input[str]`) - The day of the week. e.g. `MONDAY`, `TUESDAY`, or `WEDNESDAY`
* `timeOfDay` (`pulumi.Input[str]`) - The time, in 24-hour format. e.g. `02:00`
* `timeZone` (`pulumi.Input[str]`) - The time zone, UTC by default, in either the Country/City format, or the UTC offset format. e.g. `CET`
The **users** object supports the following:
* `consoleAccess` (`pulumi.Input[bool]`) - Whether to enable access to the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) for the user.
* `groups` (`pulumi.Input[list]`) - The list of groups (20 maximum) to which the ActiveMQ user belongs.
* `password` (`pulumi.Input[str]`) - The password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas.
* `username` (`pulumi.Input[str]`) - The username of the user.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["apply_immediately"] = apply_immediately
__props__["arn"] = arn
__props__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
__props__["broker_name"] = broker_name
__props__["configuration"] = configuration
__props__["deployment_mode"] = deployment_mode
__props__["encryption_options"] = encryption_options
__props__["engine_type"] = engine_type
__props__["engine_version"] = engine_version
__props__["host_instance_type"] = host_instance_type
__props__["instances"] = instances
__props__["logs"] = logs
__props__["maintenance_window_start_time"] = maintenance_window_start_time
__props__["publicly_accessible"] = publicly_accessible
__props__["security_groups"] = security_groups
__props__["subnet_ids"] = subnet_ids
__props__["tags"] = tags
__props__["users"] = users
return Broker(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 61.922006
| 440
| 0.68511
| 2,919
| 22,230
| 5.050017
| 0.126756
| 0.047012
| 0.036904
| 0.024693
| 0.80137
| 0.773014
| 0.759989
| 0.744997
| 0.718337
| 0.705719
| 0
| 0.006878
| 0.215205
| 22,230
| 358
| 441
| 62.094972
| 0.838072
| 0.504274
| 0
| 0.019231
| 1
| 0
| 0.169789
| 0.018733
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0.009615
| 0.057692
| 0.019231
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c428e809ddd02a7490ed7149859b2faba0dbe6a
| 78,449
|
py
|
Python
|
src/openprocurement/tender/openua/tests/lot_blanks.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/openua/tests/lot_blanks.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/openua/tests/lot_blanks.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from copy import deepcopy
from openprocurement.api.models import get_now
from openprocurement.api.constants import RELEASE_2020_04_19, TWO_PHASE_COMMIT_FROM
from openprocurement.api.utils import parse_date
from openprocurement.tender.core.tests.cancellation import (
activate_cancellation_after_2020_04_19,
)
from openprocurement.tender.belowthreshold.tests.base import (
test_organization, test_author, test_cancellation, test_claim
)
from openprocurement.tender.openua.tests.base import test_bids
# TenderLotResourceTest
def patch_tender_currency(self):
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertEqual(lot["value"]["currency"], "UAH")
# update tender currency without mimimalStep currency change
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"value": {"currency": "GBP"}}},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": ["currency should be identical to currency of value of tender"],
"location": "body",
"name": "minimalStep",
}
],
)
# update tender currency
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"value": {"currency": "GBP"}, "minimalStep": {"currency": "GBP"}}},
)
self.assertEqual(response.status, "200 OK")
# log currency is updated too
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertEqual(lot["value"]["currency"], "GBP")
# try to update lot currency
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"value": {"currency": "USD"}}},
)
self.assertEqual(response.status, "200 OK")
# but the value stays unchanged
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertEqual(lot["value"]["currency"], "GBP")
# try to update minimalStep currency
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"minimalStep": {"currency": "USD"}}},
)
self.assertEqual(response.status, "200 OK")
# but the value stays unchanged
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertEqual(lot["minimalStep"]["currency"], "GBP")
# try to update lot minimalStep currency and lot value currency in single request
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"value": {"currency": "USD"}, "minimalStep": {"currency": "USD"}}},
)
self.assertEqual(response.status, "200 OK")
# but the value stays unchanged
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertEqual(lot["value"]["currency"], "GBP")
self.assertEqual(lot["minimalStep"]["currency"], "GBP")
self.set_enquiry_period_end()
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"value": {"currency": "USD"}, "minimalStep": {"currency": "USD"}}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
def patch_tender_vat(self):
# set tender VAT
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": True}}},
)
self.assertEqual(response.status, "200 OK")
# create lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertTrue(lot["value"]["valueAddedTaxIncluded"])
# update tender VAT
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(self.tender_id, self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": False}, "minimalStep": {"valueAddedTaxIncluded": False}}},
)
self.assertEqual(response.status, "200 OK")
# log VAT is updated too
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertFalse(lot["value"]["valueAddedTaxIncluded"])
# try to update lot VAT
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": True}}},
)
self.assertEqual(response.status, "200 OK")
# but the value stays unchanged
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertFalse(lot["value"]["valueAddedTaxIncluded"])
# try to update minimalStep VAT
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"minimalStep": {"valueAddedTaxIncluded": True}}},
)
self.assertEqual(response.status, "200 OK")
# but the value stays unchanged
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertFalse(lot["minimalStep"]["valueAddedTaxIncluded"])
# try to update minimalStep VAT and value VAT in single request
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": True}, "minimalStep": {"valueAddedTaxIncluded": True}}},
)
self.assertEqual(response.status, "200 OK")
# but the value stays unchanged
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
self.assertFalse(lot["value"]["valueAddedTaxIncluded"])
self.assertEqual(lot["minimalStep"]["valueAddedTaxIncluded"], lot["value"]["valueAddedTaxIncluded"])
self.set_enquiry_period_end()
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lot["id"], self.tender_token),
{"data": {"value": {"currency": "USD"}, "minimalStep": {"currency": "USD"}}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
def get_tender_lot(self):
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
set(response.json["data"]),
set(["status", "date", "description", "title", "minimalStep", "auctionPeriod", "value", "id"]),
)
self.set_status("active.qualification")
response = self.app.get("/tenders/{}/lots/{}".format(self.tender_id, lot["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
api_lot = response.json["data"]
lot.pop("auctionPeriod")
api_lot.pop("auctionPeriod")
self.assertEqual(api_lot, lot)
response = self.app.get("/tenders/{}/lots/some_id".format(self.tender_id), status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(response.json["errors"], [{"description": "Not Found", "location": "url", "name": "lot_id"}])
response = self.app.get("/tenders/some_id/lots/some_id", status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
def get_tender_lots(self):
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(self.tender_id, self.tender_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lot = response.json["data"]
response = self.app.get("/tenders/{}/lots".format(self.tender_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
set(response.json["data"][0]),
set(["status", "description", "date", "title", "minimalStep", "auctionPeriod", "value", "id"]),
)
self.set_status("active.qualification")
response = self.app.get("/tenders/{}/lots".format(self.tender_id))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
api_lot = response.json["data"][0]
lot.pop("auctionPeriod")
api_lot.pop("auctionPeriod")
self.assertEqual(api_lot, lot)
response = self.app.get("/tenders/some_id/lots", status=404)
self.assertEqual(response.status, "404 Not Found")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
)
# TenderLotEdgeCasesTest
def question_blocking(self):
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.post_json(
"/tenders/{}/questions".format(self.tender_id),
{
"data": {
"title": "question title",
"description": "question description",
"questionOf": "lot",
"relatedItem": self.initial_lots[0]["id"],
"author": test_author,
}
},
)
question = response.json["data"]
self.assertEqual(question["questionOf"], "lot")
self.assertEqual(question["relatedItem"], self.initial_lots[0]["id"])
self.set_status(self.question_claim_block_status, extra={"status": "active.tendering"})
self.check_chronograph()
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertEqual(response.json["data"]["status"], "active.tendering")
# cancel lot
cancellation = dict(**test_cancellation)
cancellation.update({
"status": "active",
"cancellationOf": "lot",
"relatedLot": self.initial_lots[0]["id"],
})
response = self.app.post_json(
"/tenders/{}/cancellations?acc_token={}".format(self.tender_id, self.tender_token),
{"data": cancellation},
)
cancellation_id = response.json["data"]["id"]
if get_now() > RELEASE_2020_04_19:
activate_cancellation_after_2020_04_19(self, cancellation_id)
self.check_chronograph()
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertEqual(response.json["data"]["status"], self.question_claim_block_status)
def claim_blocking(self):
self.app.authorization = ("Basic", ("broker", ""))
claim_data = deepcopy(test_claim)
claim_data["relatedLot"] = self.initial_lots[0]["id"]
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": claim_data
},
)
self.assertEqual(response.status, "201 Created")
complaint = response.json["data"]
self.assertEqual(complaint["relatedLot"], self.initial_lots[0]["id"])
self.set_status(self.question_claim_block_status, extra={"status": "active.tendering"})
self.check_chronograph()
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertEqual(response.json["data"]["status"], "active.tendering")
# cancel lot
cancellation = dict(**test_cancellation)
cancellation.update({
"status": "active",
"cancellationOf": "lot",
"relatedLot": self.initial_lots[0]["id"],
})
response = self.app.post_json(
"/tenders/{}/cancellations?acc_token={}".format(self.tender_id, self.tender_token),
{"data": cancellation},
)
cancellation_id = response.json["data"]["id"]
if get_now() > RELEASE_2020_04_19:
activate_cancellation_after_2020_04_19(self, cancellation_id)
self.check_chronograph()
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertEqual(response.json["data"]["status"], self.question_claim_block_status)
def next_check_value_with_unanswered_question(self):
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.post_json(
"/tenders/{}/questions".format(self.tender_id),
{
"data": {
"title": "question title",
"description": "question description",
"questionOf": "lot",
"relatedItem": self.initial_lots[0]["id"],
"author": test_author,
}
},
)
question = response.json["data"]
self.assertEqual(question["questionOf"], "lot")
self.assertEqual(question["relatedItem"], self.initial_lots[0]["id"])
self.set_status(self.question_claim_block_status, extra={"status": "active.tendering"})
response = self.check_chronograph()
self.assertEqual(response.json["data"]["status"], "active.tendering")
self.assertNotIn("next_check", response.json["data"])
self.app.authorization = ("Basic", ("broker", ""))
cancellation = dict(**test_cancellation)
cancellation.update({
"status": "active",
"cancellationOf": "lot",
"relatedLot": self.initial_lots[0]["id"],
})
response = self.app.post_json(
"/tenders/{}/cancellations?acc_token={}".format(self.tender_id, self.tender_token),
{"data": cancellation},
)
cancellation_id = response.json["data"]["id"]
if RELEASE_2020_04_19 < get_now():
activate_cancellation_after_2020_04_19(self, cancellation_id)
else:
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertIn("next_check", response.json["data"])
self.assertEqual(
parse_date(response.json["data"]["next_check"]),
parse_date(response.json["data"]["tenderPeriod"]["endDate"])
)
response = self.check_chronograph()
self.assertEqual(response.json["data"]["status"], self.question_claim_block_status)
self.assertIn("next_check", response.json["data"])
self.assertGreater(
parse_date(response.json["data"]["next_check"]),
parse_date(response.json["data"]["tenderPeriod"]["endDate"])
)
def next_check_value_with_unanswered_claim(self):
self.app.authorization = ("Basic", ("broker", ""))
claim = deepcopy(test_claim)
claim["relatedLot"] = self.initial_lots[0]["id"]
response = self.app.post_json(
"/tenders/{}/complaints".format(self.tender_id),
{
"data": claim
},
)
self.assertEqual(response.status, "201 Created")
complaint = response.json["data"]
self.assertEqual(complaint["relatedLot"], self.initial_lots[0]["id"])
self.set_status(self.question_claim_block_status, extra={"status": "active.tendering"})
response = self.check_chronograph()
self.assertEqual(response.json["data"]["status"], "active.tendering")
self.assertNotIn("next_check", response.json["data"])
self.app.authorization = ("Basic", ("broker", ""))
cancellation = dict(**test_cancellation)
cancellation.update({
"status": "active",
"cancellationOf": "lot",
"relatedLot": self.initial_lots[0]["id"],
})
response = self.app.post_json(
"/tenders/{}/cancellations?acc_token={}".format(self.tender_id, self.tender_token),
{"data": cancellation},
)
cancellation_id = response.json["data"]["id"]
if RELEASE_2020_04_19 < get_now():
activate_cancellation_after_2020_04_19(self, cancellation_id)
else:
response = self.app.get("/tenders/{}".format(self.tender_id))
self.assertIn("next_check", response.json["data"])
self.assertEqual(
parse_date(response.json["data"]["next_check"]),
parse_date(response.json["data"]["tenderPeriod"]["endDate"])
)
response = self.check_chronograph()
self.assertEqual(response.json["data"]["status"], self.question_claim_block_status)
self.assertIn("next_check", response.json["data"])
self.assertGreater(
parse_date(response.json["data"]["next_check"]),
parse_date(response.json["data"]["tenderPeriod"]["endDate"])
)
# TenderLotBidderResourceTest
def create_tender_bidder_invalid(self):
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
request_path = f"/tenders/{self.tender_id}/bids"
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["This field is required."], "location": "body", "name": "lotValues"}],
)
bid_data["lotValues"] = [{"value": {"amount": 500}}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"relatedLot": ["This field is required."]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": "0" * 32}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"relatedLot": ["relatedLot should be one of lots"]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 5000000}, "relatedLot": self.initial_lots[0]["id"]}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"value": ["value of bid should be less than value of lot"]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500, "valueAddedTaxIncluded": False}, "relatedLot": self.initial_lots[0]["id"]}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [
{
"value": [
"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of lot"
]
}
],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500, "currency": "USD"}, "relatedLot": self.initial_lots[0]["id"]}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"value": ["currency of bid should be identical to currency of value of lot"]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": self.initial_lots[0]["id"]}]
bid_data["value"] = {"amount": 500}
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["value should be posted for each lot of bid"], "location": "body", "name": "value"}],
)
bid_data["tenderers"] = test_organization
bid_data["tenderers"] = test_organization
del bid_data["value"]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertIn("invalid literal for int() with base 10", response.json["errors"][0]["description"])
def patch_tender_bidder(self):
lot_id = self.initial_lots[0]["id"]
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id}]
response = self.app.post_json(
"/tenders/{}/bids".format(self.tender_id),
{"data": bid_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
bidder = response.json["data"]
lot = bidder["lotValues"][0]
owner_token = response.json["access"]["token"]
response = self.app.patch_json(
"/tenders/{}/bids/{}?acc_token={}".format(self.tender_id, bidder["id"], owner_token),
{"data": {"tenderers": [{"name": "Державне управління управлінням справами"}]}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["lotValues"][0]["date"], lot["date"])
self.assertNotEqual(response.json["data"]["tenderers"][0]["name"], bidder["tenderers"][0]["name"])
response = self.app.patch_json(
"/tenders/{}/bids/{}?acc_token={}".format(self.tender_id, bidder["id"], owner_token),
{"data": {"lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id}], "tenderers": [test_organization]}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["lotValues"][0]["date"], lot["date"])
self.assertEqual(response.json["data"]["tenderers"][0]["name"], bidder["tenderers"][0]["name"])
response = self.app.patch_json(
"/tenders/{}/bids/{}?acc_token={}".format(self.tender_id, bidder["id"], owner_token),
{"data": {"lotValues": [{"value": {"amount": 400}, "relatedLot": lot_id}]}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["lotValues"][0]["value"]["amount"], 400)
self.assertNotEqual(response.json["data"]["lotValues"][0]["date"], lot["date"])
self.set_status("complete")
response = self.app.get("/tenders/{}/bids/{}".format(self.tender_id, bidder["id"]))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["lotValues"][0]["value"]["amount"], 400)
response = self.app.patch_json(
"/tenders/{}/bids/{}?acc_token={}".format(self.tender_id, bidder["id"], owner_token),
{"data": {"lotValues": [{"value": {"amount": 500}, "relatedLot": lot_id}]}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Can't update bid in current (complete) tender status")
# TenderLotFeatureBidderResourceTest
def create_tender_bidder_feature_invalid(self):
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
request_path = "/tenders/{}/bids".format(self.tender_id)
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{"description": ["This field is required."], "location": "body", "name": "lotValues"},
{"description": ["All features parameters is required."], "location": "body", "name": "parameters"},
],
)
bid_data["lotValues"] = [{"value": {"amount": 500}}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"relatedLot": ["This field is required."]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": "0" * 32}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"relatedLot": ["relatedLot should be one of lots"]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 5000000}, "relatedLot": self.lot_id}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"value": ["value of bid should be less than value of lot"]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500, "valueAddedTaxIncluded": False}, "relatedLot": self.lot_id}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [
{
"value": [
"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of lot"
]
}
],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500, "currency": "USD"}, "relatedLot": self.lot_id}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"value": ["currency of bid should be identical to currency of value of lot"]}],
"location": "body",
"name": "lotValues",
}
],
)
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": self.lot_id}]
bid_data["tenderers"] = test_organization
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertIn("invalid literal for int() with base 10", response.json["errors"][0]["description"])
bid_data["tenderers"] = [test_organization]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": self.lot_id}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["All features parameters is required."], "location": "body", "name": "parameters"}],
)
bid_data.update({
"lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
"parameters": [{"code": "code_item", "value": 0.01}],
})
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[{"description": ["All features parameters is required."], "location": "body", "name": "parameters"}],
)
bid_data["parameters"] = [{"code": "code_invalid", "value": 0.01}]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"code": ["code should be one of feature code."]}],
"location": "body",
"name": "parameters",
}
],
)
bid_data["parameters"] = [
{"code": "code_item", "value": 0.01},
{"code": "code_tenderer", "value": 0},
{"code": "code_lot", "value": 0.01},
]
response = self.app.post_json(
request_path,
{"data": bid_data},
status=422,
)
self.assertEqual(response.status, "422 Unprocessable Entity")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": [{"value": ["value should be one of feature value."]}],
"location": "body",
"name": "parameters",
}
],
)
def create_tender_bidder_feature(self):
request_path = "/tenders/{}/bids".format(self.tender_id)
bid_data = deepcopy(self.test_bids_data[0])
bid_data.pop("value", None)
bid_data.update({
"lotValues": [{"value": {"amount": 500}, "relatedLot": self.lot_id}],
"parameters": [
{"code": "code_item", "value": 0.01},
{"code": "code_tenderer", "value": 0.01},
{"code": "code_lot", "value": 0.01},
]
})
response = self.app.post_json(
request_path,
{"data": bid_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
bidder = response.json["data"]
self.assertEqual(bidder["tenderers"][0]["name"], test_organization["name"])
self.assertIn("id", bidder)
self.assertIn(bidder["id"], response.headers["Location"])
self.set_status("complete")
response = self.app.post_json(
request_path,
{"data": bid_data},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["errors"][0]["description"], "Can't add bid in current (complete) tender status")
# TenderLotProcessTest
def proc_1lot_1bid(self):
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lot_id = response.json["data"]["id"]
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
response = self.set_status("active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}}]})
self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"subcontractingDetails": "test", "value": {"amount": 500}, "relatedLot": lot_id}]
self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
# switch to active.qualification
self.set_status("active.auction", {"lots": [{"auctionPeriod": {"startDate": None}}], "status": "active.tendering"})
response = self.check_chronograph()
self.assertEqual(response.json["data"]["lots"][0]["status"], "unsuccessful")
self.assertEqual(response.json["data"]["status"], "unsuccessful")
def proc_1lot_1bid_patch(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
self.set_initial_status(response.json)
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lot_id = response.json["data"]["id"]
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
)
self.assertEqual(response.status, "200 OK")
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id}]
bid, bid_token = self.create_bid(tender_id, bid_data)
bid_id = bid["id"]
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(tender_id, lot_id, owner_token),
{"data": {"value": {"amount": 499}, "minimalStep": {"amount": 14.0}}}
)
self.assertEqual(response.status, "200 OK")
response = self.app.get("/tenders/{}/bids/{}?acc_token={}".format(tender_id, bid_id, bid_token))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.json["data"]["status"], "invalid")
def proc_1lot_2bid(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lot_id = response.json["data"]["id"]
self.initial_lots = [response.json["data"]]
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
response = self.set_status("active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}}]})
self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
bid_data["lotValues"] = [{"subcontractingDetails": "test", "value": {"amount": 450}, "relatedLot": lot_id}]
bid, bid_token = self.create_bid(self.tender_id, bid_data)
bid_id = bid["id"]
# create second bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data["lotValues"] = [{"value": {"amount": 475}, "relatedLot": lot_id}]
self.create_bid(self.tender_id, bid_data)
# switch to active.auction
self.set_status("active.auction")
# get auction info
self.app.authorization = ("Basic", ("auction", ""))
response = self.app.get("/tenders/{}/auction".format(tender_id))
auction_bids_data = response.json["data"]["bids"]
# posting auction urls
self.app.patch_json(
"/tenders/{}/auction/{}".format(tender_id, lot_id),
{
"data": {
"lots": [
{"id": i["id"], "auctionUrl": "https://tender.auction.url"} for i in response.json["data"]["lots"]
],
"bids": [
{
"id": i["id"],
"lotValues": [
{
"relatedLot": j["relatedLot"],
"participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
}
for j in i["lotValues"]
],
}
for i in auction_bids_data
],
}
},
)
# view bid participationUrl
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/bids/{}?acc_token={}".format(tender_id, bid_id, bid_token))
self.assertEqual(
response.json["data"]["lotValues"][0]["participationUrl"],
"https://tender.auction.url/for_bid/{}".format(bid_id),
)
# posting auction results
self.app.authorization = ("Basic", ("auction", ""))
self.app.post_json("/tenders/{}/auction/{}".format(tender_id, lot_id),
{"data": {"bids": [
{"id": b["id"], "lotValues": [{"relatedLot": l["relatedLot"]} for l in b["lotValues"]]}
for b in auction_bids_data]}})
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending"][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
# get contract id
response = self.app.get("/tenders/{}".format(tender_id))
contract_id = response.json["data"]["contracts"][-1]["id"]
# after stand slill period
self.set_status("complete", {"status": "active.awarded"})
# time travel
tender = self.db.get(tender_id)
for i in tender.get("awards", []):
i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
self.db.save(tender)
# sign contract
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# check status
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(tender_id))
self.assertEqual(response.json["data"]["lots"][0]["status"], "complete")
self.assertEqual(response.json["data"]["status"], "complete")
def proc_1lot_3bid_1un(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lot_id = response.json["data"]["id"]
self.initial_lots = [response.json["data"]]
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token), {"data": {"items": [{"relatedLot": lot_id}]}}
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
response = self.set_status("active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}}]})
self.assertIn("auctionPeriod", response.json["data"]["lots"][0])
# create bids
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 450}, "relatedLot": lot_id}]
bids_data = {}
for i in range(3):
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
bids_data[response.json["data"]["id"]] = response.json["access"]["token"]
response = self.app.patch_json(
"/tenders/{}/lots/{}?acc_token={}".format(tender_id, lot_id, owner_token), {"data": {"value": {"amount": 1000}}}
)
self.assertEqual(response.status, "200 OK")
# create second bid
for bid_id, bid_token in list(bids_data.items())[:-1]:
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/bids/{}?acc_token={}".format(tender_id, bid_id, bid_token), {"data": {"status": "active"}}
)
# bids_data[response.json['data']['id']] = response.json['access']['token']
# switch to active.auction
self.set_status("active.auction")
# get auction info
self.app.authorization = ("Basic", ("auction", ""))
response = self.app.get("/tenders/{}/auction".format(tender_id))
auction_bids_data = response.json["data"]["bids"]
# posting auction urls
auction_data = {
"data": {
"lots": [
{"id": i["id"], "auctionUrl": "https://tender.auction.url"} for i in response.json["data"]["lots"]
],
"bids": [],
}
}
for i in auction_bids_data:
if i.get("status", "active") == "active":
auction_data["data"]["bids"].append(
{
"id": i["id"],
"lotValues": [
{
"relatedLot": j["relatedLot"],
"participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
}
for j in i["lotValues"]
],
}
)
else:
auction_data["data"]["bids"].append({"id": i["id"]})
self.app.patch_json("/tenders/{}/auction/{}".format(tender_id, lot_id), auction_data)
# posting auction results
self.app.authorization = ("Basic", ("auction", ""))
self.app.post_json("/tenders/{}/auction/{}".format(tender_id, lot_id),
{"data": {"bids": [
{"id": b["id"], "lotValues": [{"relatedLot": l["relatedLot"]} for l in b["lotValues"]]}
for b in auction_bids_data]}})
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending"][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
# get contract id
response = self.app.get("/tenders/{}".format(tender_id))
contract_id = response.json["data"]["contracts"][-1]["id"]
# after stand slill period
self.set_status("complete", {"status": "active.awarded"})
# time travel
tender = self.db.get(tender_id)
for i in tender.get("awards", []):
i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
self.db.save(tender)
# sign contract
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# check status
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(tender_id))
self.assertEqual(response.json["data"]["lots"][0]["status"], "complete")
self.assertEqual(response.json["data"]["status"], "complete")
def proc_2lot_1bid_0com_1can(self):
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
self.set_status(
"active.tendering",
{
"lots": [
{
"auctionPeriod": {
"startDate": (get_now() + timedelta(days=self.days_till_auction_starts)).isoformat()
}
}
for i in lots
]
},
)
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
# switch to active.qualification
self.set_status(
"active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
)
response = self.check_chronograph()
self.assertEqual(response.json["data"]["status"], "unsuccessful")
def proc_2lot_2bid_1lot_del(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
self.initial_lots = lots
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
self.set_status(
"active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}} for i in lots]}
)
# create bid
bids = []
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
response = self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
bids.append(response.json)
# create second bid
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
bids.append(response.json)
response = self.app.delete("/tenders/{}/lots/{}?acc_token={}".format(self.tender_id, lots[0], owner_token))
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
def proc_2lot_1bid_2com_1win(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
self.set_status(
"active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}} for i in lots]}
)
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
# switch to active.qualification
self.set_status(
"active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
)
self.check_chronograph()
for lot_id in lots:
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
if len([i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id]) == 0:
return
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# get contract id
response = self.app.get("/tenders/{}".format(tender_id))
contract_id = response.json["data"]["contracts"][-1]["id"]
# after stand slill period
self.set_status("complete", {"status": "active.awarded"})
# time travel
tender = self.db.get(tender_id)
for i in tender.get("awards", []):
i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
self.db.save(tender)
# sign contract
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# check status
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(tender_id))
self.assertTrue(all([i["status"] == "complete" for i in response.json["data"]["lots"]]))
self.assertEqual(response.json["data"]["status"], "complete")
def proc_2lot_1bid_0com_0win(self):
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
self.set_status(
"active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}} for i in lots]}
)
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
# switch to active.qualification
self.set_status(
"active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
)
response = self.check_chronograph()
self.assertTrue(all([i["status"] == "unsuccessful" for i in response.json["data"]["lots"]]))
self.assertEqual(response.json["data"]["status"], "unsuccessful")
def proc_2lot_1bid_1com_1win(self):
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
self.set_status(
"active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}} for i in lots]}
)
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
# switch to active.qualification
self.set_status(
"active.auction", {"lots": [{"auctionPeriod": {"startDate": None}} for i in lots], "status": "active.tendering"}
)
response = self.check_chronograph()
self.assertEqual(response.json["data"]["status"], "unsuccessful")
def proc_2lot_2bid_2com_2win(self):
self.app.authorization = ("Basic", ("broker", ""))
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
self.initial_lots = lots
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
start_date = get_now() + timedelta(days=self.days_till_auction_starts)
self.set_status(
"active.tendering", {"lots": [{"auctionPeriod": {"startDate": start_date.isoformat()}} for i in lots]}
)
bid_data = deepcopy(self.test_bids_data[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
# create bid
self.app.authorization = ("Basic", ("broker", ""))
self.create_bid(self.tender_id, bid_data)
# create second bid
self.app.authorization = ("Basic", ("broker", ""))
self.create_bid(self.tender_id, bid_data)
# switch to active.auction
self.set_status("active.auction")
# get auction info
self.app.authorization = ("Basic", ("auction", ""))
response = self.app.get("/tenders/{}/auction".format(tender_id))
auction_bids_data = response.json["data"]["bids"]
for lot_id in lots:
# posting auction urls
self.app.patch_json(
"/tenders/{}/auction/{}".format(tender_id, lot_id),
{
"data": {
"lots": [
{"id": i["id"], "auctionUrl": "https://tender.auction.url"}
for i in response.json["data"]["lots"]
],
"bids": [
{
"id": i["id"],
"lotValues": [
{
"relatedLot": j["relatedLot"],
"participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
}
for j in i["lotValues"]
],
}
for i in auction_bids_data
],
}
},
)
# posting auction results
self.app.authorization = ("Basic", ("auction", ""))
response = self.app.post_json(
"/tenders/{}/auction/{}".format(tender_id, lot_id),
{"data": {"bids": [
{"id": b["id"], "lotValues": [{"relatedLot": l["relatedLot"]} for l in b["lotValues"]]}
for b in auction_bids_data]}}
)
# for first lot
lot_id = lots[0]
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
# get contract id
response = self.app.get("/tenders/{}".format(tender_id))
contract_id = response.json["data"]["contracts"][-1]["id"]
# after stand slill period
self.set_status("complete", {"status": "active.awarded"})
# time travel
tender = self.db.get(tender_id)
for i in tender.get("awards", []):
now = get_now().isoformat()
i["complaintPeriod"] = {"startDate": now, "endDate": now}
self.db.save(tender)
# sign contract
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# for second lot
lot_id = lots[1]
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as unsuccessful
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "unsuccessful"}},
)
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
# get contract id
response = self.app.get("/tenders/{}".format(tender_id))
contract_id = response.json["data"]["contracts"][-1]["id"]
# after stand slill period
self.set_status("complete", {"status": "active.awarded"})
# time travel
tender = self.db.get(tender_id)
for i in tender.get("awards", []):
i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
self.db.save(tender)
# sign contract
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# check status
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(tender_id))
self.assertTrue(all([i["status"] == "complete" for i in response.json["data"]["lots"]]))
self.assertEqual(response.json["data"]["status"], "complete")
def lots_features_delete(self):
# create tender
response = self.app.post_json("/tenders", {"data": self.test_features_tender_data})
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
tender = response.json["data"]
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
self.set_initial_status(response.json)
self.assertEqual(tender["features"], self.test_features_tender_data["features"])
# add lot
lots = []
for lot in self.test_lots_data * 2:
response = self.app.post_json("/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": lot})
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
lots.append(response.json["data"]["id"])
# add features
self.app.patch_json(
"/tenders/{}?acc_token={}&opt_pretty=1".format(tender["id"], owner_token),
{
"data": {
"features": [
{
"code": "code_item",
"featureOf": "item",
"relatedItem": "1",
"title": "item feature",
"enum": [{"value": 0.01, "title": "good"}, {"value": 0.02, "title": "best"}],
},
{
"code": "code_lot",
"featureOf": "lot",
"relatedItem": lots[1],
"title": "lot feature",
"enum": [{"value": 0.01, "title": "good"}, {"value": 0.02, "title": "best"}],
},
{
"code": "code_tenderer",
"featureOf": "tenderer",
"title": "tenderer feature",
"enum": [{"value": 0.01, "title": "good"}, {"value": 0.02, "title": "best"}],
},
]
}
},
)
# create bid
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data.update({
"lotValues": [{"value": {"amount": 500}, "relatedLot": lots[1]}],
"parameters": [{"code": "code_lot", "value": 0.01}, {"code": "code_tenderer", "value": 0.01}]
})
response = self.app.post_json(
"/tenders/{}/bids".format(tender_id),
{"data": bid_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
bid_id = response.json["data"]["id"]
bid_token = response.json["access"]["token"]
self.set_responses(self.tender_id, response.json)
# delete features
self.app.patch_json("/tenders/{}?acc_token={}".format(tender["id"], owner_token), {"data": {"features": []}})
response = self.app.get("/tenders/{}?opt_pretty=1".format(tender_id))
self.assertNotIn("features", response.json["data"])
# patch bid without parameters
bid_data["parameters"] = []
response = self.app.patch_json(
"/tenders/{}/bids/{}?acc_token={}".format(tender_id, bid_id, bid_token),
{"data": bid_data},
)
self.assertEqual(response.status, "200 OK")
self.assertNotIn("parameters", response.json["data"])
def proc_2lot_2bid_1claim_1com_1win(self):
# create tender
response = self.app.post_json("/tenders", {"data": self.initial_data})
tender_id = self.tender_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
lots = []
for lot in 2 * self.test_lots_data:
# add lot
response = self.app.post_json(
"/tenders/{}/lots?acc_token={}".format(tender_id, owner_token), {"data": self.test_lots_data[0]}
)
self.assertEqual(response.status, "201 Created")
lots.append(response.json["data"]["id"])
self.initial_lots = lots
# add item
self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [self.initial_data["items"][0] for i in lots]}},
)
# add relatedLot for item
response = self.app.patch_json(
"/tenders/{}?acc_token={}".format(tender_id, owner_token),
{"data": {"items": [{"relatedLot": i} for i in lots]}},
)
self.assertEqual(response.status, "200 OK")
# switch to active.tendering
self.set_status(
"active.tendering",
{"lots": [{"auctionPeriod": {"startDate": (get_now() + timedelta(days=16)).isoformat()}} for i in lots]},
)
# create bid
self.app.authorization = ("Basic", ("broker", ""))
bid_data = deepcopy(test_bids[0])
del bid_data["value"]
bid_data["lotValues"] = [{"value": {"amount": 500}, "relatedLot": lot_id} for lot_id in lots]
bid, bid_token = self.create_bid(self.tender_id, bid_data)
# create second bid
self.app.authorization = ("Basic", ("broker", ""))
self.create_bid(self.tender_id, bid_data)
# switch to active.auction
self.set_status("active.auction")
# get auction info
self.app.authorization = ("Basic", ("auction", ""))
response = self.app.get("/tenders/{}/auction".format(tender_id))
auction_bids_data = response.json["data"]["bids"]
for lot_id in lots:
# posting auction urls
self.app.patch_json(
"/tenders/{}/auction/{}".format(tender_id, lot_id),
{
"data": {
"lots": [
{"id": i["id"], "auctionUrl": "https://tender.auction.url"}
for i in response.json["data"]["lots"]
],
"bids": [
{
"id": i["id"],
"lotValues": [
{
"relatedLot": j["relatedLot"],
"participationUrl": "https://tender.auction.url/for_bid/{}".format(i["id"]),
}
for j in i["lotValues"]
],
}
for i in auction_bids_data
],
}
},
)
# posting auction results
self.app.authorization = ("Basic", ("auction", ""))
response = self.app.post_json(
"/tenders/{}/auction/{}".format(tender_id, lot_id),
{"data": {"bids": [
{"id": b["id"], "lotValues": [{"relatedLot": l["relatedLot"]} for l in b["lotValues"]]}
for b in auction_bids_data]}}
)
# for first lot
lot_id = lots[0]
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
# add complaint
claim = deepcopy(test_claim)
claim["relatedLot"] = lot_id
response = self.app.post_json(
"/tenders/{}/awards/{}/complaints?acc_token={}".format(tender_id, award_id, bid_token),
{
"data": claim
},
)
self.assertEqual(response.status, "201 Created")
# cancel lot
if RELEASE_2020_04_19 < get_now():
self.set_all_awards_complaint_period_end()
cancellation = dict(**test_cancellation)
cancellation.update({
"status": "active",
"cancellationOf": "lot",
"relatedLot": lot_id,
})
response = self.app.post_json(
"/tenders/{}/cancellations?acc_token={}".format(tender_id, owner_token),
{"data": cancellation},
)
self.assertEqual(response.status, "201 Created")
cancellation_id = response.json["data"]["id"]
if get_now() > RELEASE_2020_04_19:
activate_cancellation_after_2020_04_19(self, cancellation_id, tender_id, owner_token)
# for second lot
lot_id = lots[1]
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as unsuccessful
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "unsuccessful"}},
)
# get awards
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}/awards?acc_token={}".format(tender_id, owner_token))
# get pending award
award_id = [i["id"] for i in response.json["data"] if i["status"] == "pending" and i["lotID"] == lot_id][0]
# set award as active
self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(tender_id, award_id, owner_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
# get contract id
response = self.app.get("/tenders/{}".format(tender_id))
contract_id = response.json["data"]["contracts"][-1]["id"]
# after stand slill period
self.set_status("complete", {"status": "active.awarded"})
# time travel
tender = self.db.get(tender_id)
for i in tender.get("awards", []):
i["complaintPeriod"]["endDate"] = i["complaintPeriod"]["startDate"]
self.db.save(tender)
# sign contract
self.app.authorization = ("Basic", ("broker", ""))
self.app.patch_json(
"/tenders/{}/contracts/{}?acc_token={}".format(tender_id, contract_id, owner_token),
{"data": {"status": "active", "value": {"valueAddedTaxIncluded": False}}},
)
# check status
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.get("/tenders/{}".format(tender_id))
self.assertTrue([i["status"] for i in response.json["data"]["lots"]], ["cancelled", "complete"])
self.assertEqual(response.json["data"]["status"], "complete")
| 40.47936
| 130
| 0.598797
| 8,805
| 78,449
| 5.185236
| 0.032822
| 0.03695
| 0.109318
| 0.057802
| 0.944454
| 0.936087
| 0.922705
| 0.908818
| 0.895173
| 0.88617
| 0
| 0.012996
| 0.223151
| 78,449
| 1,937
| 131
| 40.500258
| 0.736163
| 0.04473
| 0
| 0.730069
| 0
| 0
| 0.232985
| 0.0492
| 0
| 0
| 0
| 0
| 0.166981
| 1
| 0.015066
| false
| 0
| 0.005022
| 0
| 0.020716
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9122864a19a16d1576b1dad9a56a3810032a7845
| 2,763
|
py
|
Python
|
test-import-vulnerable-api-master/test-import-vulnerable-api-master/test/test_vapi.py
|
rmit-cyber-ready-cic/Security99
|
2d32865aef91f09b0edac2dd926ce603769052d7
|
[
"MIT"
] | null | null | null |
test-import-vulnerable-api-master/test-import-vulnerable-api-master/test/test_vapi.py
|
rmit-cyber-ready-cic/Security99
|
2d32865aef91f09b0edac2dd926ce603769052d7
|
[
"MIT"
] | null | null | null |
test-import-vulnerable-api-master/test-import-vulnerable-api-master/test/test_vapi.py
|
rmit-cyber-ready-cic/Security99
|
2d32865aef91f09b0edac2dd926ce603769052d7
|
[
"MIT"
] | null | null | null |
import json
import unittest
from test import BaseTestCase
class TestvAPI(BaseTestCase):
def test_tokens_1(self):
headers = { "Content-type": "application/json"}
r = self.client.open(
"/tokens",
method='POST',
data=json.dumps({'username': "blah1'", 'password': 'blah'}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_tokens_2(self):
headers = { "Content-type": "application/json"}
r = self.client.open(
"/tokens",
method='POST',
data=json.dumps({"username": "blah1'", "password": "blah"}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_tokens_3(self):
headers = { "Content-type": "application/json"}
r = self.client.open(
"/tokens",
method='POST',
data=json.dumps({"username": "blah1'", "password": "blah"}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_tokens_4(self):
headers = { "Content-type": "application/json"}
r = self.client.post(
"/tokens",
data=json.dumps({'username': 'blah1\'', "password": "blah"}),
headers=headers)
print(r.status_code, r.data)
self.assertEqual(r.status_code,500)
def test_widget_1(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "4d94fc705cd9b2b36b2280dd543d9004"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah1'}),
headers=headers)
# print(r.status_code, r.data)
self.assertEqual(r.status_code,200)
def test_widget_2(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "4d94fc705cd9b2b36b2280dd543d9004"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah'}),
headers=headers)
self.assertEqual(r.status_code,403)
def test_widget_3(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "tokenwithsinglequote'"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah1'}),
headers=headers)
self.assertEqual(r.status_code,500)
def test_widget_4(self):
headers = { "Content-type": "application/json", "X-Auth-Token": "unknowntoken"}
r = self.client.post(
"/widget",
data=json.dumps({'name': 'blah1'}),
headers=headers)
self.assertEqual(r.status_code,401)
if __name__ == '__main__':
unittest.main()
| 33.695122
| 107
| 0.566051
| 300
| 2,763
| 5.09
| 0.16
| 0.059594
| 0.093648
| 0.115259
| 0.874918
| 0.874918
| 0.874918
| 0.843484
| 0.843484
| 0.736084
| 0
| 0.040602
| 0.277959
| 2,763
| 81
| 108
| 34.111111
| 0.724812
| 0.010134
| 0
| 0.681159
| 0
| 0.043478
| 0.213685
| 0.031101
| 0
| 0
| 0
| 0
| 0.115942
| 1
| 0.115942
| false
| 0.057971
| 0.043478
| 0
| 0.173913
| 0.057971
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
9151a267e4959e56ce6115bcf21955ec8757e9a1
| 110
|
py
|
Python
|
testcgi.py
|
MrSquigy/web-server
|
09918321ff5124e5731517a97cdae1b41b6ee0e7
|
[
"Unlicense"
] | 1
|
2022-03-26T18:08:37.000Z
|
2022-03-26T18:08:37.000Z
|
testcgi.py
|
MrSquigy/web-server
|
09918321ff5124e5731517a97cdae1b41b6ee0e7
|
[
"Unlicense"
] | null | null | null |
testcgi.py
|
MrSquigy/web-server
|
09918321ff5124e5731517a97cdae1b41b6ee0e7
|
[
"Unlicense"
] | null | null | null |
from datetime import datetime
print('<html>\n<body>\n<p>Generated %s</p>\n</body>\n</html>' % datetime.now())
| 36.666667
| 79
| 0.672727
| 19
| 110
| 3.894737
| 0.578947
| 0.135135
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 3
| 79
| 36.666667
| 0.72549
| 0
| 0
| 0
| 1
| 0.5
| 0.477477
| 0.468468
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
e6a868ada37ab9fb27f973b4bfe648387bb4279f
| 30,946
|
py
|
Python
|
python/paddle/fluid/contrib/layers/rnn_impl.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 3
|
2019-07-17T09:30:31.000Z
|
2021-12-27T03:16:55.000Z
|
python/paddle/fluid/contrib/layers/rnn_impl.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 1
|
2019-07-30T05:22:32.000Z
|
2019-07-30T05:22:32.000Z
|
python/paddle/fluid/contrib/layers/rnn_impl.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 4
|
2019-09-30T02:15:34.000Z
|
2019-09-30T02:41:30.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import layers
from paddle.fluid.dygraph import Layer
from paddle.fluid.layers.control_flow import StaticRNN
__all__ = ['BasicGRUUnit', 'basic_gru', 'BasicLSTMUnit', 'basic_lstm']
class BasicGRUUnit(Layer):
"""
****
BasicGRUUnit class, using basic operators to build GRU
The algorithm can be described as the equations below.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
name_scope(string) : The name scope used to identify parameters and biases
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicGRUUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
gru_unit = BasicGRUUnit( "gru_unit", hidden_size )
new_hidden = gru_unit( input, pre_hidden )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32'):
super(BasicGRUUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hiden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._dtype = dtype
def _build_once(self, input, pre_hidden):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._gate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, 2 * self._hiden_size],
dtype=self._dtype)
self._candidate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, self._hiden_size],
dtype=self._dtype)
self._gate_bias = self.create_parameter(
self._bias_attr,
shape=[2 * self._hiden_size],
dtype=self._dtype,
is_bias=True)
self._candidate_bias = self.create_parameter(
self._bias_attr,
shape=[self._hiden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._gate_weight)
gate_input = layers.elementwise_add(gate_input, self._gate_bias)
gate_input = self._gate_activation(gate_input)
r, u = layers.split(gate_input, num_or_sections=2, dim=1)
r_hidden = r * pre_hidden
candidate = layers.matmul(
layers.concat([input, pre_hidden], 1), self._candidate_weight)
candidate = layers.elementwise_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_hidden = u * pre_hidden + (1 - u) * c
return new_hidden
def basic_gru(input,
init_hidden,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32',
name='basic_gru'):
"""
GRU implementation using basic operator, supports multiple layers and bidirection gru.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
input (Variable): GRU input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
if batch_first = True, shape should be ( batch_size x seq_len x hidden_size )
init_hidden(Variable|None): The initial hidden state of the GRU
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to tensor with ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the GRU
num_layers (int): The total number of layers of the GRU
sequence_length (Variabe|None): A Tensor (shape [batch_size]) stores each real length of each instance,
This tensor will be convert to a mask to mask the padding ids
If it's None means NO padding ids
dropout_prob(float|0.0): Dropout prob, dropout ONLY works after rnn output of earch layers,
NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
name(string): name used to identify parameters and biases
Returns:
rnn_out(Tensor),last_hidden(Tensor)
- rnn_out is result of GRU hidden, with shape (seq_len x batch_size x hidden_size) \
if is_bidirec set to True, shape will be ( seq_len x batch_sze x hidden_size*2)
- last_hidden is the hidden state of the last step of GRU \
shape is ( num_layers x batch_size x hidden_size ) \
if is_bidirec set to True, shape will be ( num_layers*2 x batch_size x hidden_size),
can be reshaped to a tensor with shape( num_layers x 2 x batch_size x hidden_size)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_gru
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden = basic_gru( input, pre_hidden, hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input,
shape=[-1, hidden_size],
ref_batch_dim_idx=1)
new_hidden = unit_list[i](step_input, pre_hidden)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
rnn.update_memory(pre_hidden, new_hidden)
rnn.step_output(new_hidden)
step_input = new_hidden
if dropout_prob != None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob, )
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
else:
rnn_out = fw_rnn_out
last_hidden = fw_last_hidden
if batch_first:
rnn_out = fluid.layser.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
def basic_lstm(input,
init_hidden,
init_cell,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype='float32',
name='basic_lstm'):
"""
LSTM implementation using basic operators, supports multiple layers and bidirection LSTM.
.. math::
i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias )
o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
\\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
h_t &= o_t \odot tanh(c_t)
Args:
input (Variable): lstm input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
if batch_first = True, shape should be ( batch_size x seq_len x hidden_size )
init_hidden(Variable|None): The initial hidden state of the LSTM
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
init_cell(Variable|None): The initial hidden state of the LSTM
This is a tensor with shape ( num_layers x batch_size x hidden_size)
if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
and can be reshaped to a tensor with shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the LSTM
num_layers (int): The total number of layers of the LSTM
sequence_length (Variabe|None): A tensor (shape [batch_size]) stores each real length of each instance,
This tensor will be convert to a mask to mask the padding ids
If it's None means NO padding ids
dropout_prob(float|0.0): Dropout prob, dropout ONLY work after rnn output of earch layers,
NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of LSTM unit.
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
forget_bias (float|1.0) : Forget bias used to compute the forget gate
dtype(string): Data type used in this unit
name(string): Name used to identify parameters and biases
Returns:
rnn_out(Tensor), last_hidden(Tensor), last_cell(Tensor)
- rnn_out is the result of LSTM hidden, shape is (seq_len x batch_size x hidden_size) \
if is_bidirec set to True, it's shape will be ( seq_len x batch_sze x hidden_size*2)
- last_hidden is the hidden state of the last step of LSTM \
with shape ( num_layers x batch_size x hidden_size ) \
if is_bidirec set to True, it's shape will be ( num_layers*2 x batch_size x hidden_size),
and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use.
- last_cell is the hidden state of the last step of LSTM \
with shape ( num_layers x batch_size x hidden_size ) \
if is_bidirec set to True, it's shape will be ( num_layers*2 x batch_size x hidden_size),
and can be reshaped to a tensor ( num_layers x 2 x batch_size x hidden_size) to use.
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_lstm
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden, last_cell = basic_lstm( input, pre_hidden, pre_cell, \
hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicLSTMUnit(
new_name,
hidden_size,
param_attr=param_attr,
bias_attr=bias_attr,
gate_activation=gate_activation,
activation=activation,
forget_bias=forget_bias,
dtype=dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicLSTMUnit(
new_name,
hidden_size,
param_attr=param_attr,
bias_attr=bias_attr,
gate_activation=gate_activation,
activation=activation,
forget_bias=forget_bias,
dtype=dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
# convert to [num_layers, 2, batch_size, hidden_size]
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
init_cell = layers.reshape(
init_cell, shape=[num_layers, direc_num, -1, hidden_size])
# forward direction
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
pre_cell = rnn.memory(init=init_cell[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input, shape=[-1, hidden_size])
pre_cell = rnn.memory(
batch_ref=rnn_input, shape=[-1, hidden_size])
new_hidden, new_cell = unit_list[i](step_input, pre_hidden,
pre_cell)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
new_cell = layers.elementwise_mul(
new_cell, step_mask, axis=0) - layers.elementwise_mul(
pre_cell, (step_mask - 1), axis=0)
rnn.update_memory(pre_hidden, new_hidden)
rnn.update_memory(pre_cell, new_cell)
rnn.step_output(new_hidden)
rnn.step_output(new_cell)
step_input = new_hidden
if dropout_prob != None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob,
dropout_implementation='upscale_in_train')
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
last_cell_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i * 2]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_cell = rnn_out[i * 2 + 1]
last_cell = last_cell[-1]
last_cell_array.append(last_cell)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
last_cell_output = layers.concat(last_cell_array, axis=0)
last_cell_output = layers.reshape(
last_cell_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output, last_cell_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden, fw_last_cell = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden, bw_last_cell = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
last_cell = layers.concat([fw_last_cell, bw_last_cell], axis=1)
last_cell = layers.reshape(
last_cell, shape=[num_layers * direc_num, -1, hidden_size])
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden, last_cell
else:
rnn_out = fw_rnn_out
last_hidden = fw_last_hidden
last_cell = fw_last_cell
if batch_first:
rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden, last_cell
class BasicLSTMUnit(Layer):
"""
****
BasicLSTMUnit class, Using basic operator to build LSTM
The algorithm can be described as the code below.
.. math::
i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)
f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget_bias )
o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)
\\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)
c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}
h_t &= o_t \odot tanh(c_t)
- $W$ terms denote weight matrices (e.g. $W_{ix}$ is the matrix
of weights from the input gate to the input)
- The b terms denote bias vectors ($bx_i$ and $bh_i$ are the input gate bias vector).
- sigmoid is the logistic sigmoid function.
- $i, f, o$ and $c$ are the input gate, forget gate, output gate,
and cell activation vectors, respectively, all of which have the same size as
the cell output activation vector $h$.
- The :math:`\odot` is the element-wise product of the vectors.
- :math:`tanh` is the activation functions.
- :math:`\\tilde{c_t}` is also called candidate hidden state,
which is computed based on the current input and the previous hidden state.
Args:
name_scope(string) : The name scope used to identify parameter and bias name
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of LSTM unit.
If it is set to None or one attribute of ParamAttr, lstm_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized as zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cells (actNode).
Default: 'fluid.layers.tanh'
forget_bias(float|1.0): forget bias used when computing forget gate
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicLSTMUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
pre_cell = layers.data( name = "pre_cell", shape=[-1, hidden_size], dtype='float32')
lstm_unit = BasicLSTMUnit( "gru_unit", hidden_size)
new_hidden, new_cell = lstm_unit( input, pre_hidden, pre_cell )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
forget_bias=1.0,
dtype='float32'):
super(BasicLSTMUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hiden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._forget_bias = layers.fill_constant(
[1], dtype=dtype, value=forget_bias)
self._forget_bias.stop_gradient = False
self._dtype = dtype
def _build_once(self, input, pre_hidden, pre_cell):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hiden_size, 4 * self._hiden_size],
dtype=self._dtype)
self._bias = self.create_parameter(
attr=self._bias_attr,
shape=[4 * self._hiden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden, pre_cell):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)
gate_input = layers.elementwise_add(gate_input, self._bias)
i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
new_cell = layers.elementwise_add(
layers.elementwise_mul(
pre_cell,
layers.sigmoid(layers.elementwise_add(f, self._forget_bias))),
layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)))
new_hidden = layers.tanh(new_cell) * layers.sigmoid(o)
return new_hidden, new_cell
| 41.594086
| 118
| 0.592484
| 4,075
| 30,946
| 4.245644
| 0.079755
| 0.041038
| 0.013872
| 0.013988
| 0.835154
| 0.82082
| 0.809433
| 0.793307
| 0.786602
| 0.776545
| 0
| 0.011766
| 0.327118
| 30,946
| 743
| 119
| 41.650067
| 0.819094
| 0.478091
| 0
| 0.73487
| 0
| 0
| 0.011131
| 0
| 0
| 0
| 0
| 0
| 0.005764
| 1
| 0.028818
| false
| 0
| 0.008646
| 0
| 0.066282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6ac135aa95150550e46ff4b34af6df72ba00991
| 208
|
py
|
Python
|
rdc/__init__.py
|
garydoranjr/rdc
|
f9f61d2425d17a1530452ffcd720bf9419dd296d
|
[
"BSD-3-Clause"
] | 7
|
2019-10-20T02:41:34.000Z
|
2022-03-15T08:27:44.000Z
|
rdc/__init__.py
|
garydoranjr/rdc
|
f9f61d2425d17a1530452ffcd720bf9419dd296d
|
[
"BSD-3-Clause"
] | 1
|
2019-04-30T13:15:59.000Z
|
2019-05-13T20:11:36.000Z
|
rdc/__init__.py
|
garydoranjr/rdc
|
f9f61d2425d17a1530452ffcd720bf9419dd296d
|
[
"BSD-3-Clause"
] | 3
|
2019-11-05T19:13:52.000Z
|
2020-11-19T11:01:25.000Z
|
"""
Implements the Randomized Dependence Coefficient
David Lopez-Paz, Philipp Hennig, Bernhard Schoelkopf
http://papers.nips.cc/paper/5138-the-randomized-dependence-coefficient.pdf
"""
from .rdc import rdc
| 23.111111
| 74
| 0.798077
| 27
| 208
| 6.148148
| 0.814815
| 0.156627
| 0.277108
| 0.409639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.096154
| 208
| 8
| 75
| 26
| 0.861702
| 0.850962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e6f6fe410a3c51e99f7ebc16719e8ead960defb0
| 33,558
|
py
|
Python
|
sdk/python/pulumi_aws/imagebuilder/component.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/imagebuilder/component.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/imagebuilder/component.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ComponentArgs', 'Component']
@pulumi.input_type
class ComponentArgs:
def __init__(__self__, *,
platform: pulumi.Input[str],
version: pulumi.Input[str],
change_description: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
supported_os_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
uri: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Component resource.
:param pulumi.Input[str] platform: Platform of the component.
:param pulumi.Input[str] version: Version of the component.
:param pulumi.Input[str] change_description: Change description of the component.
:param pulumi.Input[str] data: Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[str] description: Description of the component.
:param pulumi.Input[str] kms_key_id: Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
:param pulumi.Input[str] name: Name of the component.
:param pulumi.Input[Sequence[pulumi.Input[str]]] supported_os_versions: Set of Operating Systems (OS) supported by the component.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] uri: S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
"""
pulumi.set(__self__, "platform", platform)
pulumi.set(__self__, "version", version)
if change_description is not None:
pulumi.set(__self__, "change_description", change_description)
if data is not None:
pulumi.set(__self__, "data", data)
if description is not None:
pulumi.set(__self__, "description", description)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if supported_os_versions is not None:
pulumi.set(__self__, "supported_os_versions", supported_os_versions)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def platform(self) -> pulumi.Input[str]:
"""
Platform of the component.
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: pulumi.Input[str]):
pulumi.set(self, "platform", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of the component.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="changeDescription")
def change_description(self) -> Optional[pulumi.Input[str]]:
"""
Change description of the component.
"""
return pulumi.get(self, "change_description")
@change_description.setter
def change_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "change_description", value)
@property
@pulumi.getter
def data(self) -> Optional[pulumi.Input[str]]:
"""
Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the component.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the component.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="supportedOsVersions")
def supported_os_versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Set of Operating Systems (OS) supported by the component.
"""
return pulumi.get(self, "supported_os_versions")
@supported_os_versions.setter
def supported_os_versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "supported_os_versions", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@pulumi.input_type
class _ComponentState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
change_description: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
date_created: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
encrypted: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
owner: Optional[pulumi.Input[str]] = None,
platform: Optional[pulumi.Input[str]] = None,
supported_os_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Component resources.
:param pulumi.Input[str] arn: (Required) Amazon Resource Name (ARN) of the component.
:param pulumi.Input[str] change_description: Change description of the component.
:param pulumi.Input[str] data: Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[str] date_created: Date the component was created.
:param pulumi.Input[str] description: Description of the component.
:param pulumi.Input[bool] encrypted: Encryption status of the component.
:param pulumi.Input[str] kms_key_id: Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
:param pulumi.Input[str] name: Name of the component.
:param pulumi.Input[str] owner: Owner of the component.
:param pulumi.Input[str] platform: Platform of the component.
:param pulumi.Input[Sequence[pulumi.Input[str]]] supported_os_versions: Set of Operating Systems (OS) supported by the component.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
:param pulumi.Input[str] type: Type of the component.
:param pulumi.Input[str] uri: S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
:param pulumi.Input[str] version: Version of the component.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if change_description is not None:
pulumi.set(__self__, "change_description", change_description)
if data is not None:
pulumi.set(__self__, "data", data)
if date_created is not None:
pulumi.set(__self__, "date_created", date_created)
if description is not None:
pulumi.set(__self__, "description", description)
if encrypted is not None:
pulumi.set(__self__, "encrypted", encrypted)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if owner is not None:
pulumi.set(__self__, "owner", owner)
if platform is not None:
pulumi.set(__self__, "platform", platform)
if supported_os_versions is not None:
pulumi.set(__self__, "supported_os_versions", supported_os_versions)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if type is not None:
pulumi.set(__self__, "type", type)
if uri is not None:
pulumi.set(__self__, "uri", uri)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
(Required) Amazon Resource Name (ARN) of the component.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="changeDescription")
def change_description(self) -> Optional[pulumi.Input[str]]:
"""
Change description of the component.
"""
return pulumi.get(self, "change_description")
@change_description.setter
def change_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "change_description", value)
@property
@pulumi.getter
def data(self) -> Optional[pulumi.Input[str]]:
"""
Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data", value)
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> Optional[pulumi.Input[str]]:
"""
Date the component was created.
"""
return pulumi.get(self, "date_created")
@date_created.setter
def date_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "date_created", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the component.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def encrypted(self) -> Optional[pulumi.Input[bool]]:
"""
Encryption status of the component.
"""
return pulumi.get(self, "encrypted")
@encrypted.setter
def encrypted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypted", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the component.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def owner(self) -> Optional[pulumi.Input[str]]:
"""
Owner of the component.
"""
return pulumi.get(self, "owner")
@owner.setter
def owner(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner", value)
@property
@pulumi.getter
def platform(self) -> Optional[pulumi.Input[str]]:
"""
Platform of the component.
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "platform", value)
@property
@pulumi.getter(name="supportedOsVersions")
def supported_os_versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Set of Operating Systems (OS) supported by the component.
"""
return pulumi.get(self, "supported_os_versions")
@supported_os_versions.setter
def supported_os_versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "supported_os_versions", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider .
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type of the component.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the component.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class Component(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
change_description: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
platform: Optional[pulumi.Input[str]] = None,
supported_os_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
uri: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Image Builder Component.
## Example Usage
### URI Document
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.Component("example",
platform="Linux",
uri=f"s3://{aws_s3_object['example']['bucket']}/{aws_s3_object['example']['key']}",
version="1.0.0")
```
## Import
`aws_imagebuilder_components` resources can be imported by using the Amazon Resource Name (ARN), e.g.,
```sh
$ pulumi import aws:imagebuilder/component:Component example arn:aws:imagebuilder:us-east-1:123456789012:component/example/1.0.0/1
```
Certain resource arguments, such as `uri`, cannot be read via the API and imported into the provider. The provider will display a difference for these arguments the first run after import if declared in the the provider configuration for an imported resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] change_description: Change description of the component.
:param pulumi.Input[str] data: Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[str] description: Description of the component.
:param pulumi.Input[str] kms_key_id: Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
:param pulumi.Input[str] name: Name of the component.
:param pulumi.Input[str] platform: Platform of the component.
:param pulumi.Input[Sequence[pulumi.Input[str]]] supported_os_versions: Set of Operating Systems (OS) supported by the component.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] uri: S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
:param pulumi.Input[str] version: Version of the component.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ComponentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Image Builder Component.
## Example Usage
### URI Document
```python
import pulumi
import pulumi_aws as aws
example = aws.imagebuilder.Component("example",
platform="Linux",
uri=f"s3://{aws_s3_object['example']['bucket']}/{aws_s3_object['example']['key']}",
version="1.0.0")
```
## Import
`aws_imagebuilder_components` resources can be imported by using the Amazon Resource Name (ARN), e.g.,
```sh
$ pulumi import aws:imagebuilder/component:Component example arn:aws:imagebuilder:us-east-1:123456789012:component/example/1.0.0/1
```
Certain resource arguments, such as `uri`, cannot be read via the API and imported into the provider. The provider will display a difference for these arguments the first run after import if declared in the the provider configuration for an imported resource.
:param str resource_name: The name of the resource.
:param ComponentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ComponentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
change_description: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
platform: Optional[pulumi.Input[str]] = None,
supported_os_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
uri: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ComponentArgs.__new__(ComponentArgs)
__props__.__dict__["change_description"] = change_description
__props__.__dict__["data"] = data
__props__.__dict__["description"] = description
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["name"] = name
if platform is None and not opts.urn:
raise TypeError("Missing required property 'platform'")
__props__.__dict__["platform"] = platform
__props__.__dict__["supported_os_versions"] = supported_os_versions
__props__.__dict__["tags"] = tags
__props__.__dict__["uri"] = uri
if version is None and not opts.urn:
raise TypeError("Missing required property 'version'")
__props__.__dict__["version"] = version
__props__.__dict__["arn"] = None
__props__.__dict__["date_created"] = None
__props__.__dict__["encrypted"] = None
__props__.__dict__["owner"] = None
__props__.__dict__["tags_all"] = None
__props__.__dict__["type"] = None
super(Component, __self__).__init__(
'aws:imagebuilder/component:Component',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
change_description: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
date_created: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
encrypted: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
owner: Optional[pulumi.Input[str]] = None,
platform: Optional[pulumi.Input[str]] = None,
supported_os_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
uri: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None) -> 'Component':
"""
Get an existing Component resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: (Required) Amazon Resource Name (ARN) of the component.
:param pulumi.Input[str] change_description: Change description of the component.
:param pulumi.Input[str] data: Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
:param pulumi.Input[str] date_created: Date the component was created.
:param pulumi.Input[str] description: Description of the component.
:param pulumi.Input[bool] encrypted: Encryption status of the component.
:param pulumi.Input[str] kms_key_id: Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
:param pulumi.Input[str] name: Name of the component.
:param pulumi.Input[str] owner: Owner of the component.
:param pulumi.Input[str] platform: Platform of the component.
:param pulumi.Input[Sequence[pulumi.Input[str]]] supported_os_versions: Set of Operating Systems (OS) supported by the component.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
:param pulumi.Input[str] type: Type of the component.
:param pulumi.Input[str] uri: S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
:param pulumi.Input[str] version: Version of the component.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ComponentState.__new__(_ComponentState)
__props__.__dict__["arn"] = arn
__props__.__dict__["change_description"] = change_description
__props__.__dict__["data"] = data
__props__.__dict__["date_created"] = date_created
__props__.__dict__["description"] = description
__props__.__dict__["encrypted"] = encrypted
__props__.__dict__["kms_key_id"] = kms_key_id
__props__.__dict__["name"] = name
__props__.__dict__["owner"] = owner
__props__.__dict__["platform"] = platform
__props__.__dict__["supported_os_versions"] = supported_os_versions
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["type"] = type
__props__.__dict__["uri"] = uri
__props__.__dict__["version"] = version
return Component(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
(Required) Amazon Resource Name (ARN) of the component.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="changeDescription")
def change_description(self) -> pulumi.Output[Optional[str]]:
"""
Change description of the component.
"""
return pulumi.get(self, "change_description")
@property
@pulumi.getter
def data(self) -> pulumi.Output[str]:
"""
Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. the provider will only perform drift detection of its value when present in a configuration.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> pulumi.Output[str]:
"""
Date the component was created.
"""
return pulumi.get(self, "date_created")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the component.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def encrypted(self) -> pulumi.Output[bool]:
"""
Encryption status of the component.
"""
return pulumi.get(self, "encrypted")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
"""
Amazon Resource Name (ARN) of the Key Management Service (KMS) Key used to encrypt the component.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the component.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def owner(self) -> pulumi.Output[str]:
"""
Owner of the component.
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter
def platform(self) -> pulumi.Output[str]:
"""
Platform of the component.
"""
return pulumi.get(self, "platform")
@property
@pulumi.getter(name="supportedOsVersions")
def supported_os_versions(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Set of Operating Systems (OS) supported by the component.
"""
return pulumi.get(self, "supported_os_versions")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags for the component. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider .
"""
return pulumi.get(self, "tags_all")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the component.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def uri(self) -> pulumi.Output[Optional[str]]:
"""
S3 URI with data of the component. Exactly one of `data` and `uri` can be specified.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
"""
Version of the component.
"""
return pulumi.get(self, "version")
| 42.640407
| 268
| 0.636033
| 4,046
| 33,558
| 5.098616
| 0.054128
| 0.107712
| 0.109942
| 0.08745
| 0.888313
| 0.863832
| 0.829415
| 0.816375
| 0.811673
| 0.787581
| 0
| 0.002156
| 0.253591
| 33,558
| 786
| 269
| 42.694656
| 0.821423
| 0.333155
| 0
| 0.72949
| 1
| 0
| 0.073921
| 0.010949
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166297
| false
| 0.002217
| 0.011086
| 0
| 0.279379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fc300ce9b3c8141fcce2a7271c60d5a21e1feff8
| 101
|
py
|
Python
|
test_package/tests/test_something.py
|
humnaawan/dsfp-testrepo
|
8efd116c8b860a3e2bce4ac2dfdbab05c557a90c
|
[
"MIT"
] | null | null | null |
test_package/tests/test_something.py
|
humnaawan/dsfp-testrepo
|
8efd116c8b860a3e2bce4ac2dfdbab05c557a90c
|
[
"MIT"
] | null | null | null |
test_package/tests/test_something.py
|
humnaawan/dsfp-testrepo
|
8efd116c8b860a3e2bce4ac2dfdbab05c557a90c
|
[
"MIT"
] | 1
|
2018-11-06T20:48:50.000Z
|
2018-11-06T20:48:50.000Z
|
import test_package
def test_something_func():
assert test_package.do_something(a=2, b=5) == 7
| 16.833333
| 51
| 0.742574
| 17
| 101
| 4.117647
| 0.764706
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034884
| 0.148515
| 101
| 5
| 52
| 20.2
| 0.77907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fc40f8cb36efce4b11b6cca62c4fd2cd064cc4b7
| 229
|
py
|
Python
|
demo_files/chuck_rider/ignore/array_multifier.py
|
zeffii/ChucKScripts
|
7de8207284bb7c7b7b40c4ae7ac3e3878cafbfa4
|
[
"MIT"
] | 7
|
2015-01-13T21:49:58.000Z
|
2022-01-31T02:31:27.000Z
|
demo_files/chuck_rider/ignore/array_multifier.py
|
zeffii/ChucKScripts
|
7de8207284bb7c7b7b40c4ae7ac3e3878cafbfa4
|
[
"MIT"
] | null | null | null |
demo_files/chuck_rider/ignore/array_multifier.py
|
zeffii/ChucKScripts
|
7de8207284bb7c7b7b40c4ae7ac3e3878cafbfa4
|
[
"MIT"
] | null | null | null |
tb = [54, 0,55,54,61,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
66, 0,64,66,61, 0, 0, 0, 0, 0, 0 ,0, 0, 0, 0, 0,
54, 0,55,54,61, 0,66, 0,64]
expander = lambda i: [i, 300] if i > 0 else [0,0]
tkm = [expander(i) for i in tb]
print(tkm)
| 25.444444
| 49
| 0.497817
| 62
| 229
| 1.83871
| 0.274194
| 0.368421
| 0.473684
| 0.561404
| 0.377193
| 0.377193
| 0.22807
| 0.22807
| 0.22807
| 0.22807
| 0
| 0.352601
| 0.244541
| 229
| 9
| 50
| 25.444444
| 0.306358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc87393ccc76e95e454a083aa23cbd4ef2e837dd
| 4,091
|
py
|
Python
|
common/misc.py
|
yasinyazici/EMA_GAN
|
fd296d600d9404a99feaece8611ca6ad4eb4ee46
|
[
"MIT"
] | 28
|
2018-10-10T03:07:03.000Z
|
2022-03-29T02:41:55.000Z
|
common/misc.py
|
yasinyazici/EMA_GAN
|
fd296d600d9404a99feaece8611ca6ad4eb4ee46
|
[
"MIT"
] | 1
|
2019-10-28T03:22:29.000Z
|
2020-04-03T23:51:08.000Z
|
common/misc.py
|
yasinyazici/EMA_GAN
|
fd296d600d9404a99feaece8611ca6ad4eb4ee46
|
[
"MIT"
] | 2
|
2019-12-22T03:07:06.000Z
|
2020-06-13T04:00:17.000Z
|
# from https://github.com/chainer/chainerrl/blob/f119a1fe210dd31ea123d244258d9b5edc21fba4/chainerrl/misc/copy_param.py
from chainer import links as L
import chainer
def copy_param(target_link, source_link):
"""Copy parameters of a link to another link."""
target_params = dict(target_link.namedparams())
for param_name, param in source_link.namedparams():
target_params[param_name].data[:] = param.data
# Copy Batch Normalization's statistics
target_links = dict(target_link.namedlinks())
for link_name, link in source_link.namedlinks():
if isinstance(link, L.BatchNormalization):
target_bn = target_links[link_name]
target_bn.avg_mean[:] = link.avg_mean
target_bn.avg_var[:] = link.avg_var
def soft_copy_param(target_link, source_link, tau):
"""Soft-copy parameters of a link to another link."""
target_params = dict(target_link.namedparams())
for param_name, param in source_link.namedparams():
target_params[param_name].data[:] *= (1 - tau)
target_params[param_name].data[:] += tau * param.data
# Soft-copy Batch Normalization's statistics
target_links = dict(target_link.namedlinks())
for link_name, link in source_link.namedlinks():
if isinstance(link, L.BatchNormalization):
target_bn = target_links[link_name]
target_bn.avg_mean[:] *= (1 - tau)
target_bn.avg_mean[:] += tau * link.avg_mean
target_bn.avg_var[:] *= (1 - tau)
target_bn.avg_var[:] += tau * link.avg_var
def soft_copy_param_init(target_link, source_link, tau):
"""Soft-copy parameters of a link to another link."""
target_params = dict(target_link.namedparams())
for param_name, param in source_link.namedparams():
if param_name in ['/c0/b','/c0/W','/bn0/beta','/bn0/gamma']:
target_params[param_name].data[:] *= (1 - tau)
target_params[param_name].data[:] += tau * param.data
else:
target_params[param_name].data[:] = param.data
# Copy Batch Normalization's statistics
target_links = dict(target_link.namedlinks())
for link_name, link in source_link.namedlinks():
if isinstance(link, L.BatchNormalization):
if param_name in ['/bn0']:
target_bn = target_links[link_name]
target_bn.avg_mean[:] *= (1 - tau)
target_bn.avg_mean[:] += tau * link.avg_mean
target_bn.avg_var[:] *= (1 - tau)
target_bn.avg_var[:] += tau * link.avg_var
else:
target_bn = target_links[link_name]
target_bn.avg_mean[:] = link.avg_mean
target_bn.avg_var[:] = link.avg_var
def average_param(target_link, source_link, n_model):
"""Soft-copy parameters of a link to another link."""
target_params = dict(target_link.namedparams())
for param_name, param in source_link.namedparams():
target_params[param_name].data[:] *= (1.0*n_model/(n_model+1))
target_params[param_name].data[:] += (1.0/(n_model+1)) * param.data
# average Batch Normalization's statistics (Should we stick with EMA for BacthNorm?)
target_links = dict(target_link.namedlinks())
for link_name, link in source_link.namedlinks():
if isinstance(link, L.BatchNormalization):
target_bn = target_links[link_name]
#target_bn.avg_mean[:] *= (1 - tau)
#target_bn.avg_mean[:] += tau * link.avg_mean
#target_bn.avg_var[:] *= (1 - tau)
#target_bn.avg_var[:] += tau * link.avg_var
target_bn.avg_mean[:] *= (1.0*n_model/(n_model+1))
target_bn.avg_mean[:] += (1.0/(n_model+1)) * link.avg_mean
target_bn.avg_var[:] *= (1.0*n_model/(n_model+1))
target_bn.avg_var[:] += (1.0/(n_model+1)) * link.avg_var
def inc_batch(mul=2):
@chainer.training.make_extension()
def increase(trainer):
trainer.updater.get_iterator('main').batchsize *=mul
return increase
| 45.455556
| 118
| 0.634075
| 547
| 4,091
| 4.47532
| 0.144424
| 0.081699
| 0.089869
| 0.061275
| 0.829248
| 0.818627
| 0.806781
| 0.791258
| 0.762663
| 0.746324
| 0
| 0.017617
| 0.236861
| 4,091
| 90
| 119
| 45.455556
| 0.766496
| 0.161085
| 0
| 0.703125
| 0
| 0
| 0.01087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.03125
| 0
| 0.140625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d83eaae4610f9cf2b2ed22875a23883b86fc100
| 114,571
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_sslsshprofile.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_sslsshprofile.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_sslsshprofile.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_sslsshprofile
short_description: Configure SSL/SSH protocol options.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
firewall_sslsshprofile:
description: the top level parameters set
required: false
type: dict
suboptions:
caname:
type: str
description: 'CA certificate used by SSL Inspection.'
comment:
type: str
description: 'Optional comments.'
mapi-over-https:
type: str
description: 'Enable/disable inspection of MAPI over HTTPS.'
choices:
- 'disable'
- 'enable'
name:
type: str
description: 'Name.'
rpc-over-https:
type: str
description: 'Enable/disable inspection of RPC over HTTPS.'
choices:
- 'disable'
- 'enable'
server-cert:
type: str
description: 'Certificate used by SSL Inspection to replace server certificate.'
server-cert-mode:
type: str
description: 'Re-sign or replace the servers certificate.'
choices:
- 're-sign'
- 'replace'
ssl-anomalies-log:
type: str
description: 'Enable/disable logging SSL anomalies.'
choices:
- 'disable'
- 'enable'
ssl-exempt:
description: 'Ssl-Exempt.'
type: list
suboptions:
address:
type: str
description: 'IPv4 address object.'
address6:
type: str
description: 'IPv6 address object.'
fortiguard-category:
type: str
description: 'FortiGuard category ID.'
id:
type: int
description: 'ID number.'
regex:
type: str
description: 'Exempt servers by regular expression.'
type:
type: str
description: 'Type of address object (IPv4 or IPv6) or FortiGuard category.'
choices:
- 'fortiguard-category'
- 'address'
- 'address6'
- 'wildcard-fqdn'
- 'regex'
wildcard-fqdn:
type: str
description: 'Exempt servers by wildcard FQDN.'
ssl-exemptions-log:
type: str
description: 'Enable/disable logging SSL exemptions.'
choices:
- 'disable'
- 'enable'
ssl-server:
description: 'Ssl-Server.'
type: list
suboptions:
ftps-client-cert-request:
type: str
description: 'Action based on client certificate request during the FTPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
https-client-cert-request:
type: str
description: 'Action based on client certificate request during the HTTPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
id:
type: int
description: 'SSL server ID.'
imaps-client-cert-request:
type: str
description: 'Action based on client certificate request during the IMAPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
ip:
type: str
description: 'IPv4 address of the SSL server.'
pop3s-client-cert-request:
type: str
description: 'Action based on client certificate request during the POP3S handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
smtps-client-cert-request:
type: str
description: 'Action based on client certificate request during the SMTPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
ssl-other-client-cert-request:
type: str
description: 'Action based on client certificate request during an SSL protocol handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
ftps-client-certificate:
type: str
description: 'Action based on received client certificate during the FTPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
https-client-certificate:
type: str
description: 'Action based on received client certificate during the HTTPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
imaps-client-certificate:
type: str
description: 'Action based on received client certificate during the IMAPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
pop3s-client-certificate:
type: str
description: 'Action based on received client certificate during the POP3S handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
smtps-client-certificate:
type: str
description: 'Action based on received client certificate during the SMTPS handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
ssl-other-client-certificate:
type: str
description: 'Action based on received client certificate during an SSL protocol handshake.'
choices:
- 'bypass'
- 'inspect'
- 'block'
untrusted-caname:
type: str
description: 'Untrusted CA certificate used by SSL Inspection.'
use-ssl-server:
type: str
description: 'Enable/disable the use of SSL server table for SSL offloading.'
choices:
- 'disable'
- 'enable'
whitelist:
type: str
description: 'Enable/disable exempting servers by FortiGuard whitelist.'
choices:
- 'disable'
- 'enable'
block-blacklisted-certificates:
type: str
description: 'Enable/disable blocking SSL-based botnet communication by FortiGuard certificate blacklist.'
choices:
- 'disable'
- 'enable'
ssl-negotiation-log:
type: str
description: 'Enable/disable logging SSL negotiation.'
choices:
- 'disable'
- 'enable'
ftps:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
ports:
description: 'Ports to use for scanning (1 - 65535, default = 443).'
type: int
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'disable'
- 'enable'
- 'strict'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'deep-inspection'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'allow'
- 'block'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'allow'
- 'block'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
https:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
ports:
description: 'Ports to use for scanning (1 - 65535, default = 443).'
type: int
proxy-after-tcp-handshake:
type: str
description: 'Proxy traffic after the TCP 3-way handshake has been established (not before).'
choices:
- 'disable'
- 'enable'
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'disable'
- 'enable'
- 'strict'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'certificate-inspection'
- 'deep-inspection'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'allow'
- 'block'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'allow'
- 'block'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-probe-failure:
type: str
description: 'Action based on certificate probe failure.'
choices:
- 'block'
- 'allow'
imaps:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
ports:
description: 'Ports to use for scanning (1 - 65535, default = 443).'
type: int
proxy-after-tcp-handshake:
type: str
description: 'Proxy traffic after the TCP 3-way handshake has been established (not before).'
choices:
- 'disable'
- 'enable'
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'disable'
- 'enable'
- 'strict'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'deep-inspection'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'allow'
- 'block'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'allow'
- 'block'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
pop3s:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
ports:
description: 'Ports to use for scanning (1 - 65535, default = 443).'
type: int
proxy-after-tcp-handshake:
type: str
description: 'Proxy traffic after the TCP 3-way handshake has been established (not before).'
choices:
- 'disable'
- 'enable'
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'disable'
- 'enable'
- 'strict'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'deep-inspection'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'allow'
- 'block'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'allow'
- 'block'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
smtps:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
ports:
description: 'Ports to use for scanning (1 - 65535, default = 443).'
type: int
proxy-after-tcp-handshake:
type: str
description: 'Proxy traffic after the TCP 3-way handshake has been established (not before).'
choices:
- 'disable'
- 'enable'
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'disable'
- 'enable'
- 'strict'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'deep-inspection'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'allow'
- 'block'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'allow'
- 'block'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
ssh:
description: no description
type: dict
required: false
suboptions:
inspect-all:
type: str
description: 'Level of SSL inspection.'
choices:
- 'disable'
- 'deep-inspection'
ports:
description: 'Ports to use for scanning (1 - 65535, default = 443).'
type: int
proxy-after-tcp-handshake:
type: str
description: 'Proxy traffic after the TCP 3-way handshake has been established (not before).'
choices:
- 'disable'
- 'enable'
ssh-algorithm:
type: str
description: 'Relative strength of encryption algorithms accepted during negotiation.'
choices:
- 'compatible'
- 'high-encryption'
ssh-tun-policy-check:
type: str
description: 'Enable/disable SSH tunnel policy check.'
choices:
- 'disable'
- 'enable'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'deep-inspection'
unsupported-version:
type: str
description: 'Action based on SSH version being unsupported.'
choices:
- 'block'
- 'bypass'
ssl:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
inspect-all:
type: str
description: 'Level of SSL inspection.'
choices:
- 'disable'
- 'certificate-inspection'
- 'deep-inspection'
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'disable'
- 'enable'
- 'strict'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'allow'
- 'block'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'allow'
- 'block'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
allowlist:
type: str
description: 'Enable/disable exempting servers by FortiGuard allowlist.'
choices:
- 'disable'
- 'enable'
block-blocklisted-certificates:
type: str
description: 'Enable/disable blocking SSL-based botnet communication by FortiGuard certificate blocklist.'
choices:
- 'disable'
- 'enable'
dot:
description: no description
type: dict
required: false
suboptions:
cert-validation-failure:
type: str
description: 'Action based on certificate validation failure.'
choices:
- 'allow'
- 'block'
- 'ignore'
cert-validation-timeout:
type: str
description: 'Action based on certificate validation timeout.'
choices:
- 'allow'
- 'block'
- 'ignore'
client-certificate:
type: str
description: 'Action based on received client certificate.'
choices:
- 'bypass'
- 'inspect'
- 'block'
expired-server-cert:
type: str
description: 'Action based on server certificate is expired.'
choices:
- 'allow'
- 'block'
- 'ignore'
proxy-after-tcp-handshake:
type: str
description: 'Proxy traffic after the TCP 3-way handshake has been established (not before).'
choices:
- 'disable'
- 'enable'
revoked-server-cert:
type: str
description: 'Action based on server certificate is revoked.'
choices:
- 'allow'
- 'block'
- 'ignore'
sni-server-cert-check:
type: str
description: 'Check the SNI in the client hello message with the CN or SAN fields in the returned server certificate.'
choices:
- 'enable'
- 'strict'
- 'disable'
status:
type: str
description: 'Configure protocol inspection status.'
choices:
- 'disable'
- 'deep-inspection'
unsupported-ssl-cipher:
type: str
description: 'Action based on the SSL cipher used being unsupported.'
choices:
- 'block'
- 'allow'
unsupported-ssl-negotiation:
type: str
description: 'Action based on the SSL negotiation used being unsupported.'
choices:
- 'block'
- 'allow'
untrusted-server-cert:
type: str
description: 'Action based on server certificate is not issued by a trusted CA.'
choices:
- 'allow'
- 'block'
- 'ignore'
supported-alpn:
type: str
description: 'Configure ALPN option.'
choices:
- 'none'
- 'http1-1'
- 'http2'
- 'all'
'''
EXAMPLES = '''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure SSL/SSH protocol options.
fmgr_firewall_sslsshprofile:
bypass_validation: False
adom: ansible
state: present
firewall_sslsshprofile:
comment: 'ansible-comment1'
mapi-over-https: disable #<value in [disable, enable]>
name: 'ansible-test'
use-ssl-server: disable #<value in [disable, enable]>
whitelist: enable #<value in [disable, enable]>
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the SSL/SSH protocol options
fmgr_fact:
facts:
selector: 'firewall_sslsshprofile'
params:
adom: 'ansible'
ssl-ssh-profile: ''
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile',
'/pm/config/global/obj/firewall/ssl-ssh-profile'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/ssl-ssh-profile/{ssl-ssh-profile}',
'/pm/config/global/obj/firewall/ssl-ssh-profile/{ssl-ssh-profile}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'firewall_sslsshprofile': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'caname': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'comment': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'mapi-over-https': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'name': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'rpc-over-https': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'server-cert': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'server-cert-mode': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
're-sign',
'replace'
],
'type': 'str'
},
'ssl-anomalies-log': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-exempt': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'list',
'options': {
'address': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'address6': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'fortiguard-category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'regex': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'fortiguard-category',
'address',
'address6',
'wildcard-fqdn',
'regex'
],
'type': 'str'
},
'wildcard-fqdn': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
},
'ssl-exemptions-log': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-server': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'list',
'options': {
'ftps-client-cert-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'https-client-cert-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'imaps-client-cert-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'pop3s-client-cert-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'smtps-client-cert-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'ssl-other-client-cert-request': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'ftps-client-certificate': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'https-client-certificate': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'imaps-client-certificate': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'pop3s-client-certificate': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'smtps-client-certificate': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'ssl-other-client-certificate': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
}
}
},
'untrusted-caname': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'use-ssl-server': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'whitelist': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-blacklisted-certificates': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-negotiation-log': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ftps': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'ports': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'revoked-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable',
'strict'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
}
}
},
'https': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'ports': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'proxy-after-tcp-handshake': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'revoked-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable',
'strict'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'certificate-inspection',
'deep-inspection'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-probe-failure': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'block',
'allow'
],
'type': 'str'
}
}
},
'imaps': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'ports': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'proxy-after-tcp-handshake': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'revoked-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable',
'strict'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
}
}
},
'pop3s': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'ports': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'proxy-after-tcp-handshake': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'revoked-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable',
'strict'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
}
}
},
'smtps': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'ports': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'proxy-after-tcp-handshake': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'revoked-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable',
'strict'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
}
}
},
'ssh': {
'required': False,
'type': 'dict',
'options': {
'inspect-all': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'ports': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'proxy-after-tcp-handshake': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssh-algorithm': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'compatible',
'high-encryption'
],
'type': 'str'
},
'ssh-tun-policy-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'unsupported-version': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'block',
'bypass'
],
'type': 'str'
}
}
},
'ssl': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'inspect-all': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'certificate-inspection',
'deep-inspection'
],
'type': 'str'
},
'revoked-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable',
'strict'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
}
}
},
'allowlist': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-blocklisted-certificates': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dot': {
'required': False,
'type': 'dict',
'options': {
'cert-validation-failure': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'cert-validation-timeout': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'client-certificate': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'bypass',
'inspect',
'block'
],
'type': 'str'
},
'expired-server-cert': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'proxy-after-tcp-handshake': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'revoked-server-cert': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
},
'sni-server-cert-check': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'enable',
'strict',
'disable'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'disable',
'deep-inspection'
],
'type': 'str'
},
'unsupported-ssl-cipher': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'block',
'allow'
],
'type': 'str'
},
'unsupported-ssl-negotiation': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'block',
'allow'
],
'type': 'str'
},
'untrusted-server-cert': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'allow',
'block',
'ignore'
],
'type': 'str'
}
}
},
'supported-alpn': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'http1-1',
'http2',
'all'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_sslsshprofile'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 40.356111
| 153
| 0.261899
| 6,388
| 114,571
| 4.674233
| 0.063557
| 0.057905
| 0.030343
| 0.028367
| 0.805419
| 0.788406
| 0.771359
| 0.765598
| 0.745002
| 0.732945
| 0
| 0.036351
| 0.653045
| 114,571
| 2,838
| 154
| 40.370331
| 0.7148
| 0.005778
| 0
| 0.78574
| 0
| 0.004278
| 0.454212
| 0.034787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000357
| false
| 0.016043
| 0.002139
| 0
| 0.002496
| 0.000357
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5d9a53df4da0faf25ca2ce9e7527cbc531c5d5c4
| 251
|
py
|
Python
|
src/spaceone/inventory/error/__init__.py
|
xellos00/inventory
|
e2831f2f09b5b72623f735a186264987d41954ab
|
[
"Apache-2.0"
] | 9
|
2020-06-04T23:01:38.000Z
|
2021-06-03T03:38:59.000Z
|
src/spaceone/inventory/error/__init__.py
|
xellos00/inventory
|
e2831f2f09b5b72623f735a186264987d41954ab
|
[
"Apache-2.0"
] | 10
|
2020-08-20T01:34:30.000Z
|
2022-03-14T04:59:48.000Z
|
src/spaceone/inventory/error/__init__.py
|
xellos00/inventory
|
e2831f2f09b5b72623f735a186264987d41954ab
|
[
"Apache-2.0"
] | 9
|
2020-06-08T22:03:02.000Z
|
2021-12-06T06:12:30.000Z
|
from spaceone.inventory.error.region import *
from spaceone.inventory.error.server import *
from spaceone.inventory.error.collector import *
from spaceone.inventory.error.collect_data import *
from spaceone.inventory.error.cloud_service_type import *
| 41.833333
| 57
| 0.840637
| 33
| 251
| 6.30303
| 0.393939
| 0.288462
| 0.504808
| 0.625
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079681
| 251
| 5
| 58
| 50.2
| 0.900433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5dd3c50abe6cff53e198eba616472682d838aa99
| 8,166
|
py
|
Python
|
NiaPy/benchmarks/schwefel.py
|
tuahk/NiaPy
|
c863d801fda8e1949a3ca716a4de7c7ca3d0ea16
|
[
"MIT"
] | null | null | null |
NiaPy/benchmarks/schwefel.py
|
tuahk/NiaPy
|
c863d801fda8e1949a3ca716a4de7c7ca3d0ea16
|
[
"MIT"
] | null | null | null |
NiaPy/benchmarks/schwefel.py
|
tuahk/NiaPy
|
c863d801fda8e1949a3ca716a4de7c7ca3d0ea16
|
[
"MIT"
] | null | null | null |
# encoding=utf8
# pylint: disable=anomalous-backslash-in-string, mixed-indentation, multiple-statements, line-too-long, no-else-return, old-style-class
"""Implementations of Schwefels functions."""
from math import sin, fmod, fabs, sqrt
__all__ = ['Schwefel', 'Schwefel221', 'Schwefel222', 'ModifiedSchwefel', 'ExpandedScaffer']
class Schwefel:
r"""Implementation of Schewel function.
Date: 2018
Author: Lucija Brezočnik
License: MIT
Function: **Schwefel function**
:math:`f(\textbf{x}) = 418.9829d - \sum_{i=1}^{D} x_i \sin(\sqrt{|x_i|})`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-500, 500]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (420.968746,...,420.968746)`
LaTeX formats:
Inline:
$f(\textbf{x}) = 418.9829d - \sum_{i=1}^{D} x_i \sin(\sqrt{|x_i|})$
Equation:
\begin{equation} f(\textbf{x}) = 418.9829d - \sum_{i=1}^{D} x_i
\sin(\sqrt{|x_i|}) \end{equation}
Domain:
$-500 \leq x_i \leq 500$
Reference: https://www.sfu.ca/~ssurjano/schwef.html
"""
def __init__(self, Lower=-500.0, Upper=500.0):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
val = 0.0
for i in range(D):
val += (sol[i] * sin(sqrt(abs(sol[i]))))
return 418.9829 * D - val
return evaluate
class Schwefel221:
r"""Schwefel 2.21 function implementation.
Date: 2018
Author: Grega Vrbančič
Licence: MIT
Function: **Schwefel 2.21 function**
:math:`f(\mathbf{x})=\max_{i=1,...,D}|x_i|`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (0,...,0)`
LaTeX formats:
Inline:
$f(\mathbf{x})=\max_{i=1,...,D}|x_i|$
Equation:
\begin{equation}f(\mathbf{x}) = \max_{i=1,...,D}|x_i| \end{equation}
Domain:
$-100 \leq x_i \leq 100$
Reference paper:
Jamil, M., and Yang, X. S. (2013).
A literature survey of benchmark functions for global optimisation problems.
International Journal of Mathematical Modelling and Numerical Optimisation,
4(2), 150-194.
"""
def __init__(self, Lower=-100.0, Upper=100.0):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
maximum = 0.0
for i in range(D):
if abs(sol[i]) > maximum:
maximum = abs(sol[i])
return maximum
return evaluate
class Schwefel222:
r"""Schwefel 2.22 function implementation.
Date: 2018
Author: Grega Vrbančič
Licence: MIT
Function: **Schwefel 2.22 function**
:math:`f(\mathbf{x})=\sum_{i=1}^{D}|x_i|+\prod_{i=1}^{D}|x_i|`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (0,...,0)`
LaTeX formats:
Inline:
$f(\mathbf{x})=\sum_{i=1}^{D}|x_i|+\prod_{i=1}^{D}|x_i|$
Equation:
\begin{equation}f(\mathbf{x}) = \sum_{i=1}^{D}|x_i| +
\prod_{i=1}^{D}|x_i| \end{equation}
Domain:
$-100 \leq x_i \leq 100$
Reference paper:
Jamil, M., and Yang, X. S. (2013).
A literature survey of benchmark functions for global optimisation problems.
International Journal of Mathematical Modelling and Numerical Optimisation,
4(2), 150-194.
"""
def __init__(self, Lower=-100.0, Upper=100.0):
self.Lower = Lower
self.Upper = Upper
@classmethod
def function(cls):
def evaluate(D, sol):
part1 = 0.0
part2 = 1.0
for i in range(D):
part1 += abs(sol[i])
part2 *= abs(sol[i])
return part1 + part2
return evaluate
class ModifiedSchwefel:
r"""Implementations of Modified Schwefel functions.
Date: 2018
Author: Klemen Berkovič
License: MIT
Function:
**Modified Schwefel Function**
:math:`f(\textbf{x}) = 418.9829 \cdot D - \sum_{i=1}^D h(x_i) \\ h(x) = g(x + 420.9687462275036) \\ g(z) = \begin{cases} z \sin \left( | z |^{\frac{1}{2}} \right) &\quad | z | \leq 500 \\ \left( 500 - \mod (z, 500) \right) \sin \left( \sqrt{| 500 - \mod (z, 500) |} \right) - \frac{ \left( z - 500 \right)^2 }{ 10000 D } &\quad z > 500 \\ \left( \mod (| z |, 500) - 500 \right) \sin \left( \sqrt{| \mod (|z|, 500) - 500 |} \right) + \frac{ \left( z - 500 \right)^2 }{ 10000 D } &\quad z < -500\end{cases}`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (420.968746,...,420.968746)`
LaTeX formats:
Inline:
$f(\textbf{x}) = 418.9829 \cdot D - \sum_{i=1}^D h(x_i) \\ h(x) = g(x + 420.9687462275036) \\ g(z) = \begin{cases} z \sin \left( | z |^{\frac{1}{2}} \right) &\quad | z | \leq 500 \\ \left( 500 - \mod (z, 500) \right) \sin \left( \sqrt{| 500 - \mod (z, 500) |} \right) - \frac{ \left( z - 500 \right)^2 }{ 10000 D } &\quad z > 500 \\ \left( \mod (| z |, 500) - 500 \right) \sin \left( \sqrt{| \mod (|z|, 500) - 500 |} \right) + \frac{ \left( z - 500 \right)^2 }{ 10000 D } &\quad z < -500\end{cases}$
Equation:
\begin{equation} f(\textbf{x}) = 418.9829 \cdot D - \sum_{i=1}^D h(x_i) \\ h(x) = g(x + 420.9687462275036) \\ g(z) = \begin{cases} z \sin \left( | z |^{\frac{1}{2}} \right) &\quad | z | \leq 500 \\ \left( 500 - \mod (z, 500) \right) \sin \left( \sqrt{| 500 - \mod (z, 500) |} \right) - \frac{ \left( z - 500 \right)^2 }{ 10000 D } &\quad z > 500 \\ \left( \mod (| z |, 500) - 500 \right) \sin \left( \sqrt{| \mod (|z|, 500) - 500 |} \right) + \frac{ \left( z - 500 \right)^2 }{ 10000 D } &\quad z < -500\end{cases} \end{equation}
Domain:
$-100 \leq x_i \leq 100$
Reference:
http://www5.zzu.edu.cn/__local/A/69/BC/D3B5DFE94CD2574B38AD7CD1D12_C802DAFE_BC0C0.pdf
"""
def __init__(self, Lower=-100.0, Upper=100.0): self.Lower, self.Upper = Lower, Upper
@classmethod
def function(cls):
def g(z, D):
if z > 500: return (500 - fmod(z, 500)) * sin(sqrt(fabs(500 - fmod(z, 500)))) - (z - 500) ** 2 / (10000 * D)
elif z < -500: return (fmod(z, 500) - 500) * sin(sqrt(fabs(fmod(z, 500) - 500))) + (z - 500) ** 2 / (10000 * D)
return z * sin(fabs(z) ** (1 / 2))
def h(x, D): return g(x + 420.9687462275036, D)
def f(D, sol):
val = 0.0
for i in range(D): val += h(sol[i], D)
return 418.9829 * D - val
return f
class ExpandedScaffer:
r"""Implementations of High Conditioned Elliptic functions.
Date: 2018
Author: Klemen Berkovič
License: MIT
Function:
**High Conditioned Elliptic Function**
:math:`f(\textbf{x}) = g(x_D, x_1) + \sum_{i=2}^D g(x_{i - 1}, x_i) \\ g(x, y) = 0.5 + \frac{\sin \left(\sqrt{x^2 + y^2} \right)^2 - 0.5}{\left( 1 + 0.001 (x^2 + y^2) \right)}^2`
**Input domain:**
The function can be defined on any input domain but it is usually
evaluated on the hypercube :math:`x_i ∈ [-100, 100]`, for all :math:`i = 1, 2,..., D`.
**Global minimum:** :math:`f(x^*) = 0`, at :math:`x^* = (420.968746,...,420.968746)`
LaTeX formats:
Inline:
$f(\textbf{x}) = g(x_D, x_1) + \sum_{i=2}^D g(x_{i - 1}, x_i) \\ g(x, y) = 0.5 + \frac{\sin \left(\sqrt{x^2 + y^2} \right)^2 - 0.5}{\left( 1 + 0.001 (x^2 + y^2) \right)}^2$
Equation:
\begin{equation} f(\textbf{x}) = g(x_D, x_1) + \sum_{i=2}^D g(x_{i - 1}, x_i) \\ g(x, y) = 0.5 + \frac{\sin \left(\sqrt{x^2 + y^2} \right)^2 - 0.5}{\left( 1 + 0.001 (x^2 + y^2) \right)}^2 \end{equation}
Domain:
$-100 \leq x_i \leq 100$
Reference:
http://www5.zzu.edu.cn/__local/A/69/BC/D3B5DFE94CD2574B38AD7CD1D12_C802DAFE_BC0C0.pdf
"""
def __init__(self, Lower=-100.0, Upper=100.0): self.Lower, self.Upper = Lower, Upper
@classmethod
def function(cls):
def g(x, y): return 0.5 + (sin(sqrt(x ** 2 + y ** 2)) ** 2 - 0.5) / (1 + 0.001 * (x ** 2 + y ** 2)) ** 2
def f(D, x):
val = 0.0
for i in range(1, D): val += g(x[i - 1], x[i])
return g(x[D - 1], x[0]) + val
return f
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 32.795181
| 535
| 0.598335
| 1,372
| 8,166
| 3.499271
| 0.134111
| 0.014997
| 0.009373
| 0.009998
| 0.784836
| 0.771714
| 0.747969
| 0.722766
| 0.722766
| 0.696313
| 0
| 0.109415
| 0.189689
| 8,166
| 248
| 536
| 32.927419
| 0.615385
| 0.812638
| 0
| 0.514286
| 0
| 0
| 0.030123
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.257143
| false
| 0
| 0.014286
| 0.028571
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5de78581e3046f064980ec8ff42c6377d35d5a42
| 36,783
|
py
|
Python
|
opensilexClientToolsPython/api/vue_js___ontology_extension_api.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | null | null | null |
opensilexClientToolsPython/api/vue_js___ontology_extension_api.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | 7
|
2021-05-25T14:06:04.000Z
|
2021-11-05T15:42:14.000Z
|
opensilexClientToolsPython/api/vue_js___ontology_extension_api.py
|
OpenSILEX/opensilexClientToolsPython
|
41b1e7e707670ecf1b2c06d79bdd9749945788cb
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
OpenSilex API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: INSTANCE-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from opensilexClientToolsPython.api_client import ApiClient
class VueJsOntologyExtensionApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_rdf_type(self, **kwargs): # noqa: E501
"""Create a custom class # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_rdf_type(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authentication token (required)
:param VueRDFTypeDTO body: Class description
:param str accept_language: Request accepted language
:return: ObjectUriResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_rdf_type_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_rdf_type_with_http_info(**kwargs) # noqa: E501
return data
def create_rdf_type_with_http_info(self, **kwargs): # noqa: E501
"""Create a custom class # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_rdf_type_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authentication token (required)
:param VueRDFTypeDTO body: Class description
:param str accept_language: Request accepted language
:return: ObjectUriResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', ] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_rdf_type" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
#if 'authorization' in params:
# header_params['Authorization'] = params['authorization'] # noqa: E501
#if 'accept_language' in params:
# header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/rdf_type', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ObjectUriResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_rdf_type(self, **kwargs): # noqa: E501
"""Delete a RDF type # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_rdf_type(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authentication token (required)
:param str rdf_type: RDF type
:param str accept_language: Request accepted language
:return: RDFPropertyDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_rdf_type_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_rdf_type_with_http_info(**kwargs) # noqa: E501
return data
def delete_rdf_type_with_http_info(self, **kwargs): # noqa: E501
"""Delete a RDF type # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_rdf_type_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authentication token (required)
:param str rdf_type: RDF type
:param str accept_language: Request accepted language
:return: RDFPropertyDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rdf_type', ] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_rdf_type" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'rdf_type' in params:
query_params.append(('rdf_type', params['rdf_type'])) # noqa: E501
header_params = {}
#if 'authorization' in params:
# header_params['Authorization'] = params['authorization'] # noqa: E501
#if 'accept_language' in params:
# header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/rdf_type', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RDFPropertyDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_data_types1(self, **kwargs): # noqa: E501
"""Return literal datatypes definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_data_types1(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[VueDataTypeDTO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_data_types1_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_data_types1_with_http_info(**kwargs) # noqa: E501
return data
def get_data_types1_with_http_info(self, **kwargs): # noqa: E501
"""Return literal datatypes definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_data_types1_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[VueDataTypeDTO]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_data_types1" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/data_types', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[VueDataTypeDTO]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_object_types(self, **kwargs): # noqa: E501
"""Return object types definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_object_types(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[VueObjectTypeDTO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_object_types_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_object_types_with_http_info(**kwargs) # noqa: E501
return data
def get_object_types_with_http_info(self, **kwargs): # noqa: E501
"""Return object types definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_object_types_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[VueObjectTypeDTO]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_object_types" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/object_types', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[VueObjectTypeDTO]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rdf_type1(self, rdf_type, **kwargs): # noqa: E501
"""Return rdf type model definition with properties # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rdf_type1(rdf_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rdf_type: RDF type URI (required)
:param str authorization: Authentication token (required)
:param str parent_type: Parent RDF class URI
:param str accept_language: Request accepted language
:return: VueRDFTypeDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_rdf_type1_with_http_info(rdf_type, **kwargs) # noqa: E501
else:
(data) = self.get_rdf_type1_with_http_info(rdf_type, **kwargs) # noqa: E501
return data
def get_rdf_type1_with_http_info(self, rdf_type, **kwargs): # noqa: E501
"""Return rdf type model definition with properties # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rdf_type1_with_http_info(rdf_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rdf_type: RDF type URI (required)
:param str authorization: Authentication token (required)
:param str parent_type: Parent RDF class URI
:param str accept_language: Request accepted language
:return: VueRDFTypeDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rdf_type', 'parent_type', ] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rdf_type1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rdf_type' is set
if ('rdf_type' not in params or
params['rdf_type'] is None):
raise ValueError("Missing the required parameter `rdf_type` when calling `get_rdf_type1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'rdf_type' in params:
query_params.append(('rdf_type', params['rdf_type'])) # noqa: E501
if 'parent_type' in params:
query_params.append(('parentType', params['parent_type'])) # noqa: E501
header_params = {}
#if 'authorization' in params:
# header_params['Authorization'] = params['authorization'] # noqa: E501
#if 'accept_language' in params:
# header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/rdf_type', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VueRDFTypeDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rdf_type_properties(self, rdf_type, parent_type, **kwargs): # noqa: E501
"""Return class model properties definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rdf_type_properties(rdf_type, parent_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rdf_type: RDF class URI (required)
:param str parent_type: Parent RDF class URI (required)
:param str authorization: Authentication token (required)
:param str accept_language: Request accepted language
:return: VueRDFTypeDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_rdf_type_properties_with_http_info(rdf_type, parent_type, **kwargs) # noqa: E501
else:
(data) = self.get_rdf_type_properties_with_http_info(rdf_type, parent_type, **kwargs) # noqa: E501
return data
def get_rdf_type_properties_with_http_info(self, rdf_type, parent_type, **kwargs): # noqa: E501
"""Return class model properties definitions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rdf_type_properties_with_http_info(rdf_type, parent_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rdf_type: RDF class URI (required)
:param str parent_type: Parent RDF class URI (required)
:param str authorization: Authentication token (required)
:param str accept_language: Request accepted language
:return: VueRDFTypeDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rdf_type', 'parent_type', ] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rdf_type_properties" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rdf_type' is set
if ('rdf_type' not in params or
params['rdf_type'] is None):
raise ValueError("Missing the required parameter `rdf_type` when calling `get_rdf_type_properties`") # noqa: E501
# verify the required parameter 'parent_type' is set
if ('parent_type' not in params or
params['parent_type'] is None):
raise ValueError("Missing the required parameter `parent_type` when calling `get_rdf_type_properties`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'rdf_type' in params:
query_params.append(('rdf_type', params['rdf_type'])) # noqa: E501
if 'parent_type' in params:
query_params.append(('parent_type', params['parent_type'])) # noqa: E501
header_params = {}
#if 'authorization' in params:
# header_params['Authorization'] = params['authorization'] # noqa: E501
#if 'accept_language' in params:
# header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/rdf_type_properties', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VueRDFTypeDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_rdf_types_parameters(self, **kwargs): # noqa: E501
"""Return RDF types parameters for Vue.js application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rdf_types_parameters(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[VueRDFTypeParameterDTO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_rdf_types_parameters_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_rdf_types_parameters_with_http_info(**kwargs) # noqa: E501
return data
def get_rdf_types_parameters_with_http_info(self, **kwargs): # noqa: E501
"""Return RDF types parameters for Vue.js application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_rdf_types_parameters_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[VueRDFTypeParameterDTO]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rdf_types_parameters" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/rdf_types_parameters', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[VueRDFTypeParameterDTO]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_rdf_type_properties_order(self, rdf_type, **kwargs): # noqa: E501
"""Define properties order # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_rdf_type_properties_order(rdf_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rdf_type: RDF type (required)
:param str authorization: Authentication token (required)
:param list[str] body: Array of properties
:param str accept_language: Request accepted language
:return: ObjectUriResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_rdf_type_properties_order_with_http_info(rdf_type, **kwargs) # noqa: E501
else:
(data) = self.set_rdf_type_properties_order_with_http_info(rdf_type, **kwargs) # noqa: E501
return data
def set_rdf_type_properties_order_with_http_info(self, rdf_type, **kwargs): # noqa: E501
"""Define properties order # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_rdf_type_properties_order_with_http_info(rdf_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str rdf_type: RDF type (required)
:param str authorization: Authentication token (required)
:param list[str] body: Array of properties
:param str accept_language: Request accepted language
:return: ObjectUriResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rdf_type', 'body', ] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_rdf_type_properties_order" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rdf_type' is set
if ('rdf_type' not in params or
params['rdf_type'] is None):
raise ValueError("Missing the required parameter `rdf_type` when calling `set_rdf_type_properties_order`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'rdf_type' in params:
query_params.append(('rdf_type', params['rdf_type'])) # noqa: E501
header_params = {}
#if 'authorization' in params:
# header_params['Authorization'] = params['authorization'] # noqa: E501
#if 'accept_language' in params:
# header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/properties_order', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ObjectUriResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_rdf_type(self, **kwargs): # noqa: E501
"""Update a custom class # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_rdf_type(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authentication token (required)
:param VueRDFTypeDTO body: RDF type definition
:param str accept_language: Request accepted language
:return: ObjectUriResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_rdf_type_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.update_rdf_type_with_http_info(**kwargs) # noqa: E501
return data
def update_rdf_type_with_http_info(self, **kwargs): # noqa: E501
"""Update a custom class # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_rdf_type_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authentication token (required)
:param VueRDFTypeDTO body: RDF type definition
:param str accept_language: Request accepted language
:return: ObjectUriResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', ] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_rdf_type" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
#if 'authorization' in params:
# header_params['Authorization'] = params['authorization'] # noqa: E501
#if 'accept_language' in params:
# header_params['Accept-Language'] = params['accept_language'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/vuejs/owl_extension/rdf_type', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ObjectUriResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 38.556604
| 132
| 0.610391
| 4,155
| 36,783
| 5.133574
| 0.043803
| 0.055884
| 0.023629
| 0.03038
| 0.963854
| 0.958462
| 0.953165
| 0.945898
| 0.942897
| 0.932443
| 0
| 0.018161
| 0.299432
| 36,783
| 953
| 133
| 38.597062
| 0.809577
| 0.359432
| 0
| 0.820833
| 1
| 0
| 0.173135
| 0.051204
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039583
| false
| 0
| 0.008333
| 0
| 0.10625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5de90129fec425fdf5a6f6a4cfe9bc56c69683d8
| 21,611
|
py
|
Python
|
sdk/python/pulumi_azure/network/virtual_network_gateway_connection.py
|
davidobrien1985/pulumi-azure
|
811beeea473bd798d77354521266a87a2fac5888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/network/virtual_network_gateway_connection.py
|
davidobrien1985/pulumi-azure
|
811beeea473bd798d77354521266a87a2fac5888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/network/virtual_network_gateway_connection.py
|
davidobrien1985/pulumi-azure
|
811beeea473bd798d77354521266a87a2fac5888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class VirtualNetworkGatewayConnection(pulumi.CustomResource):
authorization_key: pulumi.Output[str]
"""
The authorization key associated with the
Express Route Circuit. This field is required only if the type is an
ExpressRoute connection.
"""
connection_protocol: pulumi.Output[str]
"""
The IKE protocol version to use. Possible
values are `IKEv1` and `IKEv2`. Defaults to `IKEv2`.
Changing this value will force a resource to be created.
> **Note**: Only valid for `IPSec` connections on virtual network gateways with SKU `VpnGw1`, `VpnGw2`, `VpnGw3`, `VpnGw1AZ`, `VpnGw2AZ` or `VpnGw3AZ`.
"""
enable_bgp: pulumi.Output[bool]
"""
If `true`, BGP (Border Gateway Protocol) is enabled
for this connection. Defaults to `false`.
"""
express_route_circuit_id: pulumi.Output[str]
"""
The ID of the Express Route Circuit
when creating an ExpressRoute connection (i.e. when `type` is `ExpressRoute`).
The Express Route Circuit can be in the same or in a different subscription.
"""
express_route_gateway_bypass: pulumi.Output[bool]
"""
If `true`, data packets will bypass ExpressRoute Gateway for data forwarding This is only valid for ExpressRoute connections.
"""
ipsec_policy: pulumi.Output[dict]
"""
A `ipsec_policy` block which is documented below.
Only a single policy can be defined for a connection. For details on
custom policies refer to [the relevant section in the Azure documentation](https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-ipsecikepolicy-rm-powershell).
* `dhGroup` (`str`) - The DH group used in IKE phase 1 for initial SA. Valid
options are `DHGroup1`, `DHGroup14`, `DHGroup2`, `DHGroup2048`, `DHGroup24`,
`ECP256`, `ECP384`, or `None`.
* `ikeEncryption` (`str`) - The IKE encryption algorithm. Valid
options are `AES128`, `AES192`, `AES256`, `DES`, or `DES3`.
* `ikeIntegrity` (`str`) - The IKE integrity algorithm. Valid
options are `MD5`, `SHA1`, `SHA256`, or `SHA384`.
* `ipsecEncryption` (`str`) - The IPSec encryption algorithm. Valid
options are `AES128`, `AES192`, `AES256`, `DES`, `DES3`, `GCMAES128`, `GCMAES192`, `GCMAES256`, or `None`.
* `ipsecIntegrity` (`str`) - The IPSec integrity algorithm. Valid
options are `GCMAES128`, `GCMAES192`, `GCMAES256`, `MD5`, `SHA1`, or `SHA256`.
* `pfsGroup` (`str`) - The DH group used in IKE phase 2 for new child SA.
Valid options are `ECP256`, `ECP384`, `PFS1`, `PFS2`, `PFS2048`, `PFS24`,
or `None`.
* `saDatasize` (`float`) - The IPSec SA payload size in KB. Must be at least
`1024` KB. Defaults to `102400000` KB.
* `saLifetime` (`float`) - The IPSec SA lifetime in seconds. Must be at least
`300` seconds. Defaults to `27000` seconds.
"""
local_network_gateway_id: pulumi.Output[str]
"""
The ID of the local network gateway
when creating Site-to-Site connection (i.e. when `type` is `IPsec`).
"""
location: pulumi.Output[str]
"""
The location/region where the connection is
located. Changing this forces a new resource to be created.
"""
name: pulumi.Output[str]
"""
The name of the connection. Changing the name forces a
new resource to be created.
"""
peer_virtual_network_gateway_id: pulumi.Output[str]
"""
The ID of the peer virtual
network gateway when creating a VNet-to-VNet connection (i.e. when `type`
is `Vnet2Vnet`). The peer Virtual Network Gateway can be in the same or
in a different subscription.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to
create the connection Changing the name forces a new resource to be created.
"""
routing_weight: pulumi.Output[float]
"""
The routing weight. Defaults to `10`.
"""
shared_key: pulumi.Output[str]
"""
The shared IPSec key. A key could be provided if a
Site-to-Site, VNet-to-VNet or ExpressRoute connection is created.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
type: pulumi.Output[str]
"""
The type of connection. Valid options are `IPsec`
(Site-to-Site), `ExpressRoute` (ExpressRoute), and `Vnet2Vnet` (VNet-to-VNet).
Each connection type requires different mandatory arguments (refer to the
examples above). Changing the connection type will force a new connection
to be created.
"""
use_policy_based_traffic_selectors: pulumi.Output[bool]
"""
If `true`, policy-based traffic
selectors are enabled for this connection. Enabling policy-based traffic
selectors requires an `ipsec_policy` block. Defaults to `false`.
"""
virtual_network_gateway_id: pulumi.Output[str]
"""
The ID of the Virtual Network Gateway
in which the connection will be created. Changing the gateway forces a new
resource to be created.
"""
def __init__(__self__, resource_name, opts=None, authorization_key=None, connection_protocol=None, enable_bgp=None, express_route_circuit_id=None, express_route_gateway_bypass=None, ipsec_policy=None, local_network_gateway_id=None, location=None, name=None, peer_virtual_network_gateway_id=None, resource_group_name=None, routing_weight=None, shared_key=None, tags=None, type=None, use_policy_based_traffic_selectors=None, virtual_network_gateway_id=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a connection in an existing Virtual Network Gateway.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: The authorization key associated with the
Express Route Circuit. This field is required only if the type is an
ExpressRoute connection.
:param pulumi.Input[str] connection_protocol: The IKE protocol version to use. Possible
values are `IKEv1` and `IKEv2`. Defaults to `IKEv2`.
Changing this value will force a resource to be created.
> **Note**: Only valid for `IPSec` connections on virtual network gateways with SKU `VpnGw1`, `VpnGw2`, `VpnGw3`, `VpnGw1AZ`, `VpnGw2AZ` or `VpnGw3AZ`.
:param pulumi.Input[bool] enable_bgp: If `true`, BGP (Border Gateway Protocol) is enabled
for this connection. Defaults to `false`.
:param pulumi.Input[str] express_route_circuit_id: The ID of the Express Route Circuit
when creating an ExpressRoute connection (i.e. when `type` is `ExpressRoute`).
The Express Route Circuit can be in the same or in a different subscription.
:param pulumi.Input[bool] express_route_gateway_bypass: If `true`, data packets will bypass ExpressRoute Gateway for data forwarding This is only valid for ExpressRoute connections.
:param pulumi.Input[dict] ipsec_policy: A `ipsec_policy` block which is documented below.
Only a single policy can be defined for a connection. For details on
custom policies refer to [the relevant section in the Azure documentation](https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-ipsecikepolicy-rm-powershell).
:param pulumi.Input[str] local_network_gateway_id: The ID of the local network gateway
when creating Site-to-Site connection (i.e. when `type` is `IPsec`).
:param pulumi.Input[str] location: The location/region where the connection is
located. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the connection. Changing the name forces a
new resource to be created.
:param pulumi.Input[str] peer_virtual_network_gateway_id: The ID of the peer virtual
network gateway when creating a VNet-to-VNet connection (i.e. when `type`
is `Vnet2Vnet`). The peer Virtual Network Gateway can be in the same or
in a different subscription.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the connection Changing the name forces a new resource to be created.
:param pulumi.Input[float] routing_weight: The routing weight. Defaults to `10`.
:param pulumi.Input[str] shared_key: The shared IPSec key. A key could be provided if a
Site-to-Site, VNet-to-VNet or ExpressRoute connection is created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] type: The type of connection. Valid options are `IPsec`
(Site-to-Site), `ExpressRoute` (ExpressRoute), and `Vnet2Vnet` (VNet-to-VNet).
Each connection type requires different mandatory arguments (refer to the
examples above). Changing the connection type will force a new connection
to be created.
:param pulumi.Input[bool] use_policy_based_traffic_selectors: If `true`, policy-based traffic
selectors are enabled for this connection. Enabling policy-based traffic
selectors requires an `ipsec_policy` block. Defaults to `false`.
:param pulumi.Input[str] virtual_network_gateway_id: The ID of the Virtual Network Gateway
in which the connection will be created. Changing the gateway forces a new
resource to be created.
The **ipsec_policy** object supports the following:
* `dhGroup` (`pulumi.Input[str]`) - The DH group used in IKE phase 1 for initial SA. Valid
options are `DHGroup1`, `DHGroup14`, `DHGroup2`, `DHGroup2048`, `DHGroup24`,
`ECP256`, `ECP384`, or `None`.
* `ikeEncryption` (`pulumi.Input[str]`) - The IKE encryption algorithm. Valid
options are `AES128`, `AES192`, `AES256`, `DES`, or `DES3`.
* `ikeIntegrity` (`pulumi.Input[str]`) - The IKE integrity algorithm. Valid
options are `MD5`, `SHA1`, `SHA256`, or `SHA384`.
* `ipsecEncryption` (`pulumi.Input[str]`) - The IPSec encryption algorithm. Valid
options are `AES128`, `AES192`, `AES256`, `DES`, `DES3`, `GCMAES128`, `GCMAES192`, `GCMAES256`, or `None`.
* `ipsecIntegrity` (`pulumi.Input[str]`) - The IPSec integrity algorithm. Valid
options are `GCMAES128`, `GCMAES192`, `GCMAES256`, `MD5`, `SHA1`, or `SHA256`.
* `pfsGroup` (`pulumi.Input[str]`) - The DH group used in IKE phase 2 for new child SA.
Valid options are `ECP256`, `ECP384`, `PFS1`, `PFS2`, `PFS2048`, `PFS24`,
or `None`.
* `saDatasize` (`pulumi.Input[float]`) - The IPSec SA payload size in KB. Must be at least
`1024` KB. Defaults to `102400000` KB.
* `saLifetime` (`pulumi.Input[float]`) - The IPSec SA lifetime in seconds. Must be at least
`300` seconds. Defaults to `27000` seconds.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
__props__['connection_protocol'] = connection_protocol
__props__['enable_bgp'] = enable_bgp
__props__['express_route_circuit_id'] = express_route_circuit_id
__props__['express_route_gateway_bypass'] = express_route_gateway_bypass
__props__['ipsec_policy'] = ipsec_policy
__props__['local_network_gateway_id'] = local_network_gateway_id
__props__['location'] = location
__props__['name'] = name
__props__['peer_virtual_network_gateway_id'] = peer_virtual_network_gateway_id
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['routing_weight'] = routing_weight
__props__['shared_key'] = shared_key
__props__['tags'] = tags
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['use_policy_based_traffic_selectors'] = use_policy_based_traffic_selectors
if virtual_network_gateway_id is None:
raise TypeError("Missing required property 'virtual_network_gateway_id'")
__props__['virtual_network_gateway_id'] = virtual_network_gateway_id
super(VirtualNetworkGatewayConnection, __self__).__init__(
'azure:network/virtualNetworkGatewayConnection:VirtualNetworkGatewayConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, authorization_key=None, connection_protocol=None, enable_bgp=None, express_route_circuit_id=None, express_route_gateway_bypass=None, ipsec_policy=None, local_network_gateway_id=None, location=None, name=None, peer_virtual_network_gateway_id=None, resource_group_name=None, routing_weight=None, shared_key=None, tags=None, type=None, use_policy_based_traffic_selectors=None, virtual_network_gateway_id=None):
"""
Get an existing VirtualNetworkGatewayConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: The authorization key associated with the
Express Route Circuit. This field is required only if the type is an
ExpressRoute connection.
:param pulumi.Input[str] connection_protocol: The IKE protocol version to use. Possible
values are `IKEv1` and `IKEv2`. Defaults to `IKEv2`.
Changing this value will force a resource to be created.
> **Note**: Only valid for `IPSec` connections on virtual network gateways with SKU `VpnGw1`, `VpnGw2`, `VpnGw3`, `VpnGw1AZ`, `VpnGw2AZ` or `VpnGw3AZ`.
:param pulumi.Input[bool] enable_bgp: If `true`, BGP (Border Gateway Protocol) is enabled
for this connection. Defaults to `false`.
:param pulumi.Input[str] express_route_circuit_id: The ID of the Express Route Circuit
when creating an ExpressRoute connection (i.e. when `type` is `ExpressRoute`).
The Express Route Circuit can be in the same or in a different subscription.
:param pulumi.Input[bool] express_route_gateway_bypass: If `true`, data packets will bypass ExpressRoute Gateway for data forwarding This is only valid for ExpressRoute connections.
:param pulumi.Input[dict] ipsec_policy: A `ipsec_policy` block which is documented below.
Only a single policy can be defined for a connection. For details on
custom policies refer to [the relevant section in the Azure documentation](https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-ipsecikepolicy-rm-powershell).
:param pulumi.Input[str] local_network_gateway_id: The ID of the local network gateway
when creating Site-to-Site connection (i.e. when `type` is `IPsec`).
:param pulumi.Input[str] location: The location/region where the connection is
located. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the connection. Changing the name forces a
new resource to be created.
:param pulumi.Input[str] peer_virtual_network_gateway_id: The ID of the peer virtual
network gateway when creating a VNet-to-VNet connection (i.e. when `type`
is `Vnet2Vnet`). The peer Virtual Network Gateway can be in the same or
in a different subscription.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the connection Changing the name forces a new resource to be created.
:param pulumi.Input[float] routing_weight: The routing weight. Defaults to `10`.
:param pulumi.Input[str] shared_key: The shared IPSec key. A key could be provided if a
Site-to-Site, VNet-to-VNet or ExpressRoute connection is created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] type: The type of connection. Valid options are `IPsec`
(Site-to-Site), `ExpressRoute` (ExpressRoute), and `Vnet2Vnet` (VNet-to-VNet).
Each connection type requires different mandatory arguments (refer to the
examples above). Changing the connection type will force a new connection
to be created.
:param pulumi.Input[bool] use_policy_based_traffic_selectors: If `true`, policy-based traffic
selectors are enabled for this connection. Enabling policy-based traffic
selectors requires an `ipsec_policy` block. Defaults to `false`.
:param pulumi.Input[str] virtual_network_gateway_id: The ID of the Virtual Network Gateway
in which the connection will be created. Changing the gateway forces a new
resource to be created.
The **ipsec_policy** object supports the following:
* `dhGroup` (`pulumi.Input[str]`) - The DH group used in IKE phase 1 for initial SA. Valid
options are `DHGroup1`, `DHGroup14`, `DHGroup2`, `DHGroup2048`, `DHGroup24`,
`ECP256`, `ECP384`, or `None`.
* `ikeEncryption` (`pulumi.Input[str]`) - The IKE encryption algorithm. Valid
options are `AES128`, `AES192`, `AES256`, `DES`, or `DES3`.
* `ikeIntegrity` (`pulumi.Input[str]`) - The IKE integrity algorithm. Valid
options are `MD5`, `SHA1`, `SHA256`, or `SHA384`.
* `ipsecEncryption` (`pulumi.Input[str]`) - The IPSec encryption algorithm. Valid
options are `AES128`, `AES192`, `AES256`, `DES`, `DES3`, `GCMAES128`, `GCMAES192`, `GCMAES256`, or `None`.
* `ipsecIntegrity` (`pulumi.Input[str]`) - The IPSec integrity algorithm. Valid
options are `GCMAES128`, `GCMAES192`, `GCMAES256`, `MD5`, `SHA1`, or `SHA256`.
* `pfsGroup` (`pulumi.Input[str]`) - The DH group used in IKE phase 2 for new child SA.
Valid options are `ECP256`, `ECP384`, `PFS1`, `PFS2`, `PFS2048`, `PFS24`,
or `None`.
* `saDatasize` (`pulumi.Input[float]`) - The IPSec SA payload size in KB. Must be at least
`1024` KB. Defaults to `102400000` KB.
* `saLifetime` (`pulumi.Input[float]`) - The IPSec SA lifetime in seconds. Must be at least
`300` seconds. Defaults to `27000` seconds.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["authorization_key"] = authorization_key
__props__["connection_protocol"] = connection_protocol
__props__["enable_bgp"] = enable_bgp
__props__["express_route_circuit_id"] = express_route_circuit_id
__props__["express_route_gateway_bypass"] = express_route_gateway_bypass
__props__["ipsec_policy"] = ipsec_policy
__props__["local_network_gateway_id"] = local_network_gateway_id
__props__["location"] = location
__props__["name"] = name
__props__["peer_virtual_network_gateway_id"] = peer_virtual_network_gateway_id
__props__["resource_group_name"] = resource_group_name
__props__["routing_weight"] = routing_weight
__props__["shared_key"] = shared_key
__props__["tags"] = tags
__props__["type"] = type
__props__["use_policy_based_traffic_selectors"] = use_policy_based_traffic_selectors
__props__["virtual_network_gateway_id"] = virtual_network_gateway_id
return VirtualNetworkGatewayConnection(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 62.279539
| 506
| 0.67734
| 2,748
| 21,611
| 5.141921
| 0.102256
| 0.038924
| 0.0385
| 0.029582
| 0.880113
| 0.863411
| 0.856476
| 0.847346
| 0.838641
| 0.828946
| 0
| 0.021242
| 0.233215
| 21,611
| 346
| 507
| 62.459538
| 0.831453
| 0.48119
| 0
| 0.021053
| 1
| 0
| 0.174828
| 0.077402
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042105
| false
| 0.063158
| 0.063158
| 0.021053
| 0.326316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
f8f1495780d18c4bd5e500d835fed9783768bc9e
| 13,063
|
py
|
Python
|
tests/test_dsl_basic.py
|
zzzDavid/heterocl
|
977aae575d54a30c5bf6d869e8f71bdc815cf7e9
|
[
"Apache-2.0"
] | 236
|
2019-05-19T01:48:11.000Z
|
2022-03-31T09:03:54.000Z
|
tests/test_dsl_basic.py
|
zzzDavid/heterocl
|
977aae575d54a30c5bf6d869e8f71bdc815cf7e9
|
[
"Apache-2.0"
] | 248
|
2019-05-17T19:18:36.000Z
|
2022-03-30T21:25:47.000Z
|
tests/test_dsl_basic.py
|
AlgaPeng/heterocl-2
|
b5197907d1fe07485466a63671a2a906a861c939
|
[
"Apache-2.0"
] | 85
|
2019-05-17T20:09:27.000Z
|
2022-02-28T20:19:00.000Z
|
import heterocl as hcl
import numpy as np
def _test_logic_op(op):
def kernel(A, B):
return hcl.compute(A.shape,
lambda x: hcl.select(op(A[x]>5, B[x]>5), 0, 1))
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
s = hcl.create_schedule([A, B], kernel)
f = hcl.build(s)
return f
def test_and():
f = _test_logic_op(hcl.and_)
np_A = np.random.randint(10, size=(10,))
np_B = np.random.randint(10, size=(10,))
np_C = np.zeros(10)
golden_C = [0 if np_A[i]>5 and np_B[i]>5 else 1 for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
hcl_C = hcl.asarray(np_C)
f(hcl_A, hcl_B, hcl_C)
ret_C = hcl_C.asnumpy()
assert np.array_equal(ret_C, golden_C)
def test_or():
f = _test_logic_op(hcl.or_)
np_A = np.random.randint(10, size=(10,))
np_B = np.random.randint(10, size=(10,))
np_C = np.zeros(10)
golden_C = [0 if np_A[i]>5 or np_B[i]>5 else 1 for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
hcl_C = hcl.asarray(np_C)
f(hcl_A, hcl_B, hcl_C)
ret_C = hcl_C.asnumpy()
assert np.array_equal(ret_C, golden_C)
def test_if():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else np_A[0]]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_else():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
with hcl.else_():
A[0] = -1
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else -1]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_elif():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
with hcl.elif_(A[0] > 3):
A[0] = 3
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else (3 if np_A[0]>3 else np_A[0])]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_cond_all():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
with hcl.elif_(A[0] > 3):
A[0] = 3
with hcl.else_():
A[0] = 0
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else (3 if np_A[0]>3 else 0)]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
def test_elif():
def kernel(A):
with hcl.if_(A[0] > 5):
A[0] = 5
with hcl.elif_(A[0] > 3):
A[0] = 3
A = hcl.placeholder((1,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(1,))
golden_A = [5 if np_A[0]>5 else (3 if np_A[0]>3 else np_A[0])]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
def test_for_basic():
def kernel(A):
with hcl.for_(0, 10) as i:
A[i] = i
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = [i for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_for_irregular_bound():
def kernel(A):
with hcl.for_(4, 8) as i:
A[i] = i
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = np.copy(np_A)
for i in range(4, 8):
golden_A[i] = i
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_for_step_non_one():
def kernel(A):
with hcl.for_(0, 10, 2) as i:
A[i] = i
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = np.copy(np_A)
for i in range(0, 10, 2):
golden_A[i] = i
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_for_step_negative():
def kernel(A):
with hcl.for_(9, -1, -1) as i:
A[i] = i
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = [i for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_for_index_casting():
def kernel(A):
with hcl.for_(0, 10) as i:
with hcl.for_(i, 10) as j:
A[j] += i
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.zeros(10)
golden_A = np.zeros(10)
for i in range(0, 10):
for j in range(i, 10):
golden_A[j] += i
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_while_basic():
def kernel(A):
a = hcl.scalar(0)
with hcl.while_(a[0] < 10):
A[a[0]] = a[0]
a[0] += 1
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = [i for i in range(0, 10)]
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_break_in_for():
def kernel(A):
with hcl.for_(0, 10) as i:
with hcl.if_(i > 5):
hcl.break_()
A[i] = i
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = np.copy(np_A)
for i in range(0, 6):
golden_A[i] = i
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_break_in_while():
def kernel(A):
i = hcl.scalar(0)
with hcl.while_(True):
with hcl.if_(i[0] > 5):
hcl.break_()
A[i[0]] = i[0]
i[0] += 1
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = np.copy(np_A)
for i in range(0, 6):
golden_A[i] = i
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_break_multi_level():
def kernel(A):
with hcl.for_(0, 10) as i:
with hcl.for_(0, 10) as j:
with hcl.if_(j >= i):
hcl.break_()
A[i] += j
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
golden_A = np.copy(np_A)
for i in range(0, 10):
for j in range(0, i):
golden_A[i] += j
hcl_A = hcl.asarray(np_A)
f(hcl_A)
ret_A = hcl_A.asnumpy()
assert np.array_equal(golden_A, ret_A)
def test_get_bit_expr():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: (A[x] + 1)[0])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = (np_A + 1) & 1
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
def test_get_bit_tensor():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: A[x][0])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = np_A & 1
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
def test_set_bit_expr():
hcl.init()
def kernel(A, B):
with hcl.for_(0, 10) as i:
(B[i]+1)[0] = A[i]
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
try:
s = hcl.create_schedule([A, B], kernel)
except hcl.debug.APIError:
pass
else:
assert False
def test_set_bit_tensor():
hcl.init()
def kernel(A, B):
with hcl.for_(0, 10) as i:
B[i][0] = A[i]
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
s = hcl.create_schedule([A, B], kernel)
f = hcl.build(s)
np_A = np.random.randint(1, size=(10,))
np_B = np.random.randint(10, size=(10,))
golden = (np_B & 0b1110) | np_A
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
def test_get_slice_expr():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: (A[x] + 1)[2:0])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = (np_A + 1) & 0b11
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
def test_get_slice_tensor():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: A[x][2:0])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = np_A & 0b11
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
def test_get_slice_tensor_reverse():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: A[x][0:8])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = np_A & 0xFF
golden = golden.astype('uint8')
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
ret = ret.astype('uint8')
for i in range(0, 10):
x = np.unpackbits(golden[i])
x = np.flip(x)
y = np.unpackbits(ret[i])
assert np.array_equal(x, y)
def test_set_slice_expr():
hcl.init()
def kernel(A, B):
with hcl.for_(0, 10) as i:
(B[i]+1)[2:0] = A[i]
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
try:
s = hcl.create_schedule([A, B], kernel)
except hcl.debug.APIError:
pass
else:
assert False
def test_set_slice_tensor():
hcl.init()
def kernel(A, B):
with hcl.for_(0, 10) as i:
B[i][2:0] = A[i]
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
s = hcl.create_schedule([A, B], kernel)
f = hcl.build(s)
np_A = np.random.randint(1, size=(10,))
np_B = np.random.randint(10, size=(10,))
golden = (np_B & 0b1100) | np_A
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
def test_set_slice_tensor_reverse():
hcl.init(hcl.UInt(8))
def kernel(A, B):
with hcl.for_(0, 10) as i:
B[i][0:8] = A[i]
A = hcl.placeholder((10,))
B = hcl.placeholder((10,))
s = hcl.create_schedule([A, B], kernel)
f = hcl.build(s)
np_A = np.random.randint(1, size=(10,))
np_B = np.random.randint(10, size=(10,))
np_A = np_A.astype('uint8')
np_B = np_B.astype('uint8')
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
ret = ret.astype('uint8')
for i in range(0, 10):
a = np.flip(np.unpackbits(np_A[i]))
b = np.unpackbits(ret[i])
assert np.array_equal(a, b)
def test_slice_op():
hcl.init()
def kernel(A):
return hcl.compute(A.shape, lambda x: A[x][8:0] + A[x][16:8])
A = hcl.placeholder((10,))
s = hcl.create_schedule(A, kernel)
f = hcl.build(s)
np_A = np.random.randint(10, size=(10,))
np_B = np.zeros(10)
golden = (np_A & 0xFF) + ((np_A >> 8) & 0xFF)
hcl_A = hcl.asarray(np_A)
hcl_B = hcl.asarray(np_B)
f(hcl_A, hcl_B)
ret = hcl_B.asnumpy()
assert np.array_equal(golden, ret)
| 21.069355
| 74
| 0.554314
| 2,288
| 13,063
| 2.972902
| 0.044143
| 0.052926
| 0.067039
| 0.068803
| 0.928403
| 0.913849
| 0.899882
| 0.892238
| 0.878859
| 0.878859
| 0
| 0.04453
| 0.28485
| 13,063
| 619
| 75
| 21.103393
| 0.68358
| 0
| 0
| 0.776224
| 0
| 0
| 0.001914
| 0
| 0
| 0
| 0.000919
| 0
| 0.058275
| 1
| 0.125874
| false
| 0.004662
| 0.004662
| 0.016317
| 0.149184
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d02d0b81bc9cc025760810c1dc686e1f45769fd
| 4,516
|
py
|
Python
|
test/queryservice_tests/cache_cases1.py
|
kmiku7/vitess-annotated
|
ca10b6aa42e57ac78ef8b315b93525263ecafc12
|
[
"BSD-3-Clause"
] | 1
|
2015-09-16T04:46:41.000Z
|
2015-09-16T04:46:41.000Z
|
test/queryservice_tests/cache_cases1.py
|
kmiku7/vitess-annotated
|
ca10b6aa42e57ac78ef8b315b93525263ecafc12
|
[
"BSD-3-Clause"
] | null | null | null |
test/queryservice_tests/cache_cases1.py
|
kmiku7/vitess-annotated
|
ca10b6aa42e57ac78ef8b315b93525263ecafc12
|
[
"BSD-3-Clause"
] | 1
|
2021-03-24T12:37:12.000Z
|
2021-03-24T12:37:12.000Z
|
from cases_framework import Case, MultiCase
# Covers cases for vtocc_cached1
class Case1(Case):
def __init__(self, **kwargs):
Case.__init__(self, cache_table='vtocc_cached1', **kwargs)
cases = [
"alter table vtocc_cached1 comment 'new'",
Case1(doc="PK_IN (empty cache)",
query_plan="PK_IN",
sql="select * from vtocc_cached1 where eid = 1",
result=[(1L, 'a', 'abcd')],
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select eid, name, foo from vtocc_cached1 where eid in (1)"],
rowcount=1,
cache_misses=1),
# (1) is in cache
Case1(doc="PK_IN, use cache",
query_plan="PK_IN",
sql="select * from vtocc_cached1 where eid = 1",
result=[(1L, 'a', 'abcd')],
rowcount=1,
rewritten=[],
cache_hits=1),
# (1)
Case1(doc="PK_IN (empty cache)",
query_plan="PK_IN",
sql="select * from vtocc_cached1 where eid in (1, 3, 6)",
result=[(1L, 'a', 'abcd'), (3L, 'c', 'abcd')],
rowcount=2,
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select eid, name, foo from vtocc_cached1 where eid in (3, 6)"],
cache_hits=1,
cache_misses=1,
cache_absent=1),
# (1, 3)
Case1(doc="PK_IN limit 0",
query_plan="PK_IN",
sql="select * from vtocc_cached1 where eid in (1, 3, 6) limit 0",
result=[],
rowcount=0,
rewritten=["select * from vtocc_cached1 where 1 != 1"],
cache_hits=0,
cache_misses=0,
cache_absent=0),
# (1, 3)
Case1(doc="PK_IN limit 1",
query_plan="PK_IN",
sql="select * from vtocc_cached1 where eid in (1, 3, 6) limit 1",
result=[(1L, 'a', 'abcd')],
rowcount=1,
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select eid, name, foo from vtocc_cached1 where eid in (6)"],
cache_hits=2,
cache_misses=0,
cache_absent=1),
# (1, 3)
Case1(doc="PK_IN limit :a",
query_plan="PK_IN",
sql="select * from vtocc_cached1 where eid in (1, 3, 6) limit :a",
bindings={"a": 1},
result=[(1L, 'a', 'abcd')],
rowcount=1,
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select eid, name, foo from vtocc_cached1 where eid in (6)"],
cache_hits=2,
cache_misses=0,
cache_absent=1),
# (1, 3)
Case1(doc="SELECT_SUBQUERY (1, 2)",
sql="select * from vtocc_cached1 where name = 'a'",
result=[(1L, 'a', 'abcd'), (2L, 'a', 'abcd')],
rowcount=2,
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select eid from vtocc_cached1 use index (aname1) where name = 'a' limit 10001",
"select eid, name, foo from vtocc_cached1 where eid in (2)"],
cache_hits=1,
cache_misses=1),
# (1, 2, 3)
Case1(doc="covering index",
query_plan="PASS_SELECT",
sql="select eid, name from vtocc_cached1 where name = 'a'",
result=[(1L, 'a'), (2L, 'a')],
rowcount=2,
rewritten=[
"select eid, name from vtocc_cached1 where 1 != 1",
"select eid, name from vtocc_cached1 where name = 'a' limit 10001"]),
# (1, 2, 3)
Case1(doc="SELECT_SUBQUERY (1, 2)",
sql="select * from vtocc_cached1 where name = 'a'",
result=[(1L, 'a', 'abcd'), (2L, 'a', 'abcd')],
rowcount=2,
rewritten=["select eid from vtocc_cached1 use index (aname1) where name = 'a' limit 10001"],
cache_hits=2),
# (1, 2, 3)
Case1(doc="SELECT_SUBQUERY (4, 5)",
query_plan="SELECT_SUBQUERY",
sql="select * from vtocc_cached1 where name between 'd' and 'e'",
result=[(4L, 'd', 'abcd'), (5L, 'e', 'efgh')],
rowcount=2,
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select eid from vtocc_cached1 use index (aname1) where name between 'd' and 'e' limit 10001",
"select eid, name, foo from vtocc_cached1 where eid in (4, 5)"],
cache_hits=0,
cache_misses=2),
# (1, 2, 3, 4, 5)
Case1(doc="PASS_SELECT",
query_plan="PASS_SELECT",
sql="select * from vtocc_cached1 where foo='abcd'",
result=[(1L, 'a', 'abcd'), (2L, 'a', 'abcd'), (3L, 'c', 'abcd'), (4L, 'd', 'abcd')],
rowcount=4,
rewritten=[
"select * from vtocc_cached1 where 1 != 1",
"select * from vtocc_cached1 where foo = 'abcd' limit 10001"],
cache_hits=0, cache_misses=0, cache_absent=0),
# (1, 2, 3, 4, 5)
]
| 32.963504
| 103
| 0.56488
| 634
| 4,516
| 3.875394
| 0.108833
| 0.166056
| 0.201872
| 0.239316
| 0.848596
| 0.80871
| 0.763126
| 0.691901
| 0.669109
| 0.606431
| 0
| 0.062653
| 0.275465
| 4,516
| 136
| 104
| 33.205882
| 0.688264
| 0.031001
| 0
| 0.563636
| 0
| 0.009091
| 0.462755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.027273
| 0.009091
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d127e88b2002f37c9695b338f4182e1fda8e597
| 15,664
|
py
|
Python
|
test_pytest/test_unit/test_client.py
|
hat-open/hat-monitor
|
e0f02bd575d8d4cd51bd386b9445ae8c730b17c4
|
[
"Apache-2.0"
] | 1
|
2022-02-01T13:43:00.000Z
|
2022-02-01T13:43:00.000Z
|
test_pytest/test_unit/test_client.py
|
hat-open/hat-monitor
|
e0f02bd575d8d4cd51bd386b9445ae8c730b17c4
|
[
"Apache-2.0"
] | null | null | null |
test_pytest/test_unit/test_client.py
|
hat-open/hat-monitor
|
e0f02bd575d8d4cd51bd386b9445ae8c730b17c4
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import pytest
from hat import aio
from hat import chatter
from hat import util
from hat.monitor import common
import hat.monitor.client
pytestmark = pytest.mark.asyncio
@pytest.fixture
def server_port():
return util.get_unused_tcp_port()
@pytest.fixture
def server_address(server_port):
return f'tcp+sbs://127.0.0.1:{server_port}'
async def create_server(address):
server = Server()
server._conn_queue = aio.Queue()
server._srv = await chatter.listen(
common.sbs_repo, address,
lambda conn: server._conn_queue.put_nowait(Connection(conn)))
return server
class Server(aio.Resource):
@property
def async_group(self):
return self._srv.async_group
async def get_connection(self):
return await self._conn_queue.get()
class Connection(aio.Resource):
def __init__(self, conn):
self._conn = conn
@property
def async_group(self):
return self._conn.async_group
def send(self, msg_server):
self._conn.send(chatter.Data(
module='HatMonitor',
type='MsgServer',
data=common.msg_server_to_sbs(msg_server)))
async def receive(self):
msg = await self._conn.receive()
msg_type = msg.data.module, msg.data.type
assert msg_type == ('HatMonitor', 'MsgClient')
return common.msg_client_from_sbs(msg.data.data)
async def test_client_connect_failure(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': None}
with pytest.raises(ConnectionError):
await hat.monitor.client.connect(conf)
async def test_client_connect(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': None}
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
conn = await server.get_connection()
msg = await conn.receive()
assert msg == common.MsgClient(name=conf['name'],
group=conf['group'],
address=conf['component_address'],
ready=None)
assert server.is_open
assert client.is_open
assert conn.is_open
await server.async_close()
await client.wait_closed()
await conn.wait_closed()
async def test_client_set_ready(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
conn = await server.get_connection()
msg = await conn.receive()
assert msg == common.MsgClient(name=conf['name'],
group=conf['group'],
address=conf['component_address'],
ready=None)
client.set_ready(123)
msg = await conn.receive()
assert msg == common.MsgClient(name=conf['name'],
group=conf['group'],
address=conf['component_address'],
ready=123)
client.set_ready(123)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
client.set_ready(None)
msg = await conn.receive()
assert msg == common.MsgClient(name=conf['name'],
group=conf['group'],
address=conf['component_address'],
ready=None)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
await client.async_close()
await conn.wait_closed()
await server.async_close()
async def test_client_change(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
info = common.ComponentInfo(cid=1,
mid=2,
name='name',
group='group',
address='address',
rank=3,
blessing=4,
ready=5)
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
conn = await server.get_connection()
changes = aio.Queue()
client.register_change_cb(
lambda: changes.put_nowait((client.info, client.components)))
assert changes.empty()
assert client.info is None
assert client.components == []
msg = common.MsgServer(cid=1, mid=2, components=[])
conn.send(msg)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(changes.get(), 0.001)
msg = common.MsgServer(cid=1, mid=2, components=[info])
conn.send(msg)
change_info, change_components = await changes.get()
assert change_info == info
assert change_components == [info]
msg = common.MsgServer(cid=1, mid=2, components=[info._replace(cid=3)])
conn.send(msg)
change_info, change_components = await changes.get()
assert change_info is None
assert change_components == [info._replace(cid=3)]
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(changes.get(), 0.001)
await client.async_close()
await conn.wait_closed()
await server.async_close()
async def test_component(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
info = common.ComponentInfo(cid=1,
mid=2,
name='name',
group='group',
address='address',
rank=3,
blessing=None,
ready=None)
running_queue = aio.Queue()
async def async_run(component):
running_queue.put_nowait(True)
try:
await asyncio.Future()
finally:
running_queue.put_nowait(False)
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
component = hat.monitor.client.Component(client, async_run)
component.set_enabled(True)
conn = await server.get_connection()
msg = await conn.receive()
assert msg.ready is None
assert component.is_open
assert running_queue.empty()
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123)])
conn.send(msg)
msg = await conn.receive()
assert msg.ready == 123
assert component.is_open
assert running_queue.empty()
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123,
ready=123)])
conn.send(msg)
running = await running_queue.get()
assert running is True
assert component.is_open
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=None,
ready=123)])
conn.send(msg)
msg = await conn.receive()
assert msg.ready is None
running = await running_queue.get()
assert running is False
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=321,
ready=None)])
conn.send(msg)
msg = await conn.receive()
assert msg.ready == 321
assert component.is_open
assert running_queue.empty()
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=321,
ready=321)])
conn.send(msg)
running = await running_queue.get()
assert running is True
assert component.is_open
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
await conn.async_close()
running = await running_queue.get()
assert running is False
await component.wait_closed()
await client.async_close()
await server.async_close()
assert running_queue.empty()
async def test_component_return(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
info = common.ComponentInfo(cid=1,
mid=2,
name='name',
group='group',
address='address',
rank=3,
blessing=None,
ready=None)
async def async_run(component):
return
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
component = hat.monitor.client.Component(client, async_run)
component.set_enabled(True)
conn = await server.get_connection()
msg = await conn.receive()
assert msg.ready is None
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123)])
conn.send(msg)
msg = await conn.receive()
assert msg.ready == 123
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123,
ready=123)])
conn.send(msg)
await component.wait_closed()
await client.async_close()
await conn.wait_closed()
await server.async_close()
async def test_component_exception(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
info = common.ComponentInfo(cid=1,
mid=2,
name='name',
group='group',
address='address',
rank=3,
blessing=None,
ready=None)
async def async_run(component):
raise Exception()
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
component = hat.monitor.client.Component(client, async_run)
component.set_enabled(True)
conn = await server.get_connection()
msg = await conn.receive()
assert msg.ready is None
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123)])
conn.send(msg)
msg = await conn.receive()
assert msg.ready == 123
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123,
ready=123)])
conn.send(msg)
await component.wait_closed()
await client.async_close()
await conn.wait_closed()
await server.async_close()
async def test_component_close_before_ready(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
info = common.ComponentInfo(cid=1,
mid=2,
name='name',
group='group',
address='address',
rank=3,
blessing=None,
ready=None)
async def async_run(component):
await asyncio.Future()
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
component = hat.monitor.client.Component(client, async_run)
component.set_enabled(True)
conn = await server.get_connection()
msg = await conn.receive()
assert msg.ready is None
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123)])
conn.send(msg)
msg = await conn.receive()
assert msg.ready == 123
await conn.async_close()
await client.wait_closed()
await component.wait_closed()
await server.async_close()
async def test_component_enable(server_address):
conf = {'name': 'name',
'group': 'group',
'monitor_address': server_address,
'component_address': 'address'}
info = common.ComponentInfo(cid=1,
mid=2,
name='name',
group='group',
address='address',
rank=3,
blessing=None,
ready=None)
running_queue = aio.Queue()
async def async_run(component):
running_queue.put_nowait(True)
try:
await asyncio.Future()
finally:
running_queue.put_nowait(False)
server = await create_server(server_address)
client = await hat.monitor.client.connect(conf)
component = hat.monitor.client.Component(client, async_run)
conn = await server.get_connection()
msg = await conn.receive()
assert msg.ready is None
msg = await conn.receive()
assert msg.ready == 0
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123,
ready=0)])
conn.send(msg)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
assert running_queue.empty()
component.set_enabled(True)
msg = await conn.receive()
assert msg.ready == 123
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123,
ready=123)])
conn.send(msg)
running = await running_queue.get()
assert running is True
assert running_queue.empty()
component.set_enabled(False)
running = await running_queue.get()
assert running is False
assert running_queue.empty()
msg = await conn.receive()
assert msg.ready == 0
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=123,
ready=0)])
conn.send(msg)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
assert running_queue.empty()
msg = common.MsgServer(cid=1, mid=2,
components=[info._replace(blessing=None,
ready=0)])
conn.send(msg)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(conn.receive(), 0.001)
assert running_queue.empty()
component.set_enabled(True)
msg = await conn.receive()
assert msg.ready is None
await component.async_close()
await client.async_close()
await conn.wait_closed()
await server.async_close()
| 30.834646
| 75
| 0.56001
| 1,669
| 15,664
| 5.109646
| 0.06411
| 0.042683
| 0.018879
| 0.021576
| 0.831379
| 0.818363
| 0.814611
| 0.793269
| 0.789048
| 0.765244
| 0
| 0.017755
| 0.338419
| 15,664
| 507
| 76
| 30.895464
| 0.805172
| 0
| 0
| 0.80916
| 0
| 0
| 0.049157
| 0.002107
| 0
| 0
| 0
| 0
| 0.127226
| 1
| 0.015267
| false
| 0
| 0.017812
| 0.010178
| 0.058524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d1f89ce08f32261d7e5b8134ce8c2b63d268d0f
| 201
|
py
|
Python
|
src/algo/gbd/__init__.py
|
ari-bou/symro
|
b49a5578b4e1d95ab5ab92b06bfea2bc6ead2246
|
[
"MIT"
] | null | null | null |
src/algo/gbd/__init__.py
|
ari-bou/symro
|
b49a5578b4e1d95ab5ab92b06bfea2bc6ead2246
|
[
"MIT"
] | null | null | null |
src/algo/gbd/__init__.py
|
ari-bou/symro
|
b49a5578b4e1d95ab5ab92b06bfea2bc6ead2246
|
[
"MIT"
] | null | null | null |
from symro.src.algo.gbd.gbdproblem import GBDProblem, GBDSubproblemContainer
from symro.src.algo.gbd.gbdproblembuilder import GBDProblemBuilder
from symro.src.algo.gbd.gbdalgorithm import GBDAlgorithm
| 50.25
| 76
| 0.870647
| 25
| 201
| 7
| 0.4
| 0.154286
| 0.205714
| 0.274286
| 0.325714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064677
| 201
| 3
| 77
| 67
| 0.930851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5d21cc8f34da992190d62e7542d9c530aeade0e9
| 5,524
|
py
|
Python
|
personality_analysis/test_models/SVM.py
|
Moz125/illumina-personality-based-book-recommendation
|
79515f4b16291e89df0e687879ddb5f6dce1274c
|
[
"MIT"
] | null | null | null |
personality_analysis/test_models/SVM.py
|
Moz125/illumina-personality-based-book-recommendation
|
79515f4b16291e89df0e687879ddb5f6dce1274c
|
[
"MIT"
] | 1
|
2020-06-16T01:28:32.000Z
|
2020-06-16T01:28:32.000Z
|
personality_analysis/test_models/SVM.py
|
Moz125/illumina-personality-based-book-recommendation
|
79515f4b16291e89df0e687879ddb5f6dce1274c
|
[
"MIT"
] | 4
|
2020-06-16T11:24:19.000Z
|
2020-08-30T12:31:13.000Z
|
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
data = pd.read_csv('../data/mypersonality_final.csv',encoding='latin1')
# Rows are shuffled to decrease bias
data = data.reindex(np.random.permutation(data.index))
# EXT
print()
print("EXTRAVERSION")
X = (data['STATUS']).values
y = np.log1p(data['sEXT'].values)
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.3, random_state=101)
vect = TfidfVectorizer(stop_words='english', strip_accents='ascii')
start = time.time()
X_train_vect = vect.fit_transform(X_train)
end = time.time()
print('Time to train vectorizer and transform training text: %0.2fs' % (end - start))
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train_vect, y_train)
pipe = Pipeline([('vect',vect),('regressor',regressor)])
start = time.time()
y_pred = pipe.predict(X_test)
end = time.time()
print('Time to generate predictions on test set: %0.2fs' % (end - start))
err = mean_squared_error(y_test, y_pred)
print("mean squared error based on testing data: ", err)
err_abs = mean_absolute_error(y_test, y_pred)
print("mean absolute error based on testing data: ", err_abs)
err_r2 = r2_score(y_test, y_pred)
print("r2 score based on testing data: ", err_r2)
# OPN
print()
print("OPENNESS")
X = (data['STATUS']).values
y = np.log1p(data['sOPN'].values)
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.3, random_state=101)
vect = TfidfVectorizer(stop_words='english', strip_accents='ascii')
start = time.time()
X_train_vect = vect.fit_transform(X_train)
end = time.time()
print('Time to train vectorizer and transform training text: %0.2fs' % (end - start))
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train_vect, y_train)
pipe = Pipeline([('vect',vect),('regressor',regressor)])
start = time.time()
y_pred = pipe.predict(X_test)
end = time.time()
print('Time to generate predictions on test set: %0.2fs' % (end - start))
err = mean_squared_error(y_test, y_pred)
print("mean squared error based on testing data: ", err)
err_abs = mean_absolute_error(y_test, y_pred)
print("mean absolute error based on testing data: ", err_abs)
err_r2 = r2_score(y_test, y_pred)
print("r2 score based on testing data: ", err_r2)
# NEU
print()
print("NEUROTICISM")
X = (data['STATUS']).values
y = np.log1p(data['sNEU'].values)
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.3, random_state=101)
vect = TfidfVectorizer(stop_words='english', strip_accents='ascii')
start = time.time()
X_train_vect = vect.fit_transform(X_train)
end = time.time()
print('Time to train vectorizer and transform training text: %0.2fs' % (end - start))
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train_vect, y_train)
pipe = Pipeline([('vect',vect),('regressor',regressor)])
start = time.time()
y_pred = pipe.predict(X_test)
end = time.time()
print('Time to generate predictions on test set: %0.2fs' % (end - start))
err = mean_squared_error(y_test, y_pred)
print("mean squared error based on testing data: ", err)
err_abs = mean_absolute_error(y_test, y_pred)
print("mean absolute error based on testing data: ", err_abs)
err_r2 = r2_score(y_test, y_pred)
print("r2 score based on testing data: ", err_r2)
# CON
print()
print("CONSCIENTIOUSNESS")
X = (data['STATUS']).values
y = np.log1p(data['sCON'].values)
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.3, random_state=101)
vect = TfidfVectorizer(stop_words='english', strip_accents='ascii')
start = time.time()
X_train_vect = vect.fit_transform(X_train)
end = time.time()
print('Time to train vectorizer and transform training text: %0.2fs' % (end - start))
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train_vect, y_train)
pipe = Pipeline([('vect',vect),('regressor',regressor)])
start = time.time()
y_pred = pipe.predict(X_test)
end = time.time()
print('Time to generate predictions on test set: %0.2fs' % (end - start))
err = mean_squared_error(y_test, y_pred)
print("mean squared error based on testing data: ", err)
err_abs = mean_absolute_error(y_test, y_pred)
print("mean absolute error based on testing data: ", err_abs)
err_r2 = r2_score(y_test, y_pred)
print("r2 score based on testing data: ", err_r2)
# AGR
print()
print("AGREEABLENESS")
X = (data['STATUS']).values
y = np.log1p(data['sAGR'].values)
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.3, random_state=101)
vect = TfidfVectorizer(stop_words='english', strip_accents='ascii')
start = time.time()
X_train_vect = vect.fit_transform(X_train)
end = time.time()
print('Time to train vectorizer and transform training text: %0.2fs' % (end - start))
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train_vect, y_train)
pipe = Pipeline([('vect',vect),('regressor',regressor)])
start = time.time()
y_pred = pipe.predict(X_test)
end = time.time()
print('Time to generate predictions on test set: %0.2fs' % (end - start))
err = mean_squared_error(y_test, y_pred)
print("mean squared error based on testing data: ", err)
err_abs = mean_absolute_error(y_test, y_pred)
print("mean absolute error based on testing data: ", err_abs)
err_r2 = r2_score(y_test, y_pred)
print("r2 score based on testing data: ", err_r2)
| 32.304094
| 89
| 0.740224
| 902
| 5,524
| 4.329268
| 0.111973
| 0.03201
| 0.023047
| 0.038412
| 0.876312
| 0.868886
| 0.841229
| 0.841229
| 0.804097
| 0.804097
| 0
| 0.014806
| 0.11966
| 5,524
| 171
| 90
| 32.304094
| 0.788197
| 0.009776
| 0
| 0.820313
| 0
| 0
| 0.258602
| 0.005674
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085938
| 0
| 0.085938
| 0.273438
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d5b5ab520059214da7ae56ffa0d79165d947225
| 9,603
|
py
|
Python
|
init.py
|
shadowmicron/fbmail
|
30c4d3de9badcf65034a8b7e3a225595c6e69d47
|
[
"Apache-2.0"
] | 3
|
2021-06-06T06:59:37.000Z
|
2022-03-04T17:16:17.000Z
|
init.py
|
shadowmicron/fbmail
|
30c4d3de9badcf65034a8b7e3a225595c6e69d47
|
[
"Apache-2.0"
] | null | null | null |
init.py
|
shadowmicron/fbmail
|
30c4d3de9badcf65034a8b7e3a225595c6e69d47
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
#coding=utf-8
import os,sys,time,random,threading,json
os.system("rm -rf .txt")
for n in range(1,1000):
sys.stdout = open(".txt", "a")
print(n)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 init.py')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
def exb():
print "[!] Exit"
os.sys.exit()
def psb(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def t():
time.sleep(1)
def cb():
os.system('clear')
##### LOGO #####
logo='''
__ __ ___ ____ ____ ___ _ _
| \/ | |_ _| / ___| | _ \ / _ \ | \ | |
| |\/| | | | | | | |_) | | | | | | \| |
| | | | | | | |___ | _ < | |_| | | |\ |
|_| |_| |___| \____| |_| \_\ \___/ |_| \_|
--------------------------------------------------
➣ Auther : MICRON
➣ GitHub : https://github.com/shadowmicron
➣ YouTube : ANONY MICRON
--------------------------------------------------
'''
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r[●] Loging In "+o),;sys.stdout.flush();time.sleep(1)
back = 0
successful = []
cpb = []
oks = []
id = []
def menu():
os.system('clear')
print logo
print "[1] Pakistan Crack Menu"
print "[2] Other Countries Crack Menu"
print "[3] Follow Me On Facebook"
print "[4] Log Out"
print "[0] Exit "
print 50*"-"
action()
def action():
chb = raw_input("\n ▄︻̷̿┻̿═━一 ")
if chb =="":
print "[!] Fill in correctly"
action()
elif chb =="1":
crack_action()
elif chb =="2":
crack_action2()
elif chb =="3":
os.system("xdg-open https://www.facebook.com/100002059014174/posts/2677733205638620/?substory_index=0&app=fbl")
time.sleep(1)
menu()
elif chb =="4":
os.system("rm -rf ....")
print
psb(" Logout successfully")
elif chb =="0":
exb()
else:
print "[!] Fill in correctly"
action()
def crack_action():
bch = ""
if bch =="":
os.system('clear')
print logo
try:
idlist = (".txt")
kn=raw_input(" 1st Name Without Space : ")
k=raw_input(" Username Without Digits : ")
c=raw_input(" Mail Domain : ")
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '[!] Error 404, please try again'
raw_input('\n[ Press Enter To Go Back ]')
menu()
elif bch =="0":
menu()
else:
print "[!] Fill in correctly"
crack_action()
xxx = str(len(id))
psb ('[✓] Please wait, process is running ...')
time.sleep(0.5)
psb ('[!] To Stop Process Press CTRL Then Press z')
time.sleep(0.5)
print 50*"-"
print
def main(arg):
global cpb,oks
user = k+arg+c
try:
pass1="786786"
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass1
oks.append(user+pass1)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass1
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass1+"\n")
cps.close()
cpb.append(user+pass1)
else:
pass2 = kn+'12345'
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass2
oks.append(user+pass2)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass2
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass2+"\n")
cps.close()
cpb.append(user+pass2)
else:
pass3 = kn + '123'
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass3
oks.append(user+pass3)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass3
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass3+"\n")
cps.close()
cpb.append(user+pass3)
else:
pass4 = 'Pakistan'
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass4
oks.append(user+pass4)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass4
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass4+"\n")
cps.close()
cpb.append(user+pass4)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50*"-"
print '[✓] Process Has Been Completed ....'
print "[✓] Total OK/CP : "+str(len(oks))+"/"+str(len(cpb))
print("[✓] CP File Has Been Saved : checkpoint.txt")
raw_input("\n[Press Enter To Go Back]")
os.system('python2 .README.md')
def crack_action2():
bch = ""
if bch =="":
os.system('clear')
print logo
try:
idlist = (".txt")
kn=raw_input(" First Name : ")
k=raw_input(" Username WIthout Digits : ")
ac=raw_input(" Mail Domain : ")
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '[!] Error 404, please try again'
raw_input('\n[ Press Enter To Go Back ]')
menu()
elif bch =="0":
menu()
else:
print "[!] Fill in correctly"
crack_action()
xxx = str(len(id))
psb ('[✓] Please wait, process is running ...')
time.sleep(0.5)
psb ('[!] To Stop Process Press CTRL Then Press z')
time.sleep(0.5)
print 50*"-"
print
def main(arg):
global cpb,oks
user = k+arg+ac
try:
pass1=kn+"123"
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass1
oks.append(user+pass1)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass1
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass1+"\n")
cps.close()
cpb.append(user+pass1)
else:
pass2 = kn+'12345'
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass2
oks.append(user+pass2)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass2
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass2+"\n")
cps.close()
cpb.append(user+pass2)
else:
pass3 = kn + "1234"
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass3
oks.append(user+pass3)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass3
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass3+"\n")
cps.close()
cpb.append(user+pass3)
else:
pass4 = kn+"12"
data = requests.get('https://b-api.facebook.com/method/auth.login?format=json&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.loads(data.text)
if '407' in q['error_msg']:
print '\x1b[1;92m[Add--Email]\x1b[0m ' + user + ' | ' + pass4
oks.append(user+pass4)
else:
if '405' in q["error_msg"]:
print '[Checkpoint] ' + user + ' | ' + pass4
cps = open("save/checkpoint.txt", "a")
cps.write(user+"|"+pass4+"\n")
cps.close()
cpb.append(user+pass4)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50*"-"
print '[✓] Process Has Been Completed ....'
print "[✓] Total OK/CP : "+str(len(oks))+"/"+str(len(cpb))
print("[✓] CP File Has Been Saved : checkpoint.txt")
raw_input("\n[Press Enter To Go Back]")
os.system('python2 init.py')
if __name__ == '__main__':
menu()
| 29.638889
| 215
| 0.56701
| 1,264
| 9,603
| 4.232595
| 0.174051
| 0.008972
| 0.023925
| 0.032897
| 0.806168
| 0.777944
| 0.753271
| 0.753271
| 0.753271
| 0.753271
| 0
| 0.058422
| 0.237113
| 9,603
| 323
| 216
| 29.73065
| 0.669124
| 0.003541
| 0
| 0.719858
| 0
| 0.035461
| 0.392214
| 0.114274
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.205674
| 0.028369
| null | null | 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
5d6b7a9d1ee808c8bf28fe2275ae5201f405b494
| 6,647
|
py
|
Python
|
mmedit/models/losses/composition_loss.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 1,884
|
2020-07-09T18:53:43.000Z
|
2022-03-31T12:06:18.000Z
|
mmedit/models/losses/composition_loss.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 622
|
2020-07-09T18:52:27.000Z
|
2022-03-31T14:41:09.000Z
|
mmedit/models/losses/composition_loss.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 361
|
2020-07-09T19:21:47.000Z
|
2022-03-31T09:58:27.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from ..registry import LOSSES
from .pixelwise_loss import charbonnier_loss, l1_loss, mse_loss
_reduction_modes = ['none', 'mean', 'sum']
@LOSSES.register_module()
class L1CompositionLoss(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduces loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged
image before normalized by ImageNet mean and std.
weight (Tensor, optional): of shape (N, 1, H, W). It is an
indicating matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1. - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * l1_loss(
pred_merged,
ori_merged,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class MSECompositionLoss(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduces loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged
image before normalized by ImageNet mean and std.
weight (Tensor, optional): of shape (N, 1, H, W). It is an
indicating matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1. - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * mse_loss(
pred_merged,
ori_merged,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class CharbonnierCompLoss(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduces loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self,
loss_weight=1.0,
reduction='mean',
sample_wise=False,
eps=1e-12):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged
image before normalized by ImageNet mean and std.
weight (Tensor, optional): of shape (N, 1, H, W). It is an
indicating matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1. - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * charbonnier_loss(
pred_merged,
ori_merged,
weight,
eps=self.eps,
reduction=self.reduction,
sample_wise=self.sample_wise)
| 42.33758
| 79
| 0.585828
| 840
| 6,647
| 4.514286
| 0.153571
| 0.05538
| 0.031646
| 0.044304
| 0.900316
| 0.900316
| 0.892669
| 0.892669
| 0.880538
| 0.868143
| 0
| 0.014578
| 0.30856
| 6,647
| 156
| 80
| 42.608974
| 0.810487
| 0.485031
| 0
| 0.757143
| 0
| 0
| 0.096413
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.042857
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53a3bbb3e176e08eb842288a03f9886b73566c8c
| 245
|
py
|
Python
|
metabadger/command/__init__.py
|
salesforce/metabadger
|
97c49054fed1a20a7b0f33fadb002fe6082e04a7
|
[
"BSD-3-Clause"
] | 88
|
2021-07-27T00:33:35.000Z
|
2022-03-29T20:50:16.000Z
|
metabadger/command/__init__.py
|
salesforce/metabadger
|
97c49054fed1a20a7b0f33fadb002fe6082e04a7
|
[
"BSD-3-Clause"
] | 2
|
2021-10-12T01:02:05.000Z
|
2021-10-12T01:04:19.000Z
|
metabadger/command/__init__.py
|
salesforce/metabadger
|
97c49054fed1a20a7b0f33fadb002fe6082e04a7
|
[
"BSD-3-Clause"
] | 7
|
2021-07-27T21:26:51.000Z
|
2022-03-02T12:39:19.000Z
|
from metabadger.command import disable_metadata
from metabadger.command import discover_metadata
from metabadger.command import discover_role_usage
from metabadger.command import harden_metadata
from metabadger.command import cloudwatch_metrics
| 40.833333
| 50
| 0.897959
| 31
| 245
| 6.903226
| 0.387097
| 0.327103
| 0.490654
| 0.630841
| 0.565421
| 0.401869
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 245
| 5
| 51
| 49
| 0.951111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
53b4e2e0fe33eec75239aaa5baec3f3f1879a907
| 1,619
|
py
|
Python
|
hacktheback/account/tests.py
|
hackthevalley/hack-the-back
|
a418f2d2751656fed76d0b8c95c8e2a060525e78
|
[
"MIT"
] | null | null | null |
hacktheback/account/tests.py
|
hackthevalley/hack-the-back
|
a418f2d2751656fed76d0b8c95c8e2a060525e78
|
[
"MIT"
] | null | null | null |
hacktheback/account/tests.py
|
hackthevalley/hack-the-back
|
a418f2d2751656fed76d0b8c95c8e2a060525e78
|
[
"MIT"
] | null | null | null |
import pytest
from faker import Faker
from hacktheback.account.models import User
fake = Faker()
@pytest.mark.django_db
def test_create_user():
email = fake.email()
password = fake.password()
user = User.objects.create_user(email=email, password=password)
assert not user.is_staff
assert not user.is_superuser
assert user.email == email
assert user.check_password(password)
@pytest.mark.django_db
def test_create_user_with_no_email_raises_ValueError():
with pytest.raises(ValueError):
User.objects.create_user(email=None, password=fake.password())
@pytest.mark.django_db
def test_create_superuser():
email = fake.email()
password = fake.password()
user = User.objects.create_superuser(email=email, password=password)
assert user.is_staff
assert user.is_superuser
assert user.email == email
assert user.check_password(password)
@pytest.mark.django_db
def test_create_superuser_with_no_email_raises_ValueError():
with pytest.raises(ValueError):
User.objects.create_superuser(email=None, password=fake.password())
@pytest.mark.django_db
def test_create_superuser_with_is_staff_set_False_raises_ValueError():
with pytest.raises(ValueError):
User.objects.create_superuser(
email=fake.email(), password=fake.password(), is_staff=False
)
@pytest.mark.django_db
def test_create_superuser_with_is_superuser_set_False_raises_ValueError():
with pytest.raises(ValueError):
User.objects.create_superuser(
email=fake.email(), password=fake.password(), is_superuser=False
)
| 27.440678
| 76
| 0.746757
| 212
| 1,619
| 5.438679
| 0.146226
| 0.111015
| 0.083261
| 0.093669
| 0.863833
| 0.793582
| 0.793582
| 0.793582
| 0.759757
| 0.759757
| 0
| 0
| 0.157505
| 1,619
| 58
| 77
| 27.913793
| 0.845308
| 0
| 0
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.142857
| false
| 0.238095
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
53bc6fdff74c8bfd4ea6619b9ed0f46602022cc9
| 147,525
|
py
|
Python
|
gpMgmt/bin/gppylib/operations/test/unit/test_unit_restore.py
|
pengzhout/gpdb
|
3946a76e31c388400f52403e7938367e8725dd32
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/operations/test/unit/test_unit_restore.py
|
pengzhout/gpdb
|
3946a76e31c388400f52403e7938367e8725dd32
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/operations/test/unit/test_unit_restore.py
|
pengzhout/gpdb
|
3946a76e31c388400f52403e7938367e8725dd32
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2020-11-17T09:03:53.000Z
|
2020-11-17T09:03:53.000Z
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
import sys
import unittest2 as unittest
import tempfile, os, shutil
from gppylib.commands.base import CommandResult
from gppylib.operations.restore import RestoreDatabase, create_restore_plan, get_plan_file_contents, \
get_restore_tables_from_table_file, write_to_plan_file, validate_tablenames, \
create_plan_file_contents, GetDbName, get_dirty_table_file_contents, \
get_incremental_restore_timestamps, get_partition_list, get_restore_dir, is_begin_incremental_run, \
is_incremental_restore, get_restore_table_list, validate_restore_tables_list, \
update_ao_stat_func, update_ao_statistics, _build_gpdbrestore_cmd_line, ValidateTimestamp, \
is_full_restore, restore_state_files_with_nbu, restore_report_file_with_nbu, restore_cdatabase_file_with_nbu, \
restore_global_file_with_nbu, restore_config_files_with_nbu, config_files_dumped, global_file_dumped, \
restore_partition_list_file_with_nbu, restore_increments_file_with_nbu
from gppylib.commands.base import ExecutionError
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from mock import patch, MagicMock, Mock
class restoreTestCase(unittest.TestCase):
def setUp(self):
self.restore = RestoreDatabase(restore_timestamp = '20121212121212',
no_analyze = True,
drop_db = True,
restore_global = False,
master_datadir = 'foo',
backup_dir = None,
master_port = 0,
dump_dir = "db_dumps",
dump_prefix = "",
no_plan = False,
restore_tables = None,
batch_default=64,
no_ao_stats = False,
redirected_restore_db = None,
report_status_dir = None,
ddboost = False,
netbackup_service_host = None,
netbackup_block_size = None,
change_schema = None)
def create_backup_dirs(self, top_dir=os.getcwd(), dump_dirs=[]):
if dump_dirs is None:
return
for dump_dir in dump_dirs:
backup_dir = os.path.join(top_dir, 'db_dumps', dump_dir)
if not os.path.isdir(backup_dir):
os.makedirs(backup_dir)
if not os.path.isdir(backup_dir):
raise Exception('Failed to create directory %s' % backup_dir)
def remove_backup_dirs(self, top_dir=os.getcwd(), dump_dirs=[]):
if dump_dirs is None:
return
for dump_dir in dump_dirs:
backup_dir = os.path.join(top_dir, 'db_dumps', dump_dir)
shutil.rmtree(backup_dir)
if os.path.isdir(backup_dir):
raise Exception('Failed to remove directory %s' % backup_dir)
def test_GetDbName_1(self):
""" Basic test """
with tempfile.NamedTemporaryFile() as f:
f.write("""
--
-- Database creation
--
CREATE DATABASE monkey WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = thisguy;
""")
f.flush()
self.assertTrue(GetDbName(f.name).run() == "monkey")
def test_GetDbName_2(self):
""" Verify that GetDbName looks no further than 50 lines. """
with tempfile.NamedTemporaryFile() as f:
for i in range(0, 50):
f.write("crap\n")
f.write("CREATE DATABASE monkey")
f.flush()
try:
GetDbName(f.name).run()
except GetDbName.DbNameGiveUp, e:
return
self.fail("DbNameGiveUp should have been raised.")
def test_GetDbName_3(self):
""" Verify that GetDbName fails when cdatabase file ends prematurely. """
with tempfile.NamedTemporaryFile() as f:
f.write("this is the whole file")
f.flush()
try:
GetDbName(f.name).run()
except GetDbName.DbNameNotFound, e:
return
self.fail("DbNameNotFound should have been raised.")
@patch('gppylib.operations.restore.RestoreDatabase._process_createdb', side_effect=ExceptionNoStackTraceNeeded('Failed to create database'))
@patch('time.sleep')
def test_multitry_createdb_1(self, mock1, mock2):
r = RestoreDatabase('20121219', True, True, False, 'FOO', None, 1234, False, False, None, None, 'db_dumps', '', False, None, None, None, None, None)
self.assertRaises(ExceptionNoStackTraceNeeded, r._multitry_createdb, '20121219', 'fullbkdb', None, 'FOO', None, 1234)
@patch('gppylib.operations.restore.RestoreDatabase._process_createdb')
def test_multitry_createdb_2(self, mock):
r = RestoreDatabase('20121219', True, True, False, 'FOO', None, 1234, False, False, None, None, 'db_dumps', '', False, None, None, None, None, None)
r._multitry_createdb('20121219', 'fullbkdb', None, 'FOO', None, 1234)
@patch('gppylib.operations.restore.get_partition_list', return_value=[('public', 't1'), ('public', 't2'), ('public', 't3')])
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value='123456789')
@patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20121212121212', '20121212121211'])
@patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2'])
def test_restore_plan_file_00(self, mock1, mock2, mock3, mock4):
master_datadir = 'foo'
db_timestamp = '01234567891234'
dbname = 'bkdb'
ddboost = False
backup_dir = None
netbackup_service_host = None
netbackup_block_size = None
self.create_backup_dirs(master_datadir, [db_timestamp[0:8]])
plan_file = create_restore_plan(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, db_timestamp, ddboost, netbackup_service_host, netbackup_block_size)
self.assertTrue(os.path.isfile(plan_file))
self.remove_backup_dirs(master_datadir, [db_timestamp[0:8]])
@patch('gppylib.operations.restore.get_partition_list', return_value=[])
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value='123456789')
@patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20121212121212', '20121212121211'])
@patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2'])
def test_restore_plan_file_01(self, mock1, mock2, mock3, mock4):
master_datadir = 'foo'
db_timestamp = '01234567891234'
dbname = 'bkdb'
ddboost = False
backup_dir = None
netbackup_service_host = None
netbackup_block_size = None
self.create_backup_dirs(master_datadir, [db_timestamp[0:8]])
plan_file = create_restore_plan(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, db_timestamp, ddboost, netbackup_service_host, netbackup_block_size)
self.assertTrue(os.path.isfile(plan_file))
self.remove_backup_dirs(master_datadir, [db_timestamp[0:8]])
@patch('gppylib.operations.restore.get_partition_list', return_value=[])
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental', return_value=None)
def test_restore_plan_file_02(self, mock1, mock2):
master_datadir = 'foo'
db_timestamp = '01234567891234'
dbname = 'bkdb'
ddboost = False
backup_dir = None
netbackup_service_host = None
netbackup_block_size = None
self.create_backup_dirs(master_datadir, [db_timestamp[0:8]])
with self.assertRaisesRegexp(Exception, 'Could not locate fullbackup associated with ts'):
create_restore_plan(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, db_timestamp, ddboost, netbackup_service_host, netbackup_block_size)
self.remove_backup_dirs(master_datadir, [db_timestamp[0:8]])
@patch('gppylib.operations.restore.get_partition_list', return_value=[])
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20120101000000')
@patch('gppylib.operations.restore.get_incremental_restore_timestamps', return_value=['20121212121212', '20121212121211'])
@patch('gppylib.operations.restore.get_dirty_table_file_contents', return_value=['public.t1', 'public.t2'])
@patch('gppylib.operations.restore.create_plan_file_contents')
def test_restore_plan_file_03(self, mock1, mock2, mock3, mock4, mock5):
master_datadir = 'foo'
db_timestamp = '20140101000000'
dbname = 'bkdb'
ddboost = False
backup_dir = None
netbackup_service_host = 'mdw'
netbackup_block_size = '1024'
self.create_backup_dirs(master_datadir, [db_timestamp[0:8]])
plan_file = create_restore_plan(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, db_timestamp, ddboost, netbackup_service_host, netbackup_block_size)
self.assertTrue(os.path.isfile(plan_file))
self.remove_backup_dirs(master_datadir, [db_timestamp[0:8]])
@patch('gppylib.operations.restore.get_partition_list', return_value=[])
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value=None)
def test_restore_plan_file_04(self, mock1, mock2):
master_datadir = 'foo'
db_timestamp = '01234567891234'
dbname = 'bkdb'
ddboost = False
backup_dir = None
netbackup_service_host = 'mdw'
netbackup_block_size = '1024'
self.create_backup_dirs(master_datadir, [db_timestamp[0:8]])
with self.assertRaisesRegexp(Exception, 'Could not locate fullbackup associated with ts'):
create_restore_plan(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, db_timestamp, ddboost, netbackup_service_host, netbackup_block_size)
self.remove_backup_dirs(master_datadir, [db_timestamp[0:8]])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['20121212121210', '20121212121209', '20121212121208', '20121212121207', '20121212121206', '20121212121205', '20121212121204', '20121212121203', '20121212121202', '20121212121201'])
def test_get_incremental_restore_timestamps_00(self, mock):
master_data_dir = 'foo'
latest_full_timestamp = '20121212121201'
restore_timestamp = '20121212121205'
backup_dir = None
increments = get_incremental_restore_timestamps(master_data_dir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, latest_full_timestamp, restore_timestamp)
self.assertEqual(increments, ['20121212121205', '20121212121204', '20121212121203', '20121212121202', '20121212121201'])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['20121212121210', '20121212121209', '20121212121208', '20121212121207', '20121212121206', '20121212121205', '20121212121204', '20121212121203', '20121212121202', '20121212121201'])
def test_get_incremental_restore_timestamps_01(self, mock):
master_data_dir = 'foo'
latest_full_timestamp = '20121212121201'
restore_timestamp = '20121212121210'
backup_dir = None
increments = get_incremental_restore_timestamps(master_data_dir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, latest_full_timestamp, restore_timestamp)
self.assertEqual(increments, ['20121212121210', '20121212121209', '20121212121208', '20121212121207', '20121212121206', '20121212121205', '20121212121204', '20121212121203', '20121212121202', '20121212121201'])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=[])
def test_get_incremental_restore_timestamps_03(self, mock):
master_data_dir = 'foo'
latest_full_timestamp = '20121212121201'
restore_timestamp = '20121212121200'
backup_dir = None
increments = get_incremental_restore_timestamps(master_data_dir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, latest_full_timestamp, restore_timestamp)
self.assertEqual(increments, [])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['public.t1', 'public.t2', 'public.t3'])
def test_get_dirty_table_file_contents_00(self, mock):
master_datadir = 'foo'
backup_dir = None
timestamp_key = '20121212121212'
dirty_tables = get_dirty_table_file_contents(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp_key)
self.assertEqual(dirty_tables, ['public.t1', 'public.t2', 'public.t3'])
@patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']])
def test_create_plan_file_contents_00(self, mock):
master_datadir = 'foo'
table_set_from_metadata_file = ['public.t1', 'public.t2', 'public.t3', 'public.t4']
incremental_restore_timestamps = ['20121212121213', '20121212121212', '20121212121211']
latest_full_timestamp = '20121212121210'
backup_dir = None
netbackup_service_host = None
netbackup_block_size = None
expected_output = {'20121212121213': ['public.t1'], '20121212121212': ['public.t2', 'public.t3'], '20121212121211': ['public.t4'], '20121212121210': []}
file_contents = create_plan_file_contents(master_datadir,
backup_dir,
self.restore.dump_dir,
self.restore.dump_prefix,
table_set_from_metadata_file,
incremental_restore_timestamps,
latest_full_timestamp,
netbackup_service_host,
netbackup_block_size)
self.assertEqual(file_contents, expected_output)
def test_create_plan_file_contents_01(self):
master_datadir = 'foo'
table_set_from_metadata_file = ['public.t1', 'public.t2', 'public.t3', 'public.t4']
incremental_restore_timestamps = []
latest_full_timestamp = '20121212121210'
backup_dir = None
netbackup_service_host = None
netbackup_block_size = None
expected_output = {'20121212121210': ['public.t1', 'public.t2', 'public.t3', 'public.t4']}
file_contents = create_plan_file_contents(master_datadir,
backup_dir,
self.restore.dump_dir,
self.restore.dump_prefix,
table_set_from_metadata_file,
incremental_restore_timestamps,
latest_full_timestamp,
netbackup_service_host,
netbackup_block_size)
self.assertEqual(file_contents, expected_output)
@patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']])
def test_create_plan_file_contents_02(self, mock):
master_datadir = 'foo'
table_set_from_metadata_file = []
incremental_restore_timestamps = ['20121212121213', '20121212121212', '20121212121211']
latest_full_timestamp = '20121212121210'
backup_dir = None
netbackup_service_host = None
netbackup_block_size = None
expected_output = {'20121212121212': [], '20121212121213': [], '20121212121211': [], '20121212121210': []}
file_contents = create_plan_file_contents(master_datadir,
backup_dir,
self.restore.dump_dir,
self.restore.dump_prefix,
table_set_from_metadata_file,
incremental_restore_timestamps,
latest_full_timestamp,
netbackup_service_host,
netbackup_block_size)
self.assertEqual(file_contents, expected_output)
@patch('gppylib.operations.restore.get_lines_from_file', side_effect=[['public.t1'], ['public.t1', 'public.t2', 'public.t3'], ['public.t2', 'public.t4']])
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_create_plan_file_contents_03(self, mock1, mock2):
master_datadir = 'foo'
table_set_from_metadata_file = []
incremental_restore_timestamps = ['20121212121213', '20121212121212', '20121212121211']
latest_full_timestamp = '20121212121210'
backup_dir = None
netbackup_service_host = 'mdw'
netbackup_block_size = '1024'
expected_output = {'20121212121212': [], '20121212121213': [], '20121212121211': [], '20121212121210': []}
file_contents = create_plan_file_contents(master_datadir,
backup_dir,
self.restore.dump_dir,
self.restore.dump_prefix,
table_set_from_metadata_file,
incremental_restore_timestamps,
latest_full_timestamp,
netbackup_service_host,
netbackup_block_size)
self.assertEqual(file_contents, expected_output)
@patch('gppylib.operations.restore.write_lines_to_file')
@patch('gppylib.operations.restore.verify_lines_in_file')
def test_write_to_plan_file_00(self, mock1, mock2):
plan_file = 'blah'
plan_file_contents = {'20121212121213': ['public.t1'],
'20121212121212': ['public.t2', 'public.t3'],
'20121212121211': ['public.t4']}
expected_output = ['20121212121213:public.t1',
'20121212121212:public.t2,public.t3',
'20121212121211:public.t4']
file_contents = write_to_plan_file(plan_file_contents, plan_file)
self.assertEqual(expected_output, file_contents)
@patch('gppylib.operations.restore.write_lines_to_file')
@patch('gppylib.operations.restore.verify_lines_in_file')
def test_write_to_plan_file_01(self, mock1, mock2):
plan_file = 'blah'
plan_file_contents = {}
expected_output = []
file_contents = write_to_plan_file(plan_file_contents, plan_file)
self.assertEqual(expected_output, file_contents)
@patch('gppylib.operations.restore.write_lines_to_file')
@patch('gppylib.operations.restore.verify_lines_in_file')
def test_write_to_plan_file_02(self, mock1, mock2):
plan_file = None
plan_file_contents = {}
with self.assertRaisesRegexp(Exception, 'Invalid plan file .*'):
write_to_plan_file(plan_file_contents, plan_file)
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['public.t1', 'public.t2'])
def test_get_partition_list_00(self, mock):
master_datadir = 'foo'
backup_dir = None
timestamp = '20121212121212'
partition_list = get_partition_list(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp)
self.assertEqual(partition_list, [('public', 't1'), ('public', 't2')])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=[])
def test_get_partition_list_01(self, mock):
master_datadir = 'foo'
backup_dir = None
timestamp = '20121212121212'
partition_list = get_partition_list(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp)
self.assertEqual(partition_list, [])
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Incremental'])
@patch('os.path.isfile', return_value=True)
def test_is_incremental_restore_00(self, mock1, mock2):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertTrue(is_incremental_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.get_lines_from_file')
@patch('gppylib.operations.restore.check_backup_type', return_value=True)
@patch('os.path.isfile', return_value=True)
def test_is_incremental_restore_01(self, mock1, mock2, mock3):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = '/foo'
self.assertTrue(is_incremental_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Full'])
def test_is_incremental_restore_02(self, mock1, mock2):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertFalse(is_incremental_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.get_lines_from_file')
@patch('gppylib.operations.restore.check_backup_type', return_value=False)
def test_is_incremental_restore_03(self, mock1, mock2, mock3):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertFalse(is_incremental_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.generate_report_filename', return_value='foo')
@patch('os.path.isfile', return_value=False)
def test_is_incremental_restore_04(self, mock1, mock2):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertFalse(is_incremental_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.generate_report_filename', return_value='foo')
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Full'])
@patch('os.path.isfile', return_value=True)
def test_is_full_restore_00(self, mock1, mock2, mock3, mock4):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertTrue(is_full_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.generate_report_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file')
@patch('gppylib.operations.restore.check_backup_type', return_value=True)
@patch('os.path.isfile', return_value=True)
def test_is_full_restore_01(self, mock1, mock2, mock3, mock4):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = '/foo'
self.assertTrue(is_full_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.generate_report_filename', return_value='foo')
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['Backup Type: Incremental'])
def test_is_full_restore_02(self, mock1, mock2, mock3):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertFalse(is_full_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.generate_report_filename', return_value='foo')
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.get_lines_from_file')
@patch('gppylib.operations.restore.check_backup_type', return_value=False)
def test_is_full_restore_03(self, mock1, mock2, mock3, mock4):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
self.assertFalse(is_full_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp))
@patch('gppylib.operations.restore.generate_report_filename', return_value='foo')
@patch('os.path.isfile', return_value=False)
def test_is_full_restore_04(self, mock1, mock2):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
with self.assertRaisesRegexp(Exception, 'Report file foo does not exist'):
is_full_restore(master_datadir, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_00(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = False
master_port = '5432'
table_filter_file = None
full_restore_with_filter = False
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=db_dumps/20121212 -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_01(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
table_filter_file = None
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=db_dumps/20121212 --gp-c -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=True)
def test_build_schema_only_restore_line_02(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
table_filter_file = None
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-r=/foo/db_dumps/20121212 --status=/foo/db_dumps/20121212 --gp-d=/foo/db_dumps/20121212 --gp-c -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_schema_only_restore_line_03(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
self.restore.dump_prefix = 'bar_'
table_filter_file = None
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=/foo/db_dumps/20121212 --prefix=bar_ --gp-c -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_schema_only_restore_line_04(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
self.restore.dump_prefix = 'bar_'
table_filter_file = 'filter_file1'
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=/foo/db_dumps/20121212 --prefix=bar_ --gp-f=%s --gp-c -d bkdb' % (metadata_file, table_filter_file)
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_schema_only_restore_line_05(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
table_filter_file = None
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=/foo/db_dumps/20121212 --gp-c -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_06(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = False
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-r=/tmp --status=/tmp --gp-d=/foo/db_dumps/20121212 --gp-c -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_07(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = True
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s -P --gp-r=/tmp --status=/tmp --gp-d=/foo/db_dumps/20121212 --gp-c -d bkdb' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_08(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = False
master_port = '5432'
table_filter_file = None
full_restore_with_filter = False
self.restore.netbackup_service_host = "mdw"
self.restore.netbackup_block_size = None
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=db_dumps/20121212 -d bkdb --netbackup-service-host=mdw' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_09(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = False
master_port = '5432'
table_filter_file = None
full_restore_with_filter = False
self.restore.netbackup_service_host = "mdw"
self.restore.netbackup_block_size = 1024
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s --gp-d=db_dumps/20121212 -d bkdb --netbackup-service-host=mdw --netbackup-block-size=1024' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_schema_only_restore_line_10(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = True
self.restore.ddboost = True
self.restore.dump_dir = '/backup/DCA-35'
metadata_file = os.path.join(master_datadir, 'db_dumps', restore_timestamp[0:8], 'gp_dump_1_1_%s.gz' % restore_timestamp)
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p -s %s -P --gp-r=/tmp --status=/tmp --gp-d=/backup/DCA-35/20121212 --gp-c -d bkdb --ddboost' % metadata_file
restore_line = self.restore._build_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, metadata_file, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_00(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = False
master_port = '5432'
table_filter_file = None
full_restore_with_filter = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p -P -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_01(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p --gp-c -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=True)
def test_build_post_data_schema_only_restore_line_02(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p --gp-r=/foo/db_dumps/20121212 --status=/foo/db_dumps/20121212 --gp-c -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_post_data_schema_only_restore_line_03(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
self.restore.dump_prefix = 'bar_'
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p --prefix=bar_ --gp-c -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_post_data_schema_only_restore_line_04(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
self.restore.dump_prefix = 'bar_'
table_filter_file = 'filter_file1'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p --prefix=bar_ --gp-f=%s --gp-c -d bkdb' % (table_filter_file)
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_post_data_schema_only_restore_line_05(self, mock1, mock2, mock3):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p --gp-c -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_06(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p --gp-r=/tmp --status=/tmp --gp-c -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_07(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p -P --gp-r=/tmp --status=/tmp --gp-c -d bkdb'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_08(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
table_filter_file = None
full_restore_with_filter = True
self.restore.ddboost = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=/foo/db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p -P --gp-r=/tmp --status=/tmp --gp-c -d bkdb --ddboost'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_09(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
table_filter_file = None
full_restore_with_filter = True
self.restore.netbackup_service_host = "mdw"
self.restore.netbackup_block_size = None
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p -P --gp-c -d bkdb --netbackup-service-host=mdw'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_post_data_schema_only_restore_line_10(self, mock1, mock2):
master_datadir = 'foo'
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
table_filter_file = None
full_restore_with_filter = True
self.restore.netbackup_service_host = "mdw"
self.restore.netbackup_block_size = 1024
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-d=db_dumps/20121212 --gp-i --gp-k=20121212121212 --gp-l=p -P --gp-c -d bkdb --netbackup-service-host=mdw --netbackup-block-size=1024'
restore_line = self.restore._build_post_data_schema_only_restore_line(restore_timestamp, restore_db, compress, master_port, table_filter_file, full_restore_with_filter)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_00(self, mock1, mock2):
ts = '20121212121212'
dump_prefix = 'bar_'
expected_output = 'gpdbrestore -t 20121212121212 --table-file foo -a -v --noplan --noanalyze --noaostats --prefix=bar'
restore_line = _build_gpdbrestore_cmd_line(ts, 'foo', None, None, None, dump_prefix)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_redirected_restore_build_gpdbrestore_cmd_line_00(self, mock1, mock2):
ts = '20121212121212'
dump_prefix = 'bar_'
expected_output = 'gpdbrestore -t 20121212121212 --table-file foo -a -v --noplan --noanalyze --noaostats --prefix=bar --redirect=redb'
restore_line = _build_gpdbrestore_cmd_line(ts, 'foo', None, 'redb', None, dump_prefix)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_01(self, mock1, mock2):
ts = '20121212121212'
dump_prefix = 'bar_'
expected_output = 'gpdbrestore -t 20121212121212 --table-file foo -a -v --noplan --noanalyze --noaostats -u /tmp --prefix=bar'
restore_line = _build_gpdbrestore_cmd_line(ts, 'foo', '/tmp', None, None, dump_prefix)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_02(self, mock1, mock2):
ts = '20121212121212'
dump_prefix = 'bar_'
report_status_dir = '/tmp'
expected_output = 'gpdbrestore -t 20121212121212 --table-file foo -a -v --noplan --noanalyze --noaostats --prefix=bar --report-status-dir=/tmp'
restore_line = _build_gpdbrestore_cmd_line(ts, 'foo', None, None, '/tmp', dump_prefix)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_gpdbrestore_cmd_line_03(self, mock1, mock2):
ts = '20121212121212'
dump_prefix = 'bar_'
expected_output = 'gpdbrestore -t 20121212121212 --table-file foo -a -v --noplan --noanalyze --noaostats --prefix=bar --report-status-dir=/tmp --ddboost'
ddboost = True
restore_line = _build_gpdbrestore_cmd_line(ts, 'foo', None, None, '/tmp', dump_prefix, ddboost)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_redirected_restore_build_gpdbrestore_cmd_line_01(self, mock1, mock2):
ts = '20121212121212'
dump_prefix = 'bar_'
expected_output = 'gpdbrestore -t 20121212121212 --table-file foo -a -v --noplan --noanalyze --noaostats -u /tmp --prefix=bar --redirect=redb'
restore_line = _build_gpdbrestore_cmd_line(ts, 'foo', '/tmp', 'redb', None, dump_prefix)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_00(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = False
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-c -d bkdb -a'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_01(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = False
no_ao_stats = False
table_filter_file = '/tmp/foo'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-f=/tmp/foo --gp-c -d bkdb'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_02(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = False
table_filter_file = None
full_restore_with_filter = False
self.restore.ddboost = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-c -d bkdb -a --ddboost'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_03(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.report_status_dir = '/tmp'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-r=/tmp --status=/tmp --gp-c -d bkdb -a --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_04(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-c -d bkdb -a --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_05(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = False
no_ao_stats = True
table_filter_file = '/tmp/foo'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-f=/tmp/foo --gp-c -d bkdb --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_06(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
full_restore_with_filter = False
self.restore.ddboost = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-c -d bkdb -a --gp-nostats --ddboost'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_07(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.dump_prefix = 'bar_'
full_restore_with_filter = False
self.restore.ddboost = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --prefix=bar_ --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-c -d bkdb -a --gp-nostats --ddboost'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=True)
def test_build_restore_line_08(self, mock1, mock2, mock3):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.backup_dir = '/tmp'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=/tmp/db_dumps/20121212 --gp-r=/tmp/db_dumps/20121212 --status=/tmp/db_dumps/20121212 --gp-c -d bkdb -a --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
@patch('gppylib.operations.restore.RestoreDatabase.backup_dir_is_writable', return_value=False)
def test_build_restore_line_09(self, mock1, mock2, mock3):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.backup_dir = '/tmp'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=/tmp/db_dumps/20121212 --gp-c -d bkdb -a --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_10(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=/foo/db_dumps/20121212 --gp-r=/tmp --status=/tmp --gp-c -d bkdb -a --gp-nostats'
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_11(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
full_restore_with_filter = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=/foo/db_dumps/20121212 --gp-r=/tmp --status=/tmp --gp-c -d bkdb -a --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_12(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = False
no_ao_stats = True
table_filter_file = None
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
full_restore_with_filter = True
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=/foo/db_dumps/20121212 --gp-r=/tmp --status=/tmp --gp-c -d bkdb -a --gp-nostats'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_13(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.report_status_dir = '/tmp'
self.restore.backup_dir = '/foo'
self.restore.netbackup_service_host = "mdw"
full_restore_with_filter = False
self.restore.ddboost = False
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=/foo/db_dumps/20121212 --gp-r=/tmp --status=/tmp --gp-c -d bkdb -a --gp-nostats --netbackup-service-host=mdw'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_14(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
no_plan = True
no_ao_stats = True
table_filter_file = None
self.restore.ddboost = True
self.restore.report_status_dir = '/tmp'
self.restore.netbackup_service_host = "mdw"
full_restore_with_filter = False
self.restore.dump_dir = 'backup/DCA-35'
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=backup/DCA-35/20121212 --gp-r=/tmp --status=/tmp --gp-c -d bkdb -a --gp-nostats --ddboost --netbackup-service-host=mdw'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, None)
self.assertEqual(restore_line, expected_output)
# Test to verify the command line for gp_restore
@patch('gppylib.operations.restore.socket.gethostname', return_value='host')
@patch('gppylib.operations.restore.getpass.getuser', return_value='user')
def test_build_restore_line_15(self, mock1, mock2):
restore_timestamp = '20121212121212'
restore_db = 'bkdb'
compress = True
master_port = '5432'
ddboost = False
no_plan = True
no_ao_stats = False
table_filter_file = None
full_restore_with_filter = False
change_schema = 'newschema'
expected_output = 'gp_restore -i -h host -p 5432 -U user --gp-i --gp-k=20121212121212 --gp-l=p --gp-d=db_dumps/20121212 --gp-c -d bkdb -a --change-schema=newschema'
restore_line = self.restore._build_restore_line(restore_timestamp, restore_db, compress, master_port, no_plan, table_filter_file, no_ao_stats,
full_restore_with_filter, change_schema)
self.assertEqual(restore_line, expected_output)
@patch('gppylib.operations.restore.generate_plan_filename', return_value='foo')
def test_get_plan_file_contents_00(self, mock1):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
with self.assertRaisesRegexp(Exception, 'Plan file foo does not exist'):
get_plan_file_contents(master_datadir, backup_dir, timestamp, self.restore.dump_dir, self.restore.dump_prefix)
@patch('gppylib.operations.restore.generate_plan_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file', return_value=[])
@patch('os.path.isfile', return_value=True)
def test_get_plan_file_contents_01(self, mock1, mock2, mock3):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
with self.assertRaisesRegexp(Exception, 'Plan file foo has no contents'):
get_plan_file_contents(master_datadir, backup_dir, timestamp, self.restore.dump_dir, self.restore.dump_prefix)
@patch('gppylib.operations.restore.generate_plan_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['20121212121212:t1,t2', '20121212121211:t3,t4', '20121212121210:t5,t6,t7'])
@patch('os.path.isfile', return_value=True)
def test_get_plan_file_contents_02(self, mock1, mock2, mock3):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
expected_output = [('20121212121212','t1,t2'), ('20121212121211','t3,t4'), ('20121212121210','t5,t6,t7')]
output = get_plan_file_contents(master_datadir, backup_dir, timestamp, self.restore.dump_dir, self.restore.dump_prefix)
self.assertEqual(output, expected_output)
@patch('gppylib.operations.restore.generate_plan_filename', return_value='foo')
@patch('gppylib.operations.restore.get_lines_from_file', return_value=['20121212121212:', '20121212121211', '20121212121210:'])
@patch('os.path.isfile', return_value=True)
def test_get_plan_file_contents_03(self, mock1, mock2, mock3):
master_datadir = 'foo'
timestamp = '20121212121212'
backup_dir = None
with self.assertRaisesRegexp(Exception, 'Invalid plan file format'):
get_plan_file_contents(master_datadir, backup_dir, timestamp, self.restore.dump_dir, self.restore.dump_prefix)
@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20121212121212', 't1,t2'), ('20121212121211', 't3,t4'), ('20121212121210', 't5,t6,t7')])
@patch('gppylib.operations.restore.Command.run')
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_00(self, mock1, mock2, mock3):
restore_db = None
results = self.restore.restore_incremental_data_only(restore_db)
self.assertTrue(results)
@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20121212121212', 't1,t2'), ('20121212121211', 't3,t4'), ('20121212121210', 't5,t6,t7')])
@patch('gppylib.operations.restore.Command.run')
@patch('gppylib.operations.restore.update_ao_statistics')
def redirected_restore_test_restore_incremental_data_only_00(self, mock1, mock2, mock3):
restore_db = None
results = self.restore.restore_incremental_data_only(restore_db)
self.assertTrue(results)
@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20121212121212', ''), ('20121212121211', ''), ('20121212121210', '')])
@patch('os.path.isfile', return_value=True)
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_01(self, mock1, mock2, mock3):
restore_db = None
with self.assertRaisesRegexp(Exception, 'There were no tables to restore. Check the plan file contents for restore timestamp 20121212121212'):
self.restore.restore_incremental_data_only(restore_db)
@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20121212121212', 't1,t2'), ('20121212121211', 't3,t4'), ('20121212121210', 't5,t6,t7')])
@patch('gppylib.operations.restore.Command.run')
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_02(self, mock1, mock2, mock3):
restore_db = None
self.assertTrue(self.restore.restore_incremental_data_only(restore_db))
@patch('gppylib.operations.restore.get_plan_file_contents', return_value=[('20121212121212', 't1,t2'), ('20121212121211', 't3,t4'), ('20121212121210', 't5,t6,t7')])
@patch('gppylib.operations.restore.Command.run', side_effect=Exception('Error executing gpdbrestore'))
@patch('gppylib.operations.restore.update_ao_statistics')
def test_restore_incremental_data_only_04(self, mock1, mock2, mock3):
restore_db = None
with self.assertRaisesRegexp(Exception, 'Error executing gpdbrestore'):
self.restore.restore_incremental_data_only(restore_db)
def test_get_restore_dir_00(self):
master_datadir = '/foo'
backup_dir = None
self.assertEqual(get_restore_dir(master_datadir, backup_dir), '/foo')
def test_get_restore_dir_01(self):
master_datadir = None
backup_dir = '/foo'
self.assertEqual(get_restore_dir(master_datadir, backup_dir), '/foo')
def test_get_restore_dir_02(self):
master_datadir = None
backup_dir = None
self.assertEqual(get_restore_dir(master_datadir, backup_dir), None)
@patch('gppylib.operations.restore.is_incremental_restore', return_value=True)
def test_is_begin_incremental_run_00(self, m):
mdd = '/foo'
backup_dir = '/tmp'
timestamp = '20130204135500'
noplan = True
result = is_begin_incremental_run(mdd, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp, noplan)
self.assertFalse(result)
@patch('gppylib.operations.restore.is_incremental_restore', return_value=True)
def test_is_begin_incremental_run_01(self, m):
mdd = '/foo'
backup_dir = '/tmp'
timestamp = '20130204135500'
noplan = False
result = is_begin_incremental_run(mdd, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp, noplan)
self.assertTrue(result)
@patch('gppylib.operations.restore.is_incremental_restore', return_value=False)
def test_is_begin_incremental_run_02(self, m):
mdd = '/foo'
backup_dir = '/tmp'
timestamp = '20130204135500'
noplan = True
result = is_begin_incremental_run(mdd, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp, noplan)
self.assertFalse(result)
@patch('gppylib.operations.restore.is_incremental_restore', return_value=False)
def test_is_begin_incremental_run_03(self, m):
mdd = '/foo'
backup_dir = '/tmp'
timestamp = '20130204135500'
noplan = False
result = is_begin_incremental_run(mdd, backup_dir, self.restore.dump_dir, self.restore.dump_prefix, timestamp, noplan)
self.assertFalse(result)
def test_create_filter_file_00(self):
self.restore.restore_tables = None
fname = self.restore.create_filter_file()
self.assertEquals(fname, None)
@patch('gppylib.operations.restore.get_all_segment_addresses', return_value=['host1'])
@patch('gppylib.operations.restore.scp_file_to_hosts')
def test_create_filter_file_01(self, m1, m2):
self.restore.restore_tables = ['public.ao1', 'pepper.heap1']
fname = self.restore.create_filter_file()
tables = None
with open(fname) as fd:
contents = fd.read()
tables = contents.splitlines()
self.assertEquals(tables,self.restore.restore_tables)
os.remove(fname)
@patch('gppylib.operations.restore.get_lines_from_file', return_value = ['public.t1', 'public.t2', 'public.t3'])
@patch('os.path.isfile', return_value = True)
def test_get_restore_tables_from_table_file_00(self, mock1, mock2):
table_file = '/foo'
expected_result = ['public.t1', 'public.t2', 'public.t3']
result = get_restore_tables_from_table_file(table_file)
self.assertEqual(expected_result, result)
@patch('os.path.isfile', return_value = False)
def test_get_restore_tables_from_table_file_01(self, mock):
table_file = '/foo'
expected_result = ['public.t1', 'public.t2', 'public.t3']
with self.assertRaisesRegexp(Exception, 'Table file does not exist'):
result = get_restore_tables_from_table_file(table_file)
def test_validate_tablenames_00(self):
table_list = ['publicao1', 'public.ao2']
with self.assertRaisesRegexp(Exception, 'No schema name supplied'):
validate_tablenames(table_list)
def test_validate_tablenames_01(self):
table_list = ['public.ao1', 'public.ao2']
validate_tablenames(table_list)
def test_validate_tablenames_02(self):
table_list = []
validate_tablenames(table_list)
def test_validate_tablenames_03(self):
table_list = ['public.ao1', 'public.ao1']
resolved_list = validate_tablenames(table_list)
self.assertEqual(resolved_list, ['public.ao1'])
def test_validate_tablenames_04(self):
table_list = ['public.*', 'public.ao1']
resolved_list = validate_tablenames(table_list)
self.assertEqual(resolved_list, ['public.*'])
def test_validate_tablenames_05(self):
table_list = ['public.*', 'other.*']
resolved_list = validate_tablenames(table_list)
self.assertEqual(resolved_list, ['public.*', 'other.*'])
def test_get_restore_table_list_00(self):
table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table']
restore_tables = ['public.ao_table2', 'public.co_table']
result = get_restore_table_list(table_list, restore_tables)
with open(result) as fd:
for line in fd:
self.assertTrue(line.strip() in restore_tables)
def test_get_restore_table_list_01(self):
table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table']
restore_tables = None
result = get_restore_table_list(table_list, restore_tables)
with open(result) as fd:
for line in fd:
self.assertTrue(line.strip() in table_list)
def test_get_restore_table_list_02(self):
table_list = ['public.ao_table', 'public.ao_table2', 'public.co_table', 'public.heap_table']
restore_tables = ['public.ao_table2', 'public.co_table', 'public.ao_table3']
result = get_restore_table_list(table_list, restore_tables)
with open(result) as fd:
for line in fd:
self.assertTrue(line.strip() in restore_tables)
def test_validate_restore_tables_list_00(self):
plan_file_contents = [('20121212121213', 'public.t1'), ('20121212121212', 'public.t2,public.t3'), ('20121212121212', 'public.t4')]
restore_tables = ['public.t1', 'public.t2']
validate_restore_tables_list(plan_file_contents, restore_tables)
def test_validate_restore_tables_list_01(self):
plan_file_contents = [('20121212121213', 'public.t1'), ('20121212121212', 'public.t2,public.t3'), ('20121212121212', 'public.t4')]
restore_tables = ['public.t5', 'public.t2']
with self.assertRaisesRegexp(Exception, 'Invalid tables for -T option: The following tables were not found in plan file'):
validate_restore_tables_list(plan_file_contents, restore_tables)
def test_validate_restore_tables_list_02(self):
plan_file_contents = [('20121212121213', 'public.t1'), ('20121212121212', 'public.t2,public.t3'), ('20121212121212', 'public.Ž')]
restore_tables = ['public.t1', 'public.Áá']
with self.assertRaisesRegexp(Exception, 'Invalid tables for -T option: The following tables were not found in plan file'):
validate_restore_tables_list(plan_file_contents, restore_tables)
def test_validate_restore_tables_list_03(self):
plan_file_contents = [('20121212121213', 'public.t1'), ('20121212121212', 'public.t2,public.t3'), ('20121212121212', 'public.测试')]
restore_tables = ['public.t1', 'public.测试']
validate_restore_tables_list(plan_file_contents, restore_tables)
def test_validate_restore_tables_list_04(self):
plan_file_contents = [('20121212121213', 'public.t1'), ('20121212121212', 'public.t2,public.t3'), ('20121212121212', 'public.Ž')]
restore_tables = ['public.t1', 'public.Ž']
validate_restore_tables_list(plan_file_contents, restore_tables)
def test_validate_restore_tables_list_05(self):
plan_file_contents = [('20121212121213', 'public.t1'), ('20121212121212', 'public.t2,public.t3'), ('20121212121212', 'public.Áá')]
restore_tables = ['public.t1', 'public.Áá']
validate_restore_tables_list(plan_file_contents, restore_tables)
@patch('gppylib.operations.unix.CheckFile.run', return_value=False)
def test_restore_global_00(self, mock):
restore_timestamp = '20121212121212'
master_datadir = 'foo'
backup_dir = None
with self.assertRaisesRegexp(Exception, 'Unable to locate global file gp_global_1_1_20121212121212 in dump set'):
self.restore._restore_global(restore_timestamp, master_datadir, backup_dir)
@patch('os.path.exists', return_value=True)
@patch('gppylib.commands.gp.Psql.run')
def test_restore_global_01(self, mock1, mock2):
restore_timestamp = '20121212121212'
master_datadir = 'foo'
backup_dir = None
self.restore._restore_global(restore_timestamp, master_datadir, backup_dir) # should not error out
@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_00(self, m1, m2):
conn = None
ao_table = 'schema.table'
counter = 1
batch_size = 1000
update_ao_stat_func(conn, ao_table, counter, batch_size)
@patch('pygresql.pgdb.pgdbCnx.commit')
@patch('gppylib.operations.restore.execSQLForSingleton')
def test_update_ao_stat_func_01(self, m1, m2):
conn = None
ao_table = 'schema.table'
counter = 999
batch_size = 1000
update_ao_stat_func(conn, ao_table, counter, batch_size)
@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_02(self, m1, m2):
conn = None
ao_table = 'schema.table'
counter = 1000
batch_size = 1000
with self.assertRaisesRegexp(AttributeError, "'NoneType' object has no attribute 'commit'"):
update_ao_stat_func(conn, ao_table, counter, batch_size)
@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_03(self, m1, m2):
conn = None
ao_table = 'schema.table'
counter = 1001
batch_size = 1000
update_ao_stat_func(conn, ao_table, counter, batch_size)
@patch('gppylib.operations.restore.execSQLForSingleton')
@patch('pygresql.pgdb.pgdbCnx.commit')
def test_update_ao_stat_func_04(self, m1, m2):
conn = None
ao_table = 'schema.table'
counter = 2000
batch_size = 1000
with self.assertRaisesRegexp(AttributeError, "'NoneType' object has no attribute 'commit'"):
update_ao_stat_func(conn, ao_table, counter, batch_size)
@patch('gppylib.operations.restore.execute_sql', return_value=[['t1', 'public']])
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.operations.restore.update_ao_stat_func')
def test_update_ao_statistics_00(self, m1, m2, m3):
port = 28888
db = 'testdb'
restored_tables = []
update_ao_statistics(port, db, restored_tables)
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.db.dbconn.execSQLForSingleton', return_value=5)
def test_check_gp_toolkit_true(self, m1, m2):
restore_db = 'testdb'
self.assertTrue(self.restore.check_gp_toolkit(restore_db))
@patch('gppylib.operations.restore.dbconn.connect')
@patch('gppylib.db.dbconn.execSQLForSingleton', return_value=0)
def test_check_gp_toolkit_false(self, m1, m2):
restore_db = 'testdb'
self.assertFalse(self.restore.check_gp_toolkit(restore_db))
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.restore.execSQL')
def test_analyze_restore_tables_00(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t1', 'public.t2']
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
restoredb._analyze_restore_tables(db_name, restore_tables, None)
@patch('gppylib.operations.restore.execSQL', side_effect=Exception('analyze failed'))
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
def test_analyze_restore_tables_01(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t1', 'public.t2']
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
self.assertRaises(Exception, restoredb._analyze_restore_tables, db_name, restore_tables, None)
@patch('gppylib.operations.backup_utils.execSQL')
@patch('gppylib.operations.backup_utils.dbconn.DbURL', side_effect=Exception('Failed'))
@patch('gppylib.operations.backup_utils.dbconn.connect')
def test_analyze_restore_tables_02(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t1', 'public.t2']
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
self.assertRaises(Exception, restoredb._analyze_restore_tables, db_name, restore_tables, None)
@patch('gppylib.operations.backup_utils.execSQL')
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect', side_effect=Exception('Failed'))
def test_analyze_restore_tables_03(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t1', 'public.t2']
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
self.assertRaises(Exception, restoredb._analyze_restore_tables, db_name, restore_tables, None)
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.restore.execSQL')
def test_analyze_restore_tables_04(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t%d' % i for i in range(3002)]
expected_batch_count = 3
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
batch_count = restoredb._analyze_restore_tables(db_name, restore_tables, None)
self.assertEqual(batch_count, expected_batch_count)
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.backup_utils.dbconn.execSQL')
def test_analyze_restore_tables_05(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t1', 'public.t2']
change_schema = 'newschema'
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
restoredb._analyze_restore_tables(db_name, restore_tables, change_schema)
@patch('gppylib.operations.backup_utils.dbconn.DbURL')
@patch('gppylib.operations.backup_utils.dbconn.connect')
@patch('gppylib.operations.backup_utils.dbconn.execSQL')
def test_analyze_restore_tables_06(self, mock1, mock2, mock3):
db_name = 'FOO'
port = 1234
restore_tables = ['public.t1', 'public.t2']
change_schema = 'newschema'
restoredb = RestoreDatabase('20121219', False, True, False, 'FOO', None, 1234, 'db_dumps', '', False, False, None, None, False, None, None, None, None, None)
restoredb._analyze_restore_tables(db_name, restore_tables, change_schema)
class ValidateTimestampTestCase(unittest.TestCase):
def setUp(self):
self.validate_timestamp = ValidateTimestamp(candidate_timestamp='20140211111111',
master_datadir='/mdd',
backup_dir='/backup_dir',
dump_dir='/db_dumps',
dump_prefix='',
netbackup_service_host=None,
ddboost=False)
@patch('os.path.exists', side_effect=[True, False])
def test_validate_compressed_file_with_compression_exists(self, mock):
compressed_file = 'compressed_file.gz'
self.assertTrue(self.validate_timestamp.validate_compressed_file(compressed_file))
@patch('os.path.exists', side_effect=[False, False])
def test_validate_compressed_file_with_compression_doesnt_exists(self, mock):
compressed_file = 'compressed_file.gz'
with self.assertRaisesRegexp(ExceptionNoStackTraceNeeded, 'Unable to find compressed_file or compressed_file.gz'):
self.validate_timestamp.validate_compressed_file(compressed_file)
@patch('os.path.exists', side_effect=[False, True])
def test_validate_compressed_file_without_compression_exists(self, mock):
compressed_file = 'compressed_file.gz'
self.assertFalse(self.validate_timestamp.validate_compressed_file(compressed_file))
@patch('os.path.exists', side_effect=[False, False])
def test_validate_compressed_file_without_compression_doesnt_exist(self, mock):
compressed_file = 'compressed_file.gz'
with self.assertRaisesRegexp(ExceptionNoStackTraceNeeded, 'Unable to find compressed_file or compressed_file.gz'):
self.validate_timestamp.validate_compressed_file(compressed_file)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_00(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_01(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_02(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_03(self, mock1):
master_datadir = None
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_04(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_05(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = None
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_06(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_07(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_state_files_with_nbu_08(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
restore_state_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_00(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_01(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_02(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_03(self, mock1):
master_datadir = None
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_04(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_05(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = None
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_06(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_07(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_report_file_with_nbu_08(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
restore_report_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_00(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_01(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_02(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_03(self, mock1):
master_datadir = None
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_04(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_05(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = None
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_06(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_07(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_cdatabase_file_with_nbu_08(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
restore_cdatabase_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_00(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_01(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_02(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_03(self, mock1):
master_datadir = None
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_04(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_05(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = None
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_06(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_07(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_global_file_with_nbu_08(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
restore_global_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_00(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_01(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_02(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_03(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = None
backup_dir = None
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_04(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_05(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = None
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_06(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
master_port = None
netbackup_service_host = "mdw"
netbackup_block_size = None
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
with self.assertRaisesRegexp(Exception, 'Master port is None.'):
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_07(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_08(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.GpArray.initFromCatalog')
@patch('gppylib.operations.dump.GpArray.getDbList')
@patch('gppylib.operations.restore.generate_segment_config_filename')
def test_restore_config_files_with_nbu_09(self, mock1, mock2, mock3, mock4, mock5, mock6):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
master_port = "5432"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.isSegmentPrimary.return_value = True
seg.getSegmentDbId.return_value = id + 1
seg.getSegmentDataDirectory.return_value = "/data"
seg.getSegmentHostName.return_value = "sdw"
restore_config_files_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, master_port, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_00(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_01(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_02(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_03(self, mock1):
master_datadir = None
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_04(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_05(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = None
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_06(self, mock1):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_07(self, mock1):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
def test_restore_partition_list_file_with_nbu_08(self, mock1):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141400002014"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
restore_partition_list_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_00(self, mock1, mock2):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_01(self, mock1, mock2):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_02(self, mock1, mock2):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = None
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_03(self, mock1, mock2):
master_datadir = None
backup_dir = None
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_04(self, mock1, mock2):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = None
netbackup_service_host = "mdw"
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_05(self, mock1, mock2):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20140808000000"
netbackup_service_host = None
netbackup_block_size = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_06(self, mock1, mock2):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = 1024
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_07(self, mock1, mock2):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = 2048
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value='20140707000000')
def test_restore_increments_file_with_nbu_08(self, mock1, mock2):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.restore_file_with_nbu')
@patch('gppylib.operations.restore.get_full_timestamp_for_incremental_with_nbu', return_value=None)
def test_restore_increments_file_with_nbu_09(self, mock1, mock2):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20140808000000"
netbackup_service_host = "mdw"
netbackup_block_size = 4096
with self.assertRaisesRegexp(Exception, 'Unable to locate full timestamp for given incremental timestamp "20140808000000" using NetBackup'):
restore_increments_file_with_nbu(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host, netbackup_block_size)
@patch('gppylib.operations.restore.get_backup_directory', return_value="/data")
@patch('gppylib.operations.restore.generate_master_config_filename', return_value="gp_master_config_20141200002014.tar")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=True)
def test_config_files_dumped_00(self, mock1, mock2, mock3):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertTrue(config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.get_backup_directory', return_value="/data")
@patch('gppylib.operations.restore.generate_master_config_filename', return_value="gp_master_config_20141200002014.tar")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=True)
def test_config_files_dumped_01(self, mock1, mock2, mock3):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertTrue(config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.get_backup_directory', return_value="/datadomain")
@patch('gppylib.operations.restore.generate_master_config_filename', return_value="gp_master_config_20141200002014.tar")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=True)
def test_config_files_dumped_02(self, mock1, mock2, mock3):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertTrue(config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.check_file_dumped_with_nbu')
def test_config_files_dumped_03(self, mock1, mock2, mock3):
master_datadir = None
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.check_file_dumped_with_nbu')
def test_config_files_dumped_04(self, mock1, mock2, mock3):
master_datadir = "/data"
backup_dir = None
restore_timestamp = None
netbackup_service_host = "mdw"
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host)
@patch('gppylib.operations.restore.get_backup_directory')
@patch('gppylib.operations.restore.generate_master_config_filename')
@patch('gppylib.operations.restore.check_file_dumped_with_nbu')
def test_config_files_dumped_05(self, mock1, mock2, mock3):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host)
@patch('gppylib.operations.restore.get_backup_directory', return_value="/datadomain")
@patch('gppylib.operations.restore.generate_master_config_filename', return_value="gp_master_config_20141200002014.tar")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=False)
def test_config_files_dumped_06(self, mock1, mock2, mock3):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertFalse(config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.get_backup_directory', return_value="/datadomain")
@patch('gppylib.operations.restore.generate_master_config_filename', return_value="gp_master_config_20141200002014.tar")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=False)
def test_config_files_dumped_07(self, mock1, mock2, mock3):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertFalse(config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.get_backup_directory', return_value="/data")
@patch('gppylib.operations.restore.generate_master_config_filename', return_value="gp_master_config_20141200002014.tar")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=False)
def test_config_files_dumped_08(self, mock1, mock2, mock3):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertFalse(config_files_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.generate_global_filename', return_value="/data/gp_global_1_1_20141200002014")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=True)
def test_global_file_dumped_00(self, mock1, mock2):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertTrue(global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.generate_global_filename', return_value="/data/gp_global_1_1_20141200002014")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=True)
def test_global_file_dumped_01(self, mock1, mock2):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertTrue(global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.generate_global_filename', return_value="/datadomain/gp_global_1_1_20141200002014")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=True)
def test_global_file_dumped_02(self, mock1, mock2):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertTrue(global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.generate_global_filename')
@patch('gppylib.operations.restore.check_file_dumped_with_nbu')
def test_global_file_dumped_03(self, mock1, mock2):
master_datadir = None
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
with self.assertRaisesRegexp(Exception, 'Master data directory and backup directory are both none.'):
global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host)
@patch('gppylib.operations.restore.generate_global_filename')
@patch('gppylib.operations.restore.check_file_dumped_with_nbu')
def test_global_file_dumped_04(self, mock1, mock2):
master_datadir = "/data"
backup_dir = None
restore_timestamp = None
netbackup_service_host = "mdw"
with self.assertRaisesRegexp(Exception, 'Restore timestamp is None.'):
global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host)
@patch('gppylib.operations.restore.generate_global_filename')
@patch('gppylib.operations.restore.check_file_dumped_with_nbu')
def test_global_file_dumped_05(self, mock1, mock2):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = None
with self.assertRaisesRegexp(Exception, 'Netbackup service hostname is None.'):
global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host)
@patch('gppylib.operations.restore.generate_global_filename', return_value="/datadomain/gp_global_1_1_20141200002014")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=False)
def test_global_file_dumped_06(self, mock1, mock2):
master_datadir = "/data"
backup_dir = None
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertFalse(global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.generate_global_filename', return_value="/datadomain/gp_global_1_1_20141200002014")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=False)
def test_global_file_dumped_07(self, mock1, mock2):
master_datadir = None
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertFalse(global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
@patch('gppylib.operations.restore.generate_global_filename', return_value="/data/gp_global_1_1_20141200002014")
@patch('gppylib.operations.restore.check_file_dumped_with_nbu', return_value=False)
def test_global_file_dumped_08(self, mock1, mock2):
master_datadir = "/data"
backup_dir = "/datadomain"
restore_timestamp = "20141200002014"
netbackup_service_host = "mdw"
self.assertFalse(global_file_dumped(master_datadir, backup_dir, self.validate_timestamp.dump_dir, self.validate_timestamp.dump_prefix, restore_timestamp, netbackup_service_host))
if __name__ == '__main__':
unittest.main()
| 56.093156
| 255
| 0.71192
| 17,854
| 147,525
| 5.524756
| 0.023692
| 0.046716
| 0.084977
| 0.103488
| 0.961587
| 0.951682
| 0.943714
| 0.936516
| 0.928466
| 0.920751
| 0
| 0.05729
| 0.188571
| 147,525
| 2,629
| 256
| 56.114492
| 0.76672
| 0.001064
| 0
| 0.80053
| 0
| 0.019417
| 0.252079
| 0.147044
| 0
| 0
| 0
| 0
| 0.068844
| 0
| null | null | 0.019417
| 0.00353
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
53bcc93e2d4c1fa03e90edfc45d8d107653ae99b
| 37,671
|
py
|
Python
|
python/alibabacloud_tea_roa/client.py
|
yndu13/tea-roa
|
e335b768ff65cc645fbbfa34c05ff7a843495bca
|
[
"Apache-2.0"
] | 4
|
2020-03-26T08:10:55.000Z
|
2021-05-24T14:20:01.000Z
|
python/alibabacloud_tea_roa/client.py
|
yndu13/tea-roa
|
e335b768ff65cc645fbbfa34c05ff7a843495bca
|
[
"Apache-2.0"
] | 33
|
2020-05-26T09:33:44.000Z
|
2022-02-07T06:34:09.000Z
|
python/alibabacloud_tea_roa/client.py
|
yndu13/tea-roa
|
e335b768ff65cc645fbbfa34c05ff7a843495bca
|
[
"Apache-2.0"
] | 4
|
2020-05-15T08:15:39.000Z
|
2021-02-22T14:03:12.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import time
from Tea.exceptions import TeaException, UnretryableException
from Tea.request import TeaRequest
from Tea.core import TeaCore
from typing import Dict, Any
from alibabacloud_credentials.client import Client as CredentialClient
from alibabacloud_tea_roa import models as roa_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_credentials import models as credential_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_roa_util.client import Client as ROAUtilClient
class Client:
"""
This is for ROA SDK
"""
_protocol: str = None
_read_timeout: int = None
_connect_timeout: int = None
_http_proxy: str = None
_https_proxy: str = None
_no_proxy: str = None
_max_idle_conns: int = None
_endpoint_host: str = None
_network: str = None
_endpoint_rule: str = None
_endpoint_map: Dict[str, str] = None
_suffix: str = None
_product_id: str = None
_region_id: str = None
_user_agent: str = None
_credential: CredentialClient = None
def __init__(
self,
config: roa_models.Config,
):
"""
Init client with Config
@param config: config contains the necessary information to create a client
"""
if UtilClient.is_unset(config):
raise TeaException({
'code': 'ParameterMissing',
'message': "'config' can not be unset"
})
UtilClient.validate_model(config)
if not UtilClient.empty(config.access_key_id) and not UtilClient.empty(config.access_key_secret):
if not UtilClient.empty(config.security_token):
config.type = 'sts'
else:
config.type = 'access_key'
credential_config = credential_models.Config(
access_key_id=config.access_key_id,
type=config.type,
access_key_secret=config.access_key_secret,
security_token=config.security_token
)
self._credential = CredentialClient(credential_config)
elif not UtilClient.is_unset(config.credential):
self._credential = config.credential
else:
raise TeaException({
'code': 'ParameterMissing',
'message': "'accessKeyId' and 'accessKeySecret' or 'credential' can not be unset"
})
self._region_id = config.region_id
self._protocol = config.protocol
self._endpoint_host = config.endpoint
self._read_timeout = config.read_timeout
self._connect_timeout = config.connect_timeout
self._http_proxy = config.http_proxy
self._https_proxy = config.https_proxy
self._max_idle_conns = config.max_idle_conns
def do_request(
self,
version: str,
protocol: str,
method: str,
auth_type: str,
pathname: str,
query: Dict[str, str],
headers: Dict[str, str],
body: Any,
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param version: product version
@param protocol: http or https
@param method: e.g. GET
@param auth_type: when authType is Anonymous, the signature will not be calculate
@param pathname: pathname of every api
@param query: which contains request params
@param headers: request headers
@param body: content of request
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.headers = TeaCore.merge({
'date': UtilClient.get_date_utcstring(),
'host': self._endpoint_host,
'accept': 'application/json',
'x-acs-signature-nonce': UtilClient.get_nonce(),
'x-acs-signature-method': 'HMAC-SHA1',
'x-acs-signature-version': '1.0',
'x-acs-version': version,
'user-agent': UtilClient.get_user_agent(self._user_agent),
# x-sdk-client': helper.DEFAULT_CLIENT
}, headers)
if not UtilClient.is_unset(body):
_request.body = UtilClient.to_jsonstring(body)
_request.headers['content-type'] = 'application/json; charset=utf-8'
if not UtilClient.is_unset(query):
_request.query = query
if not UtilClient.equal_string(auth_type, 'Anonymous'):
access_key_id = self._credential.get_access_key_id()
access_key_secret = self._credential.get_access_key_secret()
security_token = self._credential.get_security_token()
if not UtilClient.empty(security_token):
_request.headers['x-acs-accesskey-id'] = access_key_id
_request.headers['x-acs-security-token'] = security_token
string_to_sign = ROAUtilClient.get_string_to_sign(_request)
_request.headers['authorization'] = 'acs %s:%s' % (access_key_id, ROAUtilClient.get_signature(string_to_sign, access_key_secret))
_last_request = _request
_response = TeaCore.do_action(_request, _runtime)
if UtilClient.equal_number(_response.status_code, 204):
return {
'headers': _response.headers
}
result = UtilClient.read_as_json(_response.body)
if UtilClient.is_4xx(_response.status_code) or UtilClient.is_5xx(_response.status_code):
err = UtilClient.assert_as_map(result)
raise TeaException({
'code': '%s' % self.default_any(err.get('Code'), err.get('code')),
'message': 'code: %s, %s request id: %s' % (_response.status_code, self.default_any(err.get('Message'), err.get('message')), self.default_any(err.get('RequestId'), err.get('requestId'))),
'data': err
})
return {
'headers': _response.headers,
'body': result
}
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
async def do_request_async(
self,
version: str,
protocol: str,
method: str,
auth_type: str,
pathname: str,
query: Dict[str, str],
headers: Dict[str, str],
body: Any,
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param version: product version
@param protocol: http or https
@param method: e.g. GET
@param auth_type: when authType is Anonymous, the signature will not be calculate
@param pathname: pathname of every api
@param query: which contains request params
@param headers: request headers
@param body: content of request
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.headers = TeaCore.merge({
'date': UtilClient.get_date_utcstring(),
'host': self._endpoint_host,
'accept': 'application/json',
'x-acs-signature-nonce': UtilClient.get_nonce(),
'x-acs-signature-method': 'HMAC-SHA1',
'x-acs-signature-version': '1.0',
'x-acs-version': version,
'user-agent': UtilClient.get_user_agent(self._user_agent),
# x-sdk-client': helper.DEFAULT_CLIENT
}, headers)
if not UtilClient.is_unset(body):
_request.body = UtilClient.to_jsonstring(body)
_request.headers['content-type'] = 'application/json; charset=utf-8'
if not UtilClient.is_unset(query):
_request.query = query
if not UtilClient.equal_string(auth_type, 'Anonymous'):
access_key_id = await self._credential.get_access_key_id_async()
access_key_secret = await self._credential.get_access_key_secret_async()
security_token = await self._credential.get_security_token_async()
if not UtilClient.empty(security_token):
_request.headers['x-acs-accesskey-id'] = access_key_id
_request.headers['x-acs-security-token'] = security_token
string_to_sign = ROAUtilClient.get_string_to_sign(_request)
_request.headers['authorization'] = 'acs %s:%s' % (access_key_id, ROAUtilClient.get_signature(string_to_sign, access_key_secret))
_last_request = _request
_response = await TeaCore.async_do_action(_request, _runtime)
if UtilClient.equal_number(_response.status_code, 204):
return {
'headers': _response.headers
}
result = await UtilClient.read_as_json_async(_response.body)
if UtilClient.is_4xx(_response.status_code) or UtilClient.is_5xx(_response.status_code):
err = UtilClient.assert_as_map(result)
raise TeaException({
'code': '%s' % self.default_any(err.get('Code'), err.get('code')),
'message': 'code: %s, %s request id: %s' % (_response.status_code, self.default_any(err.get('Message'), err.get('message')), self.default_any(err.get('RequestId'), err.get('requestId'))),
'data': err
})
return {
'headers': _response.headers,
'body': result
}
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
def do_request_with_action(
self,
action: str,
version: str,
protocol: str,
method: str,
auth_type: str,
pathname: str,
query: Dict[str, str],
headers: Dict[str, str],
body: Any,
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param action: api name
@param version: product version
@param protocol: http or https
@param method: e.g. GET
@param auth_type: when authType is Anonymous, the signature will not be calculate
@param pathname: pathname of every api
@param query: which contains request params
@param headers: request headers
@param body: content of request
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.headers = TeaCore.merge({
'date': UtilClient.get_date_utcstring(),
'host': self._endpoint_host,
'accept': 'application/json',
'x-acs-signature-nonce': UtilClient.get_nonce(),
'x-acs-signature-method': 'HMAC-SHA1',
'x-acs-signature-version': '1.0',
'x-acs-version': version,
'x-acs-action': action,
'user-agent': UtilClient.get_user_agent(self._user_agent),
# x-sdk-client': helper.DEFAULT_CLIENT
}, headers)
if not UtilClient.is_unset(body):
_request.body = UtilClient.to_jsonstring(body)
_request.headers['content-type'] = 'application/json; charset=utf-8'
if not UtilClient.is_unset(query):
_request.query = query
if not UtilClient.equal_string(auth_type, 'Anonymous'):
access_key_id = self._credential.get_access_key_id()
access_key_secret = self._credential.get_access_key_secret()
security_token = self._credential.get_security_token()
if not UtilClient.empty(security_token):
_request.headers['x-acs-accesskey-id'] = access_key_id
_request.headers['x-acs-security-token'] = security_token
string_to_sign = ROAUtilClient.get_string_to_sign(_request)
_request.headers['authorization'] = 'acs %s:%s' % (access_key_id, ROAUtilClient.get_signature(string_to_sign, access_key_secret))
_last_request = _request
_response = TeaCore.do_action(_request, _runtime)
if UtilClient.equal_number(_response.status_code, 204):
return {
'headers': _response.headers
}
result = UtilClient.read_as_json(_response.body)
if UtilClient.is_4xx(_response.status_code) or UtilClient.is_5xx(_response.status_code):
err = UtilClient.assert_as_map(result)
raise TeaException({
'code': '%s' % self.default_any(err.get('Code'), err.get('code')),
'message': 'code: %s, %s request id: %s' % (_response.status_code, self.default_any(err.get('Message'), err.get('message')), self.default_any(err.get('RequestId'), err.get('requestId'))),
'data': err
})
return {
'headers': _response.headers,
'body': result
}
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
async def do_request_with_action_async(
self,
action: str,
version: str,
protocol: str,
method: str,
auth_type: str,
pathname: str,
query: Dict[str, str],
headers: Dict[str, str],
body: Any,
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param action: api name
@param version: product version
@param protocol: http or https
@param method: e.g. GET
@param auth_type: when authType is Anonymous, the signature will not be calculate
@param pathname: pathname of every api
@param query: which contains request params
@param headers: request headers
@param body: content of request
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.headers = TeaCore.merge({
'date': UtilClient.get_date_utcstring(),
'host': self._endpoint_host,
'accept': 'application/json',
'x-acs-signature-nonce': UtilClient.get_nonce(),
'x-acs-signature-method': 'HMAC-SHA1',
'x-acs-signature-version': '1.0',
'x-acs-version': version,
'x-acs-action': action,
'user-agent': UtilClient.get_user_agent(self._user_agent),
# x-sdk-client': helper.DEFAULT_CLIENT
}, headers)
if not UtilClient.is_unset(body):
_request.body = UtilClient.to_jsonstring(body)
_request.headers['content-type'] = 'application/json; charset=utf-8'
if not UtilClient.is_unset(query):
_request.query = query
if not UtilClient.equal_string(auth_type, 'Anonymous'):
access_key_id = await self._credential.get_access_key_id_async()
access_key_secret = await self._credential.get_access_key_secret_async()
security_token = await self._credential.get_security_token_async()
if not UtilClient.empty(security_token):
_request.headers['x-acs-accesskey-id'] = access_key_id
_request.headers['x-acs-security-token'] = security_token
string_to_sign = ROAUtilClient.get_string_to_sign(_request)
_request.headers['authorization'] = 'acs %s:%s' % (access_key_id, ROAUtilClient.get_signature(string_to_sign, access_key_secret))
_last_request = _request
_response = await TeaCore.async_do_action(_request, _runtime)
if UtilClient.equal_number(_response.status_code, 204):
return {
'headers': _response.headers
}
result = await UtilClient.read_as_json_async(_response.body)
if UtilClient.is_4xx(_response.status_code) or UtilClient.is_5xx(_response.status_code):
err = UtilClient.assert_as_map(result)
raise TeaException({
'code': '%s' % self.default_any(err.get('Code'), err.get('code')),
'message': 'code: %s, %s request id: %s' % (_response.status_code, self.default_any(err.get('Message'), err.get('message')), self.default_any(err.get('RequestId'), err.get('requestId'))),
'data': err
})
return {
'headers': _response.headers,
'body': result
}
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
def do_request_with_form(
self,
version: str,
protocol: str,
method: str,
auth_type: str,
pathname: str,
query: Dict[str, str],
headers: Dict[str, str],
body: Dict[str, Any],
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param version: product version
@param protocol: http or https
@param method: e.g. GET
@param auth_type: when authType is Anonymous, the signature will not be calculate
@param pathname: pathname of every api
@param query: which contains request params
@param headers: request headers
@param body: content of request
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.headers = TeaCore.merge({
'date': UtilClient.get_date_utcstring(),
'host': self._endpoint_host,
'accept': 'application/json',
'x-acs-signature-nonce': UtilClient.get_nonce(),
'x-acs-signature-method': 'HMAC-SHA1',
'x-acs-signature-version': '1.0',
'x-acs-version': version,
'user-agent': UtilClient.get_user_agent(self._user_agent),
# x-sdk-client': helper.DEFAULT_CLIENT
}, headers)
if not UtilClient.is_unset(body):
_request.body = ROAUtilClient.to_form(body)
_request.headers['content-type'] = 'application/x-www-form-urlencoded'
if not UtilClient.is_unset(query):
_request.query = query
if not UtilClient.equal_string(auth_type, 'Anonymous'):
access_key_id = self._credential.get_access_key_id()
access_key_secret = self._credential.get_access_key_secret()
security_token = self._credential.get_security_token()
if not UtilClient.empty(security_token):
_request.headers['x-acs-accesskey-id'] = access_key_id
_request.headers['x-acs-security-token'] = security_token
string_to_sign = ROAUtilClient.get_string_to_sign(_request)
_request.headers['authorization'] = 'acs %s:%s' % (access_key_id, ROAUtilClient.get_signature(string_to_sign, access_key_secret))
_last_request = _request
_response = TeaCore.do_action(_request, _runtime)
if UtilClient.equal_number(_response.status_code, 204):
return {
'headers': _response.headers
}
result = UtilClient.read_as_json(_response.body)
if UtilClient.is_4xx(_response.status_code) or UtilClient.is_5xx(_response.status_code):
err = UtilClient.assert_as_map(result)
raise TeaException({
'code': '%s' % self.default_any(err.get('Code'), err.get('code')),
'message': 'code: %s, %s request id: %s' % (_response.status_code, self.default_any(err.get('Message'), err.get('message')), self.default_any(err.get('RequestId'), err.get('requestId'))),
'data': err
})
return {
'headers': _response.headers,
'body': result
}
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
async def do_request_with_form_async(
self,
version: str,
protocol: str,
method: str,
auth_type: str,
pathname: str,
query: Dict[str, str],
headers: Dict[str, str],
body: Dict[str, Any],
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param version: product version
@param protocol: http or https
@param method: e.g. GET
@param auth_type: when authType is Anonymous, the signature will not be calculate
@param pathname: pathname of every api
@param query: which contains request params
@param headers: request headers
@param body: content of request
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.headers = TeaCore.merge({
'date': UtilClient.get_date_utcstring(),
'host': self._endpoint_host,
'accept': 'application/json',
'x-acs-signature-nonce': UtilClient.get_nonce(),
'x-acs-signature-method': 'HMAC-SHA1',
'x-acs-signature-version': '1.0',
'x-acs-version': version,
'user-agent': UtilClient.get_user_agent(self._user_agent),
# x-sdk-client': helper.DEFAULT_CLIENT
}, headers)
if not UtilClient.is_unset(body):
_request.body = ROAUtilClient.to_form(body)
_request.headers['content-type'] = 'application/x-www-form-urlencoded'
if not UtilClient.is_unset(query):
_request.query = query
if not UtilClient.equal_string(auth_type, 'Anonymous'):
access_key_id = await self._credential.get_access_key_id_async()
access_key_secret = await self._credential.get_access_key_secret_async()
security_token = await self._credential.get_security_token_async()
if not UtilClient.empty(security_token):
_request.headers['x-acs-accesskey-id'] = access_key_id
_request.headers['x-acs-security-token'] = security_token
string_to_sign = ROAUtilClient.get_string_to_sign(_request)
_request.headers['authorization'] = 'acs %s:%s' % (access_key_id, ROAUtilClient.get_signature(string_to_sign, access_key_secret))
_last_request = _request
_response = await TeaCore.async_do_action(_request, _runtime)
if UtilClient.equal_number(_response.status_code, 204):
return {
'headers': _response.headers
}
result = await UtilClient.read_as_json_async(_response.body)
if UtilClient.is_4xx(_response.status_code) or UtilClient.is_5xx(_response.status_code):
err = UtilClient.assert_as_map(result)
raise TeaException({
'code': '%s' % self.default_any(err.get('Code'), err.get('code')),
'message': 'code: %s, %s request id: %s' % (_response.status_code, self.default_any(err.get('Message'), err.get('message')), self.default_any(err.get('RequestId'), err.get('requestId'))),
'data': err
})
return {
'headers': _response.headers,
'body': result
}
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
@staticmethod
def default_any(
input_value: Any,
default_value: Any,
) -> Any:
"""
If inputValue is not null, return it or return defaultValue
@param input_value: users input value
@param default_value: default value
@return: the final result
"""
if UtilClient.is_unset(input_value):
return default_value
return input_value
def check_config(
self,
config: roa_models.Config,
) -> None:
"""
If the endpointRule and config.endpoint are empty, throw error
@param config: config contains the necessary information to create a client
"""
if UtilClient.empty(self._endpoint_rule) and UtilClient.empty(config.endpoint):
raise TeaException({
'code': 'ParameterMissing',
'message': "'config.endpoint' can not be empty"
})
| 48.986996
| 211
| 0.576013
| 3,827
| 37,671
| 5.379671
| 0.057486
| 0.049543
| 0.033515
| 0.043715
| 0.912036
| 0.898825
| 0.890762
| 0.890762
| 0.890762
| 0.890762
| 0
| 0.00353
| 0.330679
| 37,671
| 768
| 212
| 49.050781
| 0.813001
| 0.056436
| 0
| 0.841538
| 1
| 0
| 0.095388
| 0.013845
| 0
| 0
| 0
| 0
| 0.009231
| 1
| 0.009231
| false
| 0
| 0.016923
| 0
| 0.073846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
53f34ec1ed045c89cb7eec29878330fc5ebd2f28
| 86
|
py
|
Python
|
tests/client/old_client.py
|
Fahadsaadullahkhan/KubernetesJobOperator
|
d96f9498667f937503d1e45142060904674f823f
|
[
"MIT"
] | 35
|
2020-02-10T16:55:41.000Z
|
2022-03-18T01:25:00.000Z
|
tests/client/old_client.py
|
Fahadsaadullahkhan/KubernetesJobOperator
|
d96f9498667f937503d1e45142060904674f823f
|
[
"MIT"
] | 26
|
2020-02-10T05:36:44.000Z
|
2022-03-02T18:44:47.000Z
|
tests/client/old_client.py
|
Fahadsaadullahkhan/KubernetesJobOperator
|
d96f9498667f937503d1e45142060904674f823f
|
[
"MIT"
] | 8
|
2020-02-28T23:24:07.000Z
|
2021-11-29T21:35:46.000Z
|
from kubernetes.client import CoreV1Api
from kubernetes.client import CustomObjectsApi
| 43
| 46
| 0.895349
| 10
| 86
| 7.7
| 0.6
| 0.363636
| 0.519481
| 0.675325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.081395
| 86
| 2
| 46
| 43
| 0.962025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
54d554116afd7aa5ae3d30202ccad27cb53c1833
| 1,499
|
py
|
Python
|
quadpy/wedge/__init__.py
|
dariusarnold/quadpy
|
9dc7c1ebff99d15ae57ed9195cde94d97a599be8
|
[
"MIT"
] | null | null | null |
quadpy/wedge/__init__.py
|
dariusarnold/quadpy
|
9dc7c1ebff99d15ae57ed9195cde94d97a599be8
|
[
"MIT"
] | null | null | null |
quadpy/wedge/__init__.py
|
dariusarnold/quadpy
|
9dc7c1ebff99d15ae57ed9195cde94d97a599be8
|
[
"MIT"
] | null | null | null |
from ._felippa import felippa_1, felippa_2, felippa_3, felippa_4, felippa_5, felippa_6
from ._kubatko_yeager_maggi import (
kubatko_yeager_maggi_1,
kubatko_yeager_maggi_2a,
kubatko_yeager_maggi_2b,
kubatko_yeager_maggi_3a,
kubatko_yeager_maggi_3b,
kubatko_yeager_maggi_3c,
kubatko_yeager_maggi_3d,
kubatko_yeager_maggi_4a,
kubatko_yeager_maggi_4b,
kubatko_yeager_maggi_5a,
kubatko_yeager_maggi_5b,
kubatko_yeager_maggi_5c,
kubatko_yeager_maggi_6a,
kubatko_yeager_maggi_6b,
kubatko_yeager_maggi_6c,
kubatko_yeager_maggi_7a,
kubatko_yeager_maggi_7b,
kubatko_yeager_maggi_7c,
kubatko_yeager_maggi_8a,
kubatko_yeager_maggi_8b,
kubatko_yeager_maggi_9,
)
__all__ = [
"felippa_1",
"felippa_2",
"felippa_3",
"felippa_4",
"felippa_5",
"felippa_6",
"kubatko_yeager_maggi_1",
"kubatko_yeager_maggi_2a",
"kubatko_yeager_maggi_2b",
"kubatko_yeager_maggi_3a",
"kubatko_yeager_maggi_3b",
"kubatko_yeager_maggi_3c",
"kubatko_yeager_maggi_3d",
"kubatko_yeager_maggi_4a",
"kubatko_yeager_maggi_4b",
"kubatko_yeager_maggi_5a",
"kubatko_yeager_maggi_5b",
"kubatko_yeager_maggi_5c",
"kubatko_yeager_maggi_6a",
"kubatko_yeager_maggi_6b",
"kubatko_yeager_maggi_6c",
"kubatko_yeager_maggi_7a",
"kubatko_yeager_maggi_7b",
"kubatko_yeager_maggi_7c",
"kubatko_yeager_maggi_8a",
"kubatko_yeager_maggi_8b",
"kubatko_yeager_maggi_9",
]
| 27.254545
| 86
| 0.751167
| 201
| 1,499
| 4.875622
| 0.164179
| 0.570408
| 0.789796
| 0.032653
| 0.95102
| 0.95102
| 0.95102
| 0.95102
| 0.95102
| 0.95102
| 0
| 0.043548
| 0.172782
| 1,499
| 54
| 87
| 27.759259
| 0.746774
| 0
| 0
| 0
| 0
| 0
| 0.356905
| 0.320881
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037736
| 0
| 0.037736
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
073ee1057536dc8e2dd53fe06fe40c2c3848413c
| 1,080
|
py
|
Python
|
gsd-bot/GSD/GSDGithub.py
|
raphaelahrens/gsd-tools
|
7dfdae18c3b5c547793d10e7d609a9483aa9b704
|
[
"Apache-2.0"
] | 6
|
2022-01-22T16:21:29.000Z
|
2022-03-18T02:55:52.000Z
|
gsd-bot/GSD/GSDGithub.py
|
raphaelahrens/gsd-tools
|
7dfdae18c3b5c547793d10e7d609a9483aa9b704
|
[
"Apache-2.0"
] | 12
|
2021-12-23T10:42:06.000Z
|
2022-03-17T06:54:54.000Z
|
gsd-bot/GSD/GSDGithub.py
|
raphaelahrens/gsd-tools
|
7dfdae18c3b5c547793d10e7d609a9483aa9b704
|
[
"Apache-2.0"
] | 6
|
2022-01-17T23:53:32.000Z
|
2022-03-03T17:45:19.000Z
|
import requests
import os
from .GSDIssue import Issue
def get_new_issues(issues_url):
auth = (os.environ['GH_USERNAME'], os.environ['GH_TOKEN'])
params = {
'accept': "application/vnd.github.v3+json",
'labels': 'new,check',
'state': 'open'
}
# XXX Get the repo from the environment or something
resp = requests.get(issues_url, auth=auth, params=params)
resp.raise_for_status()
issues = resp.json()
to_return = []
for i in issues:
to_return.append(Issue(i))
return to_return
def get_approved_can_issues(issues_url):
auth = (os.environ['GH_USERNAME'], os.environ['GH_TOKEN'])
params = {
'accept': "application/vnd.github.v3+json",
'labels': 'approved',
'state': 'open'
}
# XXX Get the repo from the environment or something
resp = requests.get(issues_url, auth=auth, params=params)
resp.raise_for_status()
issues = resp.json()
to_return = []
for i in issues:
to_return.append(Issue(i))
return to_return
| 23.478261
| 62
| 0.618519
| 140
| 1,080
| 4.607143
| 0.314286
| 0.074419
| 0.08062
| 0.058915
| 0.865116
| 0.865116
| 0.865116
| 0.865116
| 0.865116
| 0.865116
| 0
| 0.002494
| 0.257407
| 1,080
| 45
| 63
| 24
| 0.801746
| 0.093519
| 0
| 0.709677
| 0
| 0
| 0.161191
| 0.061602
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab0990e6c742fbc17639d8f0baede8ea6adba4a9
| 2,311
|
py
|
Python
|
40. two_out_of_three.py
|
chandravenky/puzzles
|
17ec86bbad43862830ba7059a448a33b232b4088
|
[
"MIT"
] | null | null | null |
40. two_out_of_three.py
|
chandravenky/puzzles
|
17ec86bbad43862830ba7059a448a33b232b4088
|
[
"MIT"
] | null | null | null |
40. two_out_of_three.py
|
chandravenky/puzzles
|
17ec86bbad43862830ba7059a448a33b232b4088
|
[
"MIT"
] | 1
|
2022-03-13T02:04:46.000Z
|
2022-03-13T02:04:46.000Z
|
#Solution
def two_out_of_three(nums1, nums2, nums3):
stored_master = {}
stored_1 = {}
stored_2 = {}
stored_3 = {}
for i in range(0, len(nums1)):
if nums1[i] not in stored_1:
stored_1[nums1[i]] = 1
stored_master[nums1[i]] = 1
else:
pass
for i in range(0, len(nums2)):
if nums2[i] not in stored_master:
stored_2[nums2[i]] = 1
stored_master[nums2[i]] = 1
else:
if nums2[i] not in stored_2:
stored_master[nums2[i]] = 2
for i in range(0, len(nums3)):
if nums3[i] not in stored_master:
stored_3[nums3[i]] = 1
stored_master[nums3[i]] = 1
else:
if nums3[i] not in stored_3:
stored_master[nums3[i]] = 2
final_list = { key: value for key, value in stored_master.items() if value>1 }
return list(final_list.keys())
#Tests
def two_out_of_three_test():
return ( two_out_of_three([1,1,3,2],[2,3], [3]) == [3,2],
two_out_of_three([3,1], [2,3], [1,2]) == [3,1, 2],
two_out_of_three([1,2,2], [4,3,3], [5]) == [],)
print(two_out_of_three_test())
#Leetcode
class Solution(object):
def twoOutOfThree(self, nums1, nums2, nums3):
"""
:type nums1: List[int]
:type nums2: List[int]
:type nums3: List[int]
:rtype: List[int]
"""
stored_master = {}
stored_1 = {}
stored_2 = {}
stored_3 = {}
for i in range(0, len(nums1)):
if nums1[i] not in stored_1:
stored_1[nums1[i]] = 1
stored_master[nums1[i]] = 1
else:
pass
for i in range(0, len(nums2)):
if nums2[i] not in stored_master:
stored_2[nums2[i]] = 1
stored_master[nums2[i]] = 1
else:
if nums2[i] not in stored_2:
stored_master[nums2[i]] = 2
for i in range(0, len(nums3)):
if nums3[i] not in stored_master:
stored_3[nums3[i]] = 1
stored_master[nums3[i]] = 1
else:
if nums3[i] not in stored_3:
stored_master[nums3[i]] = 2
final_list = { key: value for key, value in stored_master.items() if value>1 }
return list(final_list.keys())
| 22.881188
| 86
| 0.521852
| 339
| 2,311
| 3.380531
| 0.129794
| 0.188482
| 0.052356
| 0.104712
| 0.848168
| 0.765271
| 0.757417
| 0.757417
| 0.757417
| 0.757417
| 0
| 0.071193
| 0.343574
| 2,311
| 100
| 87
| 23.11
| 0.684245
| 0.046733
| 0
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0.033333
| 0
| 0.016667
| 0.116667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab361dd117c3a3c3e285f2c34bb61b17c04fb603
| 5,679
|
py
|
Python
|
joint/dev/Old/Jere_ML_ISTA_original/Models_MNIST.py
|
mm5110/sparse-structures-for-classification
|
ac4d765754f92f22afeb1ed0473e6d8332aa8f73
|
[
"MIT"
] | 1
|
2021-11-10T01:56:32.000Z
|
2021-11-10T01:56:32.000Z
|
joint/dev/Old/dev_old/Jere/MNIST_ML_ISTA_share/Models_MNIST.py
|
mm5110/sparse-structures-for-classification
|
ac4d765754f92f22afeb1ed0473e6d8332aa8f73
|
[
"MIT"
] | null | null | null |
joint/dev/Old/dev_old/Jere/MNIST_ML_ISTA_share/Models_MNIST.py
|
mm5110/sparse-structures-for-classification
|
ac4d765754f92f22afeb1ed0473e6d8332aa8f73
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
##################################################
#### MultiLayer ISTA NET ####
##################################################
class ML_ISTA_NET(nn.Module):
def __init__(self,m1,m2,m3):
super(ML_ISTA_NET, self).__init__()
# Convolutional Filters
self.W1 = nn.Parameter(torch.randn(m1,1,6,6), requires_grad=True)
self.strd1 = 2;
self.W2 = nn.Parameter(torch.randn(m2,m1,6,6), requires_grad=True)
self.strd2 = 2;
self.W3 = nn.Parameter(torch.randn(m3,m2,4,4), requires_grad=True)
self.strd3 = 1;
# Biases / Thresholds
self.b1 = nn.Parameter(torch.zeros(1,m1,1,1), requires_grad=True)
self.b2 = nn.Parameter(torch.zeros(1,m2,1,1), requires_grad=True)
self.b3 = nn.Parameter(torch.zeros(1,m3,1,1), requires_grad=True)
# Classifier
self.Wclass = nn.Linear(m3, 10)
# Initialization
self.W1.data = 1/np.sqrt(36) * self.W1.data
self.W2.data = 1/np.sqrt(36*m1) * self.W2.data
self.W3.data = 1/np.sqrt(16*m2) * self.W3.data
def forward(self, x,T=0,RHO=1):
# Encoding
gamma1 = F.relu(F.conv2d(x,self.W1, stride = self.strd1) + self.b1) # first estimation
gamma2 = F.relu(F.conv2d(gamma1,self.W2, stride = self.strd2) + self.b2)
gamma3 = F.relu(F.conv2d(gamma2,self.W3, stride = self.strd3) + self.b3)
for _ in range(T):
# backward computatoin
gamma2_ml = F.conv_transpose2d(gamma3,self.W3, stride=self.strd3)
gamma1_ml = F.conv_transpose2d(gamma2_ml,self.W2, stride=self.strd2)
gamma1 = (1-RHO) * gamma1 + RHO * gamma1_ml
gamma2 = (1-RHO) * gamma2 + RHO * gamma2_ml
# forward computation
gamma1 = F.relu( (gamma1 - F.conv2d( F.conv_transpose2d(gamma1,self.W1, stride = self.strd1) - x ,self.W1, stride = self.strd1)) + self.b1)
gamma2 = F.relu( (gamma2 - F.conv2d( F.conv_transpose2d(gamma2,self.W2, stride = self.strd2) - gamma1, self.W2, stride = self.strd2)) + self.b2)
gamma3 = F.relu( (gamma3 - F.conv2d( F.conv_transpose2d(gamma3,self.W3, stride = self.strd3) - gamma2, self.W3, stride = self.strd3)) + self.b3)
# classifier
gamma = gamma3.view(gamma3.shape[0],gamma3.shape[1]*gamma3.shape[2]*gamma3.shape[3])
out = self.Wclass(gamma)
out = F.log_softmax(out,dim = 1)
return gamma, out
##################################################
#### MultiLayer FISTA NET ####
##################################################
class ML_FISTA_NET(nn.Module):
def __init__(self,m1,m2,m3):
super(ML_FISTA_NET, self).__init__()
# Convolutional Filters
self.W1 = nn.Parameter(torch.randn(m1,1,6,6), requires_grad=True)
self.strd1 = 2;
self.W2 = nn.Parameter(torch.randn(m2,m1,6,6), requires_grad=True)
self.strd2 = 2;
self.W3 = nn.Parameter(torch.randn(m3,m2,4,4), requires_grad=True)
self.strd3 = 1;
# Biases / Thresholds
self.b1 = nn.Parameter(torch.zeros(1,m1,1,1), requires_grad=True)
self.b2 = nn.Parameter(torch.zeros(1,m2,1,1), requires_grad=True)
self.b3 = nn.Parameter(torch.zeros(1,m3,1,1), requires_grad=True)
# Classifier
self.Wclass = nn.Linear(m3, 10)
# Initialization
self.W1.data = 1/np.sqrt(36) * self.W1.data
self.W2.data = 1/np.sqrt(36*m1) * self.W2.data
self.W3.data = 1/np.sqrt(16*m2) * self.W3.data
def forward(self, x,T=0,RHO=1):
t = 1
t_prv = t
# Encoding
gamma1 = F.relu(F.conv2d(x,self.W1, stride = self.strd1) + self.b1)
gamma2 = F.relu(F.conv2d(gamma1,self.W2, stride = self.strd2) + self.b2)
gamma3 = F.relu(F.conv2d(gamma2,self.W3, stride = self.strd3) + self.b3)
gamma3_prv = gamma3
for _ in range(T):
t_prv = t
t = float((1+np.sqrt(1+4*t_prv**2))/2)
Z = gamma3 + (t_prv-1)/t * (gamma3 - gamma3_prv)
gamma3_prv = gamma3
# backward computation
gamma2_ml = F.conv_transpose2d(Z,self.W3, stride=self.strd3)
gamma1_ml = F.conv_transpose2d(gamma2_ml,self.W2, stride=self.strd2)
gamma1 = (1-RHO) * gamma1 + RHO * gamma1_ml
gamma2 = (1-RHO) * gamma2 + RHO * gamma2_ml
# forward computation
gamma1 = F.relu( (gamma1 - F.conv2d( F.conv_transpose2d(gamma1,self.W1, stride = self.strd1) - x ,self.W1, stride = self.strd1)) + self.b1)
gamma2 = F.relu( (gamma2 - F.conv2d( F.conv_transpose2d(gamma2,self.W2, stride = self.strd2) - gamma1, self.W2, stride = self.strd2)) + self.b2)
gamma3 = F.relu( (Z - F.conv2d( F.conv_transpose2d(Z,self.W3, stride = self.strd3) - gamma2, self.W3, stride = self.strd3)) + self.b3)
# classifier
gamma = gamma3.view(gamma3.shape[0],gamma3.shape[1]*gamma3.shape[2]*gamma3.shape[3])
out = self.Wclass(gamma)
out = F.log_softmax(out,dim = 1)
return gamma, out
| 38.89726
| 157
| 0.546399
| 763
| 5,679
| 3.97903
| 0.12713
| 0.072464
| 0.063241
| 0.065876
| 0.857708
| 0.847826
| 0.847826
| 0.847826
| 0.847826
| 0.822134
| 0
| 0.071004
| 0.288255
| 5,679
| 146
| 158
| 38.89726
| 0.680109
| 0.061102
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1125
| 0
| 0.2125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db7d6a26a7f84ea1d0727f0ef24049e83f41d3ea
| 1,094
|
py
|
Python
|
amongus/config.py
|
Esfahan/discord-bot
|
48ed6aca7e983106d409cc0705dde6ed7c1b3798
|
[
"MIT"
] | null | null | null |
amongus/config.py
|
Esfahan/discord-bot
|
48ed6aca7e983106d409cc0705dde6ed7c1b3798
|
[
"MIT"
] | null | null | null |
amongus/config.py
|
Esfahan/discord-bot
|
48ed6aca7e983106d409cc0705dde6ed7c1b3798
|
[
"MIT"
] | null | null | null |
import os
class DevelopmentConfig:
# Flask
DEBUG = True
# SQLAlchemy
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{user}:{password}@{host}:{port}/{dbname}'.format(**{
'user': os.environ.get('DB_USER'),
'password': os.environ.get('DB_PASSWORD'),
'host': os.environ.get('DB_HOST'),
'port': os.environ.get('DB_PORT'),
'dbname': os.environ.get('DB_NAME'),
})
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
class ProductionConfig:
# Flask
DEBUG = False
# SQLAlchemy
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{user}:{password}@{host}:{port}/{dbname}'.format(**{
'user': os.environ.get('DB_USER'),
'password': os.environ.get('DB_PASSWORD'),
'host': os.environ.get('DB_HOST'),
'port': os.environ.get('DB_PORT'),
'dbname': os.environ.get('DB_NAME'),
})
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
if os.environ.get('FALSK_ENV') == 'production':
Config = ProductionConfig
else:
Config = DevelopmentConfig
| 26.682927
| 105
| 0.628885
| 122
| 1,094
| 5.467213
| 0.270492
| 0.148426
| 0.197901
| 0.209895
| 0.758621
| 0.758621
| 0.758621
| 0.758621
| 0.758621
| 0.758621
| 0
| 0.002299
| 0.204753
| 1,094
| 40
| 106
| 27.35
| 0.764368
| 0.030165
| 0
| 0.666667
| 0
| 0
| 0.258523
| 0.117424
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.148148
| 0.037037
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
db88bf2f728fc1525ff0bc0d788a83490026dbec
| 563,875
|
py
|
Python
|
tests/test.py
|
cicobalico/imgaug
|
d2534903dea27f6e8836a2e627b3a0f075d959cf
|
[
"MIT"
] | 3
|
2018-08-23T13:27:32.000Z
|
2021-02-16T14:30:10.000Z
|
tests/test.py
|
cicobalico/imgaug
|
d2534903dea27f6e8836a2e627b3a0f075d959cf
|
[
"MIT"
] | null | null | null |
tests/test.py
|
cicobalico/imgaug
|
d2534903dea27f6e8836a2e627b3a0f075d959cf
|
[
"MIT"
] | 1
|
2021-03-30T09:57:35.000Z
|
2021-03-30T09:57:35.000Z
|
"""
Automatically run tests for this library.
Simply execute
python test.py
or execute
nosetests --verbose
from within tests/
or add @attr("now") in front of a test and then execute
nosetests --verbose -a now
to only execute a specific test.
"""
from __future__ import print_function, division
# fix execution of tests involving matplotlib on travis
import matplotlib
matplotlib.use('Agg')
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
import numpy as np
import random
import six
import six.moves as sm
from scipy import misc
import skimage
from skimage import data, color
import cv2
import time
import scipy
import copy
import warnings
#from nose.plugins.attrib import attr
def main():
time_start = time.time()
# ----------------------
# imgaug
# ----------------------
is_np_array()
test_is_single_integer()
test_is_single_float()
test_is_single_number()
test_is_iterable()
test_is_string()
test_is_integer_array()
test_is_float_array()
test_is_callable()
test_caller_name()
test_seed()
test_current_random_state()
test_new_random_state()
test_dummy_random_state()
test_copy_random_state()
test_derive_random_state()
test_derive_random_states()
test_forward_random_state()
# test_quokka()
# test_quokka_square()
# test_angle_between_vectors()
# test_draw_text()
test_imresize_many_images()
test_imresize_single_image()
test_pad()
test_compute_paddings_for_aspect_ratio()
test_pad_to_aspect_ratio()
test_pool()
test_avg_pool()
test_max_pool()
test_draw_grid()
# test_show_grid()
# test_do_assert()
# test_HooksImages_is_activated()
# test_HooksImages_is_propagating()
# test_HooksImages_preprocess()
# test_HooksImages_postprocess()
test_Keypoint()
test_KeypointsOnImage()
test_BoundingBox()
test_BoundingBoxesOnImage()
# test_HeatmapsOnImage_get_arr()
# test_HeatmapsOnImage_find_global_maxima()
test_HeatmapsOnImage_draw()
test_HeatmapsOnImage_draw_on_image()
test_HeatmapsOnImage_pad()
# test_HeatmapsOnImage_pad_to_aspect_ratio()
test_HeatmapsOnImage_avg_pool()
test_HeatmapsOnImage_max_pool()
test_HeatmapsOnImage_scale()
# test_HeatmapsOnImage_to_uint8()
# test_HeatmapsOnImage_from_uint8()
# test_HeatmapsOnImage_from_0to1()
# test_HeatmapsOnImage_change_normalization()
# test_HeatmapsOnImage_copy()
# test_HeatmapsOnImage_deepcopy()
# test_Batch()
test_BatchLoader()
# test_BackgroundAugmenter.get_batch()
# test_BackgroundAugmenter._augment_images_worker()
# test_BackgroundAugmenter.terminate()
# ----------------------
# augmenters
# ----------------------
# arithmetic
test_Add()
test_AddElementwise()
test_AdditiveGaussianNoise()
test_Multiply()
test_MultiplyElementwise()
test_Dropout()
test_CoarseDropout()
test_SaltAndPepper()
test_CoarseSaltAndPepper()
test_Salt()
test_CoarseSalt()
test_Pepper()
test_CoarsePepper()
test_ReplaceElementwise()
test_Invert()
test_ContrastNormalization()
# blur
test_GaussianBlur()
test_AverageBlur()
test_MedianBlur()
# TODO BilateralBlur
# color
# TODO WithColorspace
test_AddToHueAndSaturation()
# TODO ChangeColorspace
test_Grayscale()
# convolutional
test_Convolve()
test_Sharpen()
test_Emboss()
# TODO EdgeDetect
# TODO DirectedEdgeDetect
# flip
test_Fliplr()
test_Flipud()
# geometric
test_Affine()
test_AffineCv2()
test_PiecewiseAffine()
test_PerspectiveTransform()
test_ElasticTransformation()
# meta
test_copy_dtypes_for_restore()
test_restore_augmented_image_dtype_()
test_restore_augmented_image_dtype()
test_restore_augmented_images_dtypes_()
test_restore_augmented_images_dtypes()
test_clip_augmented_image_()
test_clip_augmented_image()
test_clip_augmented_images_()
test_clip_augmented_images()
test_Augmenter()
test_Augmenter_find()
test_Augmenter_remove()
test_Augmenter_hooks()
test_Augmenter_copy_random_state()
test_Augmenter_augment_batches()
test_Sequential()
test_SomeOf()
test_OneOf()
test_Sometimes()
test_WithChannels()
test_Noop()
test_Lambda()
test_AssertLambda()
test_AssertShape()
# overlay
test_Alpha()
test_AlphaElementwise()
# TODO SimplexNoiseAlpha
# TODO FrequencyNoiseAlpha
# segmentation
test_Superpixels()
# size
test_Scale()
# TODO test_CropAndPad()
test_Pad()
test_Crop()
# these functions use various augmenters, so test them last
test_2d_inputs()
test_determinism()
test_keypoint_augmentation()
test_unusual_channel_numbers()
test_dtype_preservation()
# ----------------------
# parameters
# ----------------------
test_parameters_handle_continuous_param()
test_parameters_handle_discrete_param()
test_parameters_handle_probability_param()
test_parameters_force_np_float_dtype()
test_parameters_both_np_float_if_one_is_float()
test_parameters_draw_distribution_grid()
test_parameters_draw_distribution_graph()
test_parameters_Biomial()
test_parameters_Choice()
test_parameters_DiscreteUniform()
test_parameters_Poisson()
test_parameters_Normal()
test_parameters_Laplace()
test_parameters_ChiSquare()
test_parameters_Weibull()
test_parameters_Uniform()
test_parameters_Beta()
test_parameters_Deterministic()
test_parameters_FromLowerResolution()
test_parameters_Clip()
test_parameters_Discretize()
test_parameters_Multiply()
test_parameters_Divide()
test_parameters_Add()
test_parameters_Subtract()
test_parameters_Power()
test_parameters_Absolute()
test_parameters_RandomSign()
test_parameters_ForceSign()
test_parameters_Positive()
test_parameters_Negative()
test_parameters_IterativeNoiseAggregator()
test_parameters_Sigmoid()
#test_parameters_SimplexNoise()
#test_parameters_FrequencyNoise()
test_parameters_operators()
test_parameters_copy()
time_end = time.time()
print("Finished without errors in %.4fs." % (time_end - time_start,))
def is_np_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((64, 64, 3), dtype=np.uint8),
np.zeros((1, 2), dtype=np.float32),
np.zeros((100,), dtype=np.float64)
]
values_false = [
"A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(),
-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4
]
for value in values_true:
assert ia.is_np_array(value) == True
for value in values_false:
assert ia.is_np_array(value) == False
def test_is_single_integer():
assert ia.is_single_integer("A") == False
assert ia.is_single_integer(None) == False
assert ia.is_single_integer(1.2) == False
assert ia.is_single_integer(1.0) == False
assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) == False
assert ia.is_single_integer(1) == True
assert ia.is_single_integer(1234) == True
assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) == True
assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) == True
def test_is_single_float():
assert ia.is_single_float("A") == False
assert ia.is_single_float(None) == False
assert ia.is_single_float(1.2) == True
assert ia.is_single_float(1.0) == True
assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) == True
assert ia.is_single_float(1) == False
assert ia.is_single_float(1234) == False
assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) == False
assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) == False
def test_caller_name():
assert ia.caller_name() == 'test_caller_name'
def test_is_single_number():
class _Dummy(object):
pass
values_true = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
values_false = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_single_number(value) == True
for value in values_false:
assert ia.is_single_number(value) == False
def test_is_iterable():
class _Dummy(object):
pass
values_true = [
[0, 1, 2],
["A", "X"],
[[123], [456, 789]],
[],
(1, 2, 3),
(1,),
tuple(),
"A",
"ABC",
"",
np.zeros((100,), dtype=np.uint8)
]
values_false = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()]
for value in values_true:
assert ia.is_iterable(value) == True, value
for value in values_false:
assert ia.is_iterable(value) == False
def test_is_string():
class _Dummy(object):
pass
values_true = ["A", "BC", "1", ""]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_string(value) == True
for value in values_false:
assert ia.is_string(value) == False
def test_is_integer_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_integer_array(value) == True
for value in values_false:
assert ia.is_integer_array(value) == False
def test_is_float_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64),
np.zeros((1, 2), dtype=np.bool)
]
for value in values_true:
assert ia.is_float_array(value) == True
for value in values_false:
assert ia.is_float_array(value) == False
def test_is_callable():
def _dummy_func():
pass
_dummy_func2 = lambda x: x
class _Dummy1(object):
pass
class _Dummy2(object):
def __call__(self):
pass
values_true = [_dummy_func, _dummy_func2, _Dummy2()]
values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_callable(value) == True
for value in values_false:
assert ia.is_callable(value) == False
def test_seed():
ia.seed(10017)
rs = np.random.RandomState(10017)
assert ia.CURRENT_RANDOM_STATE.randint(0, 1000*1000) == rs.randint(0, 1000*1000)
reseed()
def test_current_random_state():
assert ia.current_random_state() == ia.CURRENT_RANDOM_STATE
def test_new_random_state():
seed = 1000
ia.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=False)
rs_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=False)
rs_observed2 = ia.new_random_state(seed=None, fully_random=False)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
ia.seed(seed)
np.random.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=True)
rs_not_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) != rs_not_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=True)
rs_observed2 = ia.new_random_state(seed=None, fully_random=True)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=1234)
rs_observed2 = ia.new_random_state(seed=1234)
rs_expected = np.random.RandomState(1234)
assert rs_observed1.randint(0, 10**6) == rs_observed2.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_dummy_random_state():
assert ia.dummy_random_state().randint(0, 10**6) == np.random.RandomState(1).randint(0, 10**6)
def test_copy_random_state():
rs = np.random.RandomState(1017)
rs_copy = ia.copy_random_state(rs)
assert rs != rs_copy
assert rs.randint(0, 10**6) == rs_copy.randint(0, 10**6)
assert ia.copy_random_state(np.random) == np.random
assert ia.copy_random_state(np.random, force_copy=True) != np.random
def test_derive_random_state():
rs = np.random.RandomState(1017)
rs_observed = ia.derive_random_state(np.random.RandomState(1017))
rs_expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6))
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_derive_random_states():
rs = np.random.RandomState(1017)
rs_observed1, rs_observed2 = ia.derive_random_states(np.random.RandomState(1017), n=2)
seed = np.random.RandomState(1017).randint(0, 10**6)
rs_expected1 = np.random.RandomState(seed+0)
rs_expected2 = np.random.RandomState(seed+1)
assert rs_observed1.randint(0, 10**6) == rs_expected1.randint(0, 10**6)
assert rs_observed2.randint(0, 10**6) == rs_expected2.randint(0, 10**6)
def test_forward_random_state():
rs1 = np.random.RandomState(1017)
rs2 = np.random.RandomState(1017)
ia.forward_random_state(rs1)
rs2.uniform()
assert rs1.randint(0, 10**6) == rs2.randint(0, 10**6)
def test_imresize_many_images():
for c in [1, 3]:
image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, c), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, c), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, c), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, c), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, c), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, c), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for interpolation in interpolations:
images_same_observed = ia.imresize_many_images(images, (16, 16), interpolation=interpolation)
for image_expected, image_observed in zip(images, images_same_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
images_small_observed = ia.imresize_many_images(images, (8, 8), interpolation=interpolation)
for image_expected, image_observed in zip(images_small, images_small_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
images_large_observed = ia.imresize_many_images(images, (32, 32), interpolation=interpolation)
for image_expected, image_observed in zip(images_large, images_large_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
def test_imresize_single_image():
for c in [-1, 1, 3]:
image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
if c == -1:
images = images[:, :, 0]
images_small = images_small[:, :, 0]
images_large = images_large[:, :, 0]
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for interpolation in interpolations:
for image in images:
image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation)
diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
for image, image_expected in zip(images, images_small):
image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
for image, image_expected in zip(images, images_large):
image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
def test_pad():
# -------
# uint8, int32
# -------
for dtype in [np.uint8, np.int32]:
arr = np.zeros((3, 3), dtype=dtype) + 255
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.array_equal(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, -1] == 0)
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[-1, :] == 0)
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, 0] == 0)
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
assert np.all(arr_pad[:, -2:] == 0)
assert np.all(arr_pad[-3:, :] == 0)
assert np.all(arr_pad[:, :4] == 0)
arr_pad = ia.pad(arr, top=1, cval=10)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 10)
arr = np.zeros((3, 3, 3), dtype=dtype) + 128
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :, 0] == 0)
assert np.all(arr_pad[0, :, 1] == 0)
assert np.all(arr_pad[0, :, 2] == 0)
arr = np.zeros((3, 3), dtype=dtype) + 128
arr[1, 1] = 200
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 128
assert arr_pad[0, 1] == 200
assert arr_pad[0, 2] == 128
# -------
# float32, float64
# -------
for dtype in [np.float32, np.float64]:
arr = np.zeros((3, 3), dtype=dtype) + 1.0
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[-3, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6
arr_pad = ia.pad(arr, top=1, cval=0.2)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2]))
arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0]))
arr = np.zeros((3, 3), dtype=dtype) + 0.5
arr[1, 1] = 0.75
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6
assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6
assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6
def test_compute_paddings_for_aspect_ratio():
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 0
assert bottom == 0
assert left == 0
arr = np.zeros((1, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 2
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 1), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 1
arr = np.zeros((2, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 1
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 2), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 1
assert bottom == 0
assert left == 1
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 0.5)
assert top == 2
assert right == 0
assert bottom == 2
assert left == 0
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 2.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 2
def test_pad_to_aspect_ratio():
for dtype in [np.uint8, np.int32, np.float32]:
# aspect_ratio = 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((1, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 1), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((2, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 2), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
# aspect_ratio != 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 0.5)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 8
assert arr_pad.shape[1] == 4
# 3d arr
arr = np.zeros((4, 2, 3), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
assert arr_pad.shape[2] == 3
# cval
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 0
assert np.max(arr_pad[:, -2:]) == 0
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=10)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 10
assert np.max(arr_pad[:, -2:]) == 10
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0 + 1e-6
assert 0 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.1)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0.1 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0.1 + 1e-6
assert 0.1 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0.1 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
# mode
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr[1:3, 1:3] = 200
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, mode="maximum")
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[0:1, 0:2]) == 128
assert np.max(arr_pad[1:3, 0:2]) == 200
assert np.max(arr_pad[3:, 0:2]) == 128
assert np.max(arr_pad[0:1, -2:]) == 128
assert np.max(arr_pad[1:3, -2:]) == 200
assert np.max(arr_pad[3:, -2:]) == 128
def test_pool():
# basic functionality with uint8, int32, float32
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.int32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# preserve_dtype off
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == np.float64
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# maximum function
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.max)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
# 3d array
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr = np.tile(arr[..., np.newaxis], (1, 1, 3))
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2, 3)
assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1])
assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2])
arr_pooled = arr_pooled[..., 0]
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
# block_size per axis
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, (2, 1), np.average)
assert arr_pooled.shape == (2, 4)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 4]))
assert np.allclose(arr_pooled[0, 1], np.average([1, 5]))
assert np.allclose(arr_pooled[0, 2], np.average([2, 6]))
assert np.allclose(arr_pooled[0, 3], np.average([3, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 12]))
assert np.allclose(arr_pooled[1, 1], np.average([9, 13]))
assert np.allclose(arr_pooled[1, 2], np.average([10, 14]))
assert np.allclose(arr_pooled[1, 3], np.average([11, 15]))
# cval
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0]))
assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0]))
arr = np.uint8([
[0, 1],
[4, 5]
])
arr_pooled = ia.pool(arr, (4, 1), np.average)
assert arr_pooled.shape == (1, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0]))
assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0]))
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average, cval=22)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22]))
assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22]))
def test_avg_pool():
# very basic test, as avg_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.avg_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
def test_max_pool():
# very basic test, as avg_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.max_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
def test_draw_grid():
image = np.zeros((2, 2, 3), dtype=np.uint8)
image[0, 0] = 64
image[0, 1] = 128
image[1, 0] = 192
image[1, 1] = 256
grid = ia.draw_grid([image], rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image], rows=1, cols=2)
expected = np.hstack([image, image])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
def test_Keypoint():
eps = 1e-8
# x/y/x_int/y_int
kp = ia.Keypoint(y=1, x=2)
assert kp.y == 1
assert kp.x == 2
assert kp.y_int == 1
assert kp.x_int == 2
kp = ia.Keypoint(y=1.1, x=2.7)
assert 1.1 - eps < kp.y < 1.1 + eps
assert 2.7 - eps < kp.x < 2.7 + eps
assert kp.y_int == 1
assert kp.x_int == 3
# project
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.project((10, 10), (10, 10))
assert kp2.y == 1
assert kp2.x == 2
kp2 = kp.project((10, 10), (20, 10))
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.project((10, 10), (10, 20))
assert kp2.y == 1
assert kp2.x == 4
kp2 = kp.project((10, 10), (20, 20))
assert kp2.y == 2
assert kp2.x == 4
# shift
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.shift(y=1)
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.shift(y=-1)
assert kp2.y == 0
assert kp2.x == 2
kp2 = kp.shift(x=1)
assert kp2.y == 1
assert kp2.x == 3
kp2 = kp.shift(x=-1)
assert kp2.y == 1
assert kp2.x == 1
kp2 = kp.shift(y=1, x=2)
assert kp2.y == 2
assert kp2.x == 4
# __repr__ / __str_
kp = ia.Keypoint(y=1, x=2)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
kp = ia.Keypoint(y=1.2, x=2.7)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
eps = 1e-8
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
# height/width
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
assert kpi.height == 10
assert kpi.width == 20
# image instead of shape
kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
assert kpi.shape == (10, 20, 3)
# on()
kpi2 = kpi.on((10, 20, 3))
assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
kpi2 = kpi.on((20, 40, 3))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
# draw_on_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
kps_mask_size3 = np.copy(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 0, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [255, 255, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image2 = np.copy(image)
image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
assert np.all(image2 == image_kps)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
assert np.all(image2[kps_mask] == [0, 255, 0])
assert np.all(image2[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception as e:
got_exception = True
assert got_exception
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception as e:
got_exception = True
assert got_exception
# shift
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.shift(x=0, y=0)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(y=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
kpi2 = kpi.shift(y=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
kpi2 = kpi.shift(x=1, y=2)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
# get_coords_array
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
observed = kpi.get_coords_array()
expected = np.float32([
[1, 2],
[3, 4]
])
assert np.allclose(observed, expected)
# from_coords_array
arr = np.float32([
[1, 2],
[3, 4]
])
kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3))
assert 1 - eps < kpi.keypoints[0].x < 1 + eps
assert 2 - eps < kpi.keypoints[0].y < 2 + eps
assert 3 - eps < kpi.keypoints[1].x < 3 + eps
assert 4 - eps < kpi.keypoints[1].y < 4 + eps
# to_keypoint_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = kpi.to_keypoint_image(size=1)
image_size3 = kpi.to_keypoint_image(size=3)
kps_mask = np.zeros((5, 5, 2), dtype=np.bool)
kps_mask[2, 1, 0] = 1
kps_mask[4, 3, 1] = 1
kps_mask_size3 = np.zeros_like(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
assert np.all(image[kps_mask] == 255)
assert np.all(image[~kps_mask] == 0)
assert np.all(image_size3[kps_mask] == 255)
assert np.all(image_size3[kps_mask_size3] >= 128)
assert np.all(image_size3[~kps_mask_size3] == 0)
# from_keypoint_image()
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 255
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == 4
assert kpi2.keypoints[1].x == 3
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
got_exception = False
try:
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20, nb_channels=3)
except Exception as exc:
assert "Expected if_not_found_coords to be" in str(exc)
got_exception = True
assert got_exception
# copy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.copy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 100
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# deepcopy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.deepcopy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# repr/str
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], shape=(5, 5, 3))"
assert kpi.__repr__() == kpi.__str__() == expected
def test_BoundingBox():
eps = 1e-8
# properties with ints
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
assert bb.width == 40 - 20
assert bb.height == 30 - 10
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# wrong order of y1/y2, x1/x2
bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
# properties with floats
bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 31
assert bb.x2_int == 41
assert bb.width == 40.9 - 20.1
assert bb.height == 30.9 - 10.1
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# area
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.area == (30-10) * (40-20)
# project
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (10, 10))
assert 10 - eps < bb2.y1 < 10 + eps
assert 20 - eps < bb2.x1 < 20 + eps
assert 30 - eps < bb2.y2 < 30 + eps
assert 40 - eps < bb2.x2 < 40 + eps
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (20, 20))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (5, 5))
assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
bb2 = bb.project((10, 10), (10, 20))
assert 10*1 - eps < bb2.y1 < 10*1 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*1 - eps < bb2.y2 < 30*1 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (20, 10))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*1 - eps < bb2.x1 < 20*1 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*1 - eps < bb2.x2 < 40*1 + eps
# extend
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.extend(all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
bb2 = bb.extend(all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
bb2 = bb.extend(top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
bb2 = bb.extend(bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
# intersection
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter == False
# union
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
# iou
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
iou = bb1.iou(bb2)
assert 1.0 - eps < iou < 1.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
iou = bb1.iou(bb2)
assert 0.0 - eps < iou < 0.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
iou = bb1.iou(bb2)
area_union = 15 * 15
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert iou_expected - eps < iou < iou_expected + eps
# is_fully_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) == True
assert bb.is_fully_within_image((20, 100, 3)) == False
assert bb.is_fully_within_image((100, 30, 3)) == False
assert bb.is_fully_within_image((1, 1, 3)) == False
# is_partly_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) == True
assert bb.is_partly_within_image((20, 100, 3)) == True
assert bb.is_partly_within_image((100, 30, 3)) == True
assert bb.is_partly_within_image((1, 1, 3)) == False
# is_out_of_image()
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) == False
assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) == False
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) == False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) == True
assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) == False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) == True
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) == True
assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) == False
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) == True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) == True
assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) == True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) == False
# cut_out_of_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_cut = bb.cut_out_of_image((100, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 20
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 30
# shift
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_top = bb.shift(top=0)
bb_right = bb.shift(right=0)
bb_bottom = bb.shift(bottom=0)
bb_left = bb.shift(left=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
bb_top = bb.shift(top=1)
bb_right = bb.shift(right=1)
bb_bottom = bb.shift(bottom=1)
bb_left = bb.shift(left=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
bb_top = bb.shift(top=-1)
bb_right = bb.shift(right=-1)
bb_bottom = bb.shift(bottom=-1)
bb_left = bb.shift(left=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
assert bb_mix.y2 == 30+3-4
assert bb_mix.x2 == 40+1-2
# draw_on_image()
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
assert np.all(image[~bb_mask] == [0, 0, 0])
image = np.zeros_like(image)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
got_exception = False
try:
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=True)
except Exception as e:
got_exception = True
assert got_exception == False
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
got_exception = False
try:
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=True)
except Exception as e:
got_exception = True
assert got_exception == True
# extract_from_image()
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
# to_keypoints()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
kps = bb.to_keypoints()
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
# copy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
# deepcopy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "test"
# BoundingBox_repr()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
# test_BoundingBox_str()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
reseed()
# test height/width
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.height == 40
assert bbsoi.width == 50
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
assert bbsoi.height == 40
assert bbsoi.width == 50
# on()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
bbsoi_projected = bbsoi.on((40, 50))
assert bbsoi_projected.bounding_boxes[0].y1 == 10
assert bbsoi_projected.bounding_boxes[0].x1 == 20
assert bbsoi_projected.bounding_boxes[0].y2 == 30
assert bbsoi_projected.bounding_boxes[0].x2 == 40
assert bbsoi_projected.bounding_boxes[1].y1 == 15
assert bbsoi_projected.bounding_boxes[1].x1 == 25
assert bbsoi_projected.bounding_boxes[1].y2 == 35
assert bbsoi_projected.bounding_boxes[1].x2 == 45
bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
# draw_on_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
assert np.all(image[35+1, 45+1, :] == [0, 0, 0])
# remove_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
assert len(bbsoi_slim.bounding_boxes) == 1
assert bbsoi_slim.bounding_boxes[0] == bb1
# cut_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_cut = bbsoi.cut_out_of_image()
assert len(bbsoi_cut.bounding_boxes) == 2
assert bbsoi_cut.bounding_boxes[0].y1 == 10
assert bbsoi_cut.bounding_boxes[0].x1 == 20
assert bbsoi_cut.bounding_boxes[0].y2 == 30
assert bbsoi_cut.bounding_boxes[0].x2 == 40
assert bbsoi_cut.bounding_boxes[1].y1 == 15
assert bbsoi_cut.bounding_boxes[1].x1 == 25
assert bbsoi_cut.bounding_boxes[1].y2 == 35
assert bbsoi_cut.bounding_boxes[1].x2 == 50
# shift()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_shifted = bbsoi.shift(right=1)
assert len(bbsoi_cut.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10
assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
assert bbsoi_shifted.bounding_boxes[0].y2 == 30
assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
assert bbsoi_shifted.bounding_boxes[1].y1 == 15
assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
assert bbsoi_shifted.bounding_boxes[1].y2 == 35
assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1
# copy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 0
# deepcopy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 10
# repr() / str()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
for y, x in [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in [(0, 0), (0, 3), (3, 0), (3, 3)]:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in [(1, 1), (1, 2), (2, 1), (2, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v3)
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in range(4):
for x in range(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in range(4):
for x in range(2, 4):
assert np.allclose(heatmaps_drawn[y, x], v2)
def test_HeatmapsOnImage_draw_on_image():
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
image = np.uint8([
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, cmap=None)[0]
assert heatmaps_drawn.shape == (4, 4, 3)
assert np.all(heatmaps_drawn[0:4, 0:2, :] == 0)
assert np.all(heatmaps_drawn[0:4, 2:3, :] == 128) or np.all(heatmaps_drawn[0:4, 2:3, :] == 127)
assert np.all(heatmaps_drawn[0:4, 3:4, :] == 255) or np.all(heatmaps_drawn[0:4, 3:4, :] == 254)
image = np.uint8([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, resize="image", cmap=None)[0]
assert heatmaps_drawn.shape == (2, 2, 3)
assert np.all(heatmaps_drawn[0:2, 0, :] == 0)
assert np.all(heatmaps_drawn[0:2, 1, :] == 128) or np.all(heatmaps_drawn[0:2, 1, :] == 127)
def test_HeatmapsOnImage_pad():
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
])
)
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, cval=0.5)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
])
)
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, mode="edge")
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
])
)
def test_HeatmapsOnImage_avg_pool():
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.avg_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 0.75],
[0.0, 0.75]])
)
def test_HeatmapsOnImage_max_pool():
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.max_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 1.0],
[0.0, 1.0]])
)
def test_HeatmapsOnImage_scale():
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.scale(size=(4, 4), interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (4, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.type == np.float32
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.scale(size=2.0, interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (2, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.type == np.float32
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
def test_BatchLoader():
def _load_func():
for _ in sm.xrange(20):
yield ia.Batch(images=np.zeros((2, 4, 4, 3), dtype=np.uint8))
# TODO these loops somehow require a `or len(loaded) < 20*nb_workers` on Travis, but not
# locally. (On Travis, usually one batch is missing, i.e. probably still in the queue.)
# That shouldn't be neccessary due to loader.all_finished(), but something breaks here.
# queue.close() works on Tavis py2, but not py3 as it raises an `OSError: handle is closed`.
for nb_workers in [1, 2]:
# repeat these tests many times to catch rarer race conditions
for _ in sm.xrange(50):
loader = ia.BatchLoader(_load_func, queue_size=2, nb_workers=nb_workers, threaded=True)
loaded = []
counter = 0
while (not loader.all_finished() or not loader.queue.empty() or len(loaded) < 20*nb_workers) and counter < 1000:
try:
batch = loader.queue.get(timeout=0.001)
loaded.append(batch)
except:
pass
counter += 1
#loader.queue.close()
#while not loader.queue.empty():
# loaded.append(loader.queue.get())
assert len(loaded) == 20*nb_workers, "Expected %d to be loaded by threads, got %d for %d workers at counter %d." % (20*nb_workers, len(loaded), nb_workers, counter)
loader = ia.BatchLoader(_load_func, queue_size=200, nb_workers=nb_workers, threaded=True)
loader.terminate()
assert loader.all_finished
loader = ia.BatchLoader(_load_func, queue_size=2, nb_workers=nb_workers, threaded=False)
loaded = []
counter = 0
while (not loader.all_finished() or not loader.queue.empty() or len(loaded) < 20*nb_workers) and counter < 1000:
try:
batch = loader.queue.get(timeout=0.001)
loaded.append(batch)
except:
pass
counter += 1
#loader.queue.close()
#while not loader.queue.empty():
# loaded.append(loader.queue.get())
assert len(loaded) == 20*nb_workers, "Expected %d to be loaded by background processes, got %d for %d workers at counter %d." % (20*nb_workers, len(loaded), nb_workers, counter)
loader = ia.BatchLoader(_load_func, queue_size=200, nb_workers=nb_workers, threaded=False)
loader.terminate()
assert loader.all_finished
def test_Noop():
reseed()
images = create_random_images((16, 70, 50, 3))
keypoints = create_random_keypoints((16, 70, 50, 3), 4)
aug = iaa.Noop()
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
assert iaa.Noop().get_parameters() == []
def test_Lambda():
reseed()
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
images_aug = images + 1
images_aug_list = [image + 1 for image in images_list]
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps_arr_aug = np.float32([[0.5, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=1),
ia.Keypoint(x=0, y=2)], shape=base_img.shape)]
def func_images(images, random_state, parents, hooks):
if isinstance(images, list):
images = [image + 1 for image in images]
else:
images = images + 1
return images
def func_heatmaps(heatmaps, random_state, parents, hooks):
heatmaps[0].arr_0to1[0, 0] += 0.5
return heatmaps
def func_keypoints(keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for kp in keypoints_on_image.keypoints:
kp.x = (kp.x + 1) % 3
return keypoints_on_images
aug = iaa.Lambda(func_images, func_heatmaps, func_keypoints)
aug_det = aug.to_deterministic()
# check once that the augmenter can handle lists correctly
observed = aug.augment_images(images_list)
expected = images_aug_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_aug_list
assert array_equal_lists(observed, expected)
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images_aug
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images_aug
assert np.array_equal(observed, expected)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_aug)
observed = aug_det.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_aug)
observed = aug.augment_keypoints(keypoints)
expected = keypoints_aug
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints_aug
assert keypoints_equal(observed, expected)
def test_AssertLambda():
reseed()
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
def func_images_succeeds(images, random_state, parents, hooks):
return images[0][0, 0] == 0 and images[0][2, 2] == 1
def func_images_fails(images, random_state, parents, hooks):
return images[0][0, 0] == 1
def func_heatmaps_succeeds(heatmaps, random_state, parents, hooks):
return heatmaps[0].arr_0to1[0, 0] < 0 + 1e-6
def func_heatmaps_fails(heatmaps, random_state, parents, hooks):
return heatmaps[0].arr_0to1[0, 0] > 0 + 1e-6
def func_keypoints_succeeds(keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images[0].keypoints[0].x == 0 and keypoints_on_images[0].keypoints[2].x == 2
def func_keypoints_fails(keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images[0].keypoints[0].x == 2
aug_succeeds = iaa.AssertLambda(func_images=func_images_succeeds,
func_heatmaps=func_heatmaps_succeeds,
func_keypoints=func_keypoints_succeeds)
aug_succeeds_det = aug_succeeds.to_deterministic()
aug_fails = iaa.AssertLambda(func_images=func_images_fails,
func_heatmaps=func_heatmaps_fails,
func_keypoints=func_keypoints_fails)
aug_fails_det = aug_fails.to_deterministic()
# images as numpy array
observed = aug_succeeds.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
try:
observed = aug_fails.augment_images(images)
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
try:
observed = aug_fails.augment_images(images)
errored = False
except AssertionError as e:
errored = True
assert errored
# Lists of images
observed = aug_succeeds.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
try:
observed = aug_fails.augment_images(images_list)
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
try:
observed = aug_fails.augment_images(images_list)
errored = False
except AssertionError as e:
errored = True
assert errored
# heatmaps
observed = aug_succeeds.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
try:
observed = aug_fails.augment_heatmaps([heatmaps])[0]
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
try:
observed = aug_fails.augment_heatmaps([heatmaps])[0]
errored = False
except AssertionError as e:
errored = True
assert errored
# keypoints
observed = aug_succeeds.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug_fails.augment_keypoints(keypoints)
errored = False
except AssertionError as e:
errored = True
assert errored
observed = aug_succeeds_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug_fails.augment_keypoints(keypoints)
errored = False
except AssertionError as e:
errored = True
assert errored
def test_AssertShape():
reseed()
base_img = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
heatmaps_arr = np.float32([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 4, 3))
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
base_img_h4 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0]], dtype=np.uint8)
base_img_h4 = base_img_h4[:, :, np.newaxis]
images_h4 = np.array([base_img_h4])
heatmaps_arr_h4 = np.float32([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0]])
heatmaps_h4 = ia.HeatmapsOnImage(heatmaps_arr_h4, shape=(4, 4, 3))
keypoints_h4 = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img_h4.shape)]
# image must have exactly shape (1, 3, 4, 1)
aug = iaa.AssertShape((1, 3, 4, 1))
aug_det = aug.to_deterministic()
# check once that the augmenter can handle lists correctly
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug_det.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_heatmaps([heatmaps_h4])[0]
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# any value for number of images allowed (None)
aug = iaa.AssertShape((None, 3, 4, 1))
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug_det.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_heatmaps([heatmaps_h4])[0]
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# list of possible choices [1, 3, 5] for height
aug = iaa.AssertShape((1, [1, 3, 5], 4, 1))
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug_det.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_heatmaps([heatmaps_h4])[0]
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# range of 1-3 for height (tuple comparison is a <= x < b, so we use (1,4) here)
aug = iaa.AssertShape((1, (1, 4), 4, 1))
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug_det.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps.get_arr())
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
try:
observed = aug.augment_images(images_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_heatmaps([heatmaps_h4])[0]
errored = False
except AssertionError as e:
errored = True
assert errored
try:
observed = aug.augment_keypoints(keypoints_h4)
errored = False
except AssertionError as e:
errored = True
assert errored
# bad datatype
got_exception = False
try:
aug = iaa.AssertShape((1, False, 4, 1))
observed = aug.augment_images(np.zeros((1, 2, 2, 1), dtype=np.uint8))
except Exception as exc:
assert "Invalid datatype " in str(exc)
got_exception = True
assert got_exception
def test_Alpha():
reseed()
base_img = np.zeros((3, 3, 1), dtype=np.uint8)
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps_arr_r1 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0]])
heatmaps_arr_l1 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 0.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
aug = iaa.Alpha(1, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = (base_img + 10).astype(np.uint8)
assert np.allclose(observed, expected)
for per_channel in [False, True]:
aug = iaa.Alpha(1, iaa.Affine(translate_px={"x":1}), iaa.Affine(translate_px={"x":-1}), per_channel=per_channel)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert 0 - 1e-6 < heatmaps.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_r1)
aug = iaa.Alpha(0, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = (base_img + 20).astype(np.uint8)
assert np.allclose(observed, expected)
for per_channel in [False, True]:
aug = iaa.Alpha(0, iaa.Affine(translate_px={"x":1}), iaa.Affine(translate_px={"x":-1}), per_channel=per_channel)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert 0 - 1e-6 < heatmaps.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_l1)
aug = iaa.Alpha(0.75, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = (base_img + 0.75 * 10 + 0.25 * 20).astype(np.uint8)
assert np.allclose(observed, expected)
aug = iaa.Alpha(0.75, None, iaa.Add(20))
observed = aug.augment_image(base_img + 10)
expected = (base_img + 0.75 * 10 + 0.25 * (10 + 20)).astype(np.uint8)
assert np.allclose(observed, expected)
aug = iaa.Alpha(0.75, iaa.Add(10), None)
observed = aug.augment_image(base_img + 10)
expected = (base_img + 0.75 * (10 + 10) + 0.25 * 10).astype(np.uint8)
assert np.allclose(observed, expected)
base_img = np.zeros((1, 2, 1), dtype=np.uint8)
nb_iterations = 1000
aug = iaa.Alpha((0.0, 1.0), iaa.Add(10), iaa.Add(110))
values = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
observed_val = np.round(np.average(observed)) - 10
values.append(observed_val / 100)
nb_bins = 5
hist, _ = np.histogram(values, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / nb_iterations
assert density_expected - density_tolerance < density < density_expected + density_tolerance
# bad datatype for factor
got_exception = False
try:
aug = iaa.Alpha(False, iaa.Add(10), None)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# per_channel
aug = iaa.Alpha(1.0, iaa.Add((0, 100), per_channel=True), None, per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 1000), dtype=np.uint8))
uq = np.unique(observed)
assert len(uq) > 1
assert np.max(observed) > 80
assert np.min(observed) < 20
aug = iaa.Alpha((0.0, 1.0), iaa.Add(100), None, per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 1000), dtype=np.uint8))
uq = np.unique(observed)
assert len(uq) > 1
assert np.max(observed) > 80
assert np.min(observed) < 20
aug = iaa.Alpha((0.0, 1.0), iaa.Add(100), iaa.Add(0), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) > 1:
seen[1] += 1
else:
assert False
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
# bad datatype for per_channel
got_exception = False
try:
aug = iaa.Alpha(0.5, iaa.Add(10), None, per_channel="test")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# propagating
aug = iaa.Alpha(0.5, iaa.Add(100), iaa.Add(50), name="AlphaTest")
def propagator(images, augmenter, parents, default):
if "Alpha" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(propagator=propagator)
image = np.zeros((10, 10, 3), dtype=np.uint8) + 1
observed = aug.augment_image(image, hooks=hooks)
assert np.array_equal(observed, image)
# -----
# keypoints
# -----
kps = [ia.Keypoint(x=5, y=10), ia.Keypoint(x=6, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=(20, 20, 3))
aug = iaa.Alpha(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.deepcopy()
assert keypoints_equal([observed], [expected])
aug = iaa.Alpha(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.deepcopy()
assert keypoints_equal([observed], [expected])
aug = iaa.Alpha(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.shift(x=1)
assert keypoints_equal([observed], [expected])
aug = iaa.Alpha(0.499, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.shift(x=1)
assert keypoints_equal([observed], [expected])
# per_channel
aug = iaa.Alpha(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.deepcopy()
assert keypoints_equal([observed], [expected])
aug = iaa.Alpha(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.shift(x=1)
assert keypoints_equal([observed], [expected])
aug = iaa.Alpha(iap.Choice([0.49, 0.51]), iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
expected_same = kpsoi.deepcopy()
expected_shifted = kpsoi.shift(x=1)
seen = [0, 0]
for _ in sm.xrange(200):
observed = aug.augment_keypoints([kpsoi])[0]
if keypoints_equal([observed], [expected_same]):
seen[0] += 1
elif keypoints_equal([observed], [expected_shifted]):
seen[1] += 1
else:
assert False
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
# propagating
aug = iaa.Alpha(0.0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"y": 1}), name="AlphaTest")
def propagator(kpsoi_to_aug, augmenter, parents, default):
if "Alpha" in augmenter.name:
return False
else:
return default
hooks = ia.HooksKeypoints(propagator=propagator)
observed = aug.augment_keypoints([kpsoi], hooks=hooks)[0]
assert keypoints_equal([observed], [kpsoi])
# -----
# get_parameters()
# -----
first = iaa.Noop()
second = iaa.Sequential([iaa.Add(1)])
aug = iaa.Alpha(0.65, first, second, per_channel=1)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert 0.65 - 1e-6 < params[0].value < 0.65 + 1e-6
assert params[1].value == 1
# -----
# get_children_lists()
# -----
first = iaa.Noop()
second = iaa.Sequential([iaa.Add(1)])
aug = iaa.Alpha(0.65, first, second, per_channel=1)
children_lsts = aug.get_children_lists()
assert len(children_lsts) == 2
assert ia.is_iterable([lst for lst in children_lsts])
assert first in children_lsts[0]
assert second == children_lsts[1]
def test_AlphaElementwise():
reseed()
base_img = np.zeros((3, 3, 1), dtype=np.uint8)
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps_arr_r1 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0]])
heatmaps_arr_l1 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 0.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
aug = iaa.AlphaElementwise(1, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = base_img + 10
assert np.allclose(observed, expected)
aug = iaa.AlphaElementwise(1, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"x": -1}))
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_r1)
aug = iaa.AlphaElementwise(0, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = base_img + 20
assert np.allclose(observed, expected)
aug = iaa.AlphaElementwise(0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"x": -1}))
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_l1)
aug = iaa.AlphaElementwise(0.75, iaa.Add(10), iaa.Add(20))
observed = aug.augment_image(base_img)
expected = (base_img + 0.75 * 10 + 0.25 * 20).astype(np.uint8)
assert np.allclose(observed, expected)
aug = iaa.AlphaElementwise(0.75, None, iaa.Add(20))
observed = aug.augment_image(base_img + 10)
expected = (base_img + 0.75 * 10 + 0.25 * (10 + 20)).astype(np.uint8)
assert np.allclose(observed, expected)
aug = iaa.AlphaElementwise(0.75, iaa.Add(10), None)
observed = aug.augment_image(base_img + 10)
expected = (base_img + 0.75 * (10 + 10) + 0.25 * 10).astype(np.uint8)
assert np.allclose(observed, expected)
base_img = np.zeros((100, 100), dtype=np.uint8)
aug = iaa.AlphaElementwise((0.0, 1.0), iaa.Add(10), iaa.Add(110))
observed = (aug.augment_image(base_img) - 10) / 100
nb_bins = 10
hist, _ = np.histogram(observed.flatten(), bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / observed.size
assert density_expected - density_tolerance < density < density_expected + density_tolerance
base_img = np.zeros((1, 1, 100), dtype=np.uint8)
aug = iaa.AlphaElementwise((0.0, 1.0), iaa.Add(10), iaa.Add(110), per_channel=True)
observed = aug.augment_image(base_img)
assert len(set(observed.flatten())) > 1
# propagating
aug = iaa.AlphaElementwise(0.5, iaa.Add(100), iaa.Add(50), name="AlphaElementwiseTest")
def propagator(images, augmenter, parents, default):
if "AlphaElementwise" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(propagator=propagator)
image = np.zeros((10, 10, 3), dtype=np.uint8) + 1
observed = aug.augment_image(image, hooks=hooks)
assert np.array_equal(observed, image)
# -----
# heatmaps and per_channel
# -----
class _DummyMaskParameter(iap.StochasticParameter):
def __init__(self, inverted=False):
super(_DummyMaskParameter, self).__init__()
self.nb_calls = 0
self.inverted = inverted
def _draw_samples(self, size, random_state):
self.nb_calls += 1
h, w = size
ones = np.ones((h, w), dtype=np.float32)
zeros = np.zeros((h, w), dtype=np.float32)
if self.nb_calls == 1:
return zeros if not self.inverted else ones
elif self.nb_calls in [2, 3]:
return ones if not self.inverted else zeros
else:
assert False
aug = iaa.AlphaElementwise(
_DummyMaskParameter(inverted=False),
iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"x": -1}),
per_channel=True
)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_r1)
aug = iaa.AlphaElementwise(
_DummyMaskParameter(inverted=True),
iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"x": -1}),
per_channel=True
)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_l1)
# -----
# keypoints
# -----
kps = [ia.Keypoint(x=5, y=10), ia.Keypoint(x=6, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=(20, 20, 3))
aug = iaa.AlphaElementwise(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.deepcopy()
assert keypoints_equal([observed], [expected])
aug = iaa.AlphaElementwise(0.501, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.deepcopy()
assert keypoints_equal([observed], [expected])
aug = iaa.AlphaElementwise(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.shift(x=1)
assert keypoints_equal([observed], [expected])
aug = iaa.AlphaElementwise(0.499, iaa.Noop(), iaa.Affine(translate_px={"x": 1}))
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.shift(x=1)
assert keypoints_equal([observed], [expected])
# per_channel
aug = iaa.AlphaElementwise(1.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.deepcopy()
assert keypoints_equal([observed], [expected])
aug = iaa.AlphaElementwise(0.0, iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
observed = aug.augment_keypoints([kpsoi])[0]
expected = kpsoi.shift(x=1)
assert keypoints_equal([observed], [expected])
"""
TODO this test currently doesn't work as AlphaElementwise augments keypoints without sampling
overlay factors per (x, y) location. (i.e. similar behaviour to Alpha)
aug = iaa.Alpha(iap.Choice([0.49, 0.51]), iaa.Noop(), iaa.Affine(translate_px={"x": 1}), per_channel=True)
expected_same = kpsoi.deepcopy()
expected_both_shifted = kpsoi.shift(x=1)
expected_first_shifted = KeypointsOnImage([kps[0].shift(x=1), kps[1]], shape=kpsoi.shape)
expected_second_shifted = KeypointsOnImage([kps[0], kps[1].shift(x=1)], shape=kpsoi.shape)
seen = [0, 0]
for _ in sm.xrange(200):
observed = aug.augment_keypoints([kpsoi])[0]
if keypoints_equal([observed], [expected_same]):
seen[0] += 1
elif keypoints_equal([observed], [expected_both_shifted]):
seen[1] += 1
elif keypoints_equal([observed], [expected_first_shifted]):
seen[2] += 1
elif keypoints_equal([observed], [expected_second_shifted]):
seen[3] += 1
else:
assert False
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
"""
# propagating
aug = iaa.AlphaElementwise(0.0, iaa.Affine(translate_px={"x": 1}), iaa.Affine(translate_px={"y": 1}), name="AlphaElementwiseTest")
def propagator(kpsoi_to_aug, augmenter, parents, default):
if "AlphaElementwise" in augmenter.name:
return False
else:
return default
hooks = ia.HooksKeypoints(propagator=propagator)
observed = aug.augment_keypoints([kpsoi], hooks=hooks)[0]
assert keypoints_equal([observed], [kpsoi])
def test_Superpixels():
reseed()
def _array_equals_tolerant(a, b, tolerance):
diff = np.abs(a.astype(np.int32) - b.astype(np.int32))
return np.all(diff <= tolerance)
base_img = [
[255, 255, 255, 0, 0, 0],
[255, 235, 255, 0, 20, 0],
[250, 250, 250, 5, 5, 5]
]
base_img = np.tile(np.array(base_img, dtype=np.uint8)[..., np.newaxis], (1, 1, 3))
base_img_superpixels = [
[251, 251, 251, 4, 4, 4],
[251, 251, 251, 4, 4, 4],
[251, 251, 251, 4, 4, 4]
]
base_img_superpixels = np.tile(np.array(base_img_superpixels, dtype=np.uint8)[..., np.newaxis], (1, 1, 3))
base_img_superpixels_left = np.copy(base_img_superpixels)
base_img_superpixels_left[:, 3:, :] = base_img[:, 3:, :]
base_img_superpixels_right = np.copy(base_img_superpixels)
base_img_superpixels_right[:, :3, :] = base_img[:, :3, :]
aug = iaa.Superpixels(p_replace=0, n_segments=2)
observed = aug.augment_image(base_img)
expected = base_img
assert np.allclose(observed, expected)
aug = iaa.Superpixels(p_replace=1.0, n_segments=2)
observed = aug.augment_image(base_img)
expected = base_img_superpixels
assert _array_equals_tolerant(observed, expected, 2)
aug = iaa.Superpixels(p_replace=1.0, n_segments=iap.Deterministic(2))
observed = aug.augment_image(base_img)
expected = base_img_superpixels
assert _array_equals_tolerant(observed, expected, 2)
aug = iaa.Superpixels(p_replace=iap.Binomial(iap.Choice([0.0, 1.0])), n_segments=2)
observed = aug.augment_image(base_img)
assert np.allclose(observed, base_img) or _array_equals_tolerant(observed, base_img_superpixels, 2)
aug = iaa.Superpixels(p_replace=0.5, n_segments=2)
seen = {"none": False, "left": False, "right": False, "both": False}
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
if _array_equals_tolerant(observed, base_img, 2):
seen["none"] = True
elif _array_equals_tolerant(observed, base_img_superpixels_left, 2):
seen["left"] = True
elif _array_equals_tolerant(observed, base_img_superpixels_right, 2):
seen["right"] = True
elif _array_equals_tolerant(observed, base_img_superpixels, 2):
seen["both"] = True
else:
raise Exception("Generated superpixels image does not match any expected image.")
if all(seen.values()):
break
assert all(seen.values())
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.Superpixels(p_replace="test", n_segments=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.Superpixels(p_replace=1, n_segments="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.Superpixels(p_replace=1, n_segments=2, max_size=100, interpolation="nearest")
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].p.value == 1
assert params[1].value == 2
assert params[2] == 100
assert params[3] == "nearest"
def test_Scale():
reseed()
base_img2d = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 255, 255, 255, 255, 255, 255, 0],
[0, 255, 255, 255, 255, 255, 255, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
base_img2d = np.array(base_img2d, dtype=np.uint8)
base_img3d = np.tile(base_img2d[..., np.newaxis], (1, 1, 3))
intensity_avg = np.average(base_img2d)
intensity_low = intensity_avg - 0.2 * np.abs(intensity_avg - 128)
intensity_high = intensity_avg + 0.2 * np.abs(intensity_avg - 128)
aspect_ratio2d = base_img2d.shape[1] / base_img2d.shape[0]
aspect_ratio3d = base_img3d.shape[1] / base_img3d.shape[0]
aug = iaa.Scale(12)
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (12, 12)
assert observed3d.shape == (12, 12, 3)
assert 50 < np.average(observed2d) < 205
assert 50 < np.average(observed3d) < 205
aug = iaa.Scale({"height": 8, "width": 12})
heatmaps_arr = (base_img2d / 255.0).astype(np.float32)
heatmaps_aug = aug.augment_heatmaps([ia.HeatmapsOnImage(heatmaps_arr, shape=base_img3d.shape)])[0]
assert heatmaps_aug.shape == base_img3d.shape
assert 0 - 1e-6 < heatmaps_aug.min_value < 0 + 1e-6
assert 1 - 1e-6 < heatmaps_aug.max_value < 1 + 1e-6
assert np.average(heatmaps_aug.get_arr()[0, :]) < 0.05
assert np.average(heatmaps_aug.get_arr()[-1, :]) < 0.05
assert np.average(heatmaps_aug.get_arr()[:, 0]) < 0.05
assert 0.8 < np.average(heatmaps_aug.get_arr()[2:6, 2:10]) < 1 + 1e-6
aug = iaa.Scale([12, 14])
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(12, 12), (14, 14)]
assert observed3d.shape in [(12, 12, 3), (14, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale((12, 14))
seen2d = [False, False, False]
seen3d = [False, False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(12, 12), (13, 13), (14, 14)]
assert observed3d.shape in [(12, 12, 3), (13, 13, 3), (14, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
elif observed2d.shape == (13, 13):
seen2d[1] = True
else:
seen2d[2] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
elif observed3d.shape == (13, 13, 3):
seen3d[1] = True
else:
seen3d[2] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale("keep")
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == base_img2d.shape
assert observed3d.shape == base_img3d.shape
aug = iaa.Scale([])
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == base_img2d.shape
assert observed3d.shape == base_img3d.shape
aug = iaa.Scale({})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == base_img2d.shape
assert observed3d.shape == base_img3d.shape
aug = iaa.Scale({"height": 11})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (11, base_img2d.shape[1])
assert observed3d.shape == (11, base_img3d.shape[1], 3)
aug = iaa.Scale({"width": 13})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (base_img2d.shape[0], 13)
assert observed3d.shape == (base_img3d.shape[0], 13, 3)
aug = iaa.Scale({"height": 12, "width": 13})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (12, 13)
assert observed3d.shape == (12, 13, 3)
aug = iaa.Scale({"height": 12, "width": "keep"})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (12, base_img2d.shape[1])
assert observed3d.shape == (12, base_img3d.shape[1], 3)
aug = iaa.Scale({"height": "keep", "width": 12})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (base_img2d.shape[0], 12)
assert observed3d.shape == (base_img3d.shape[0], 12, 3)
aug = iaa.Scale({"height": 12, "width": "keep-aspect-ratio"})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (12, int(12 * aspect_ratio2d))
assert observed3d.shape == (12, int(12 * aspect_ratio3d), 3)
aug = iaa.Scale({"height": "keep-aspect-ratio", "width": 12})
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (int(12 * (1/aspect_ratio2d)), 12)
assert observed3d.shape == (int(12 * (1/aspect_ratio3d)), 12, 3)
aug = iaa.Scale({"height": [12, 14], "width": 12})
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(12, 12), (14, 12)]
assert observed3d.shape in [(12, 12, 3), (14, 12, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale({"height": 12, "width": [12, 14]})
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(12, 12), (12, 14)]
assert observed3d.shape in [(12, 12, 3), (12, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale({"height": 12, "width": iap.Choice([12, 14])})
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(12, 12), (12, 14)]
assert observed3d.shape in [(12, 12, 3), (12, 14, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale({"height": (12, 14), "width": 12})
seen2d = [False, False, False]
seen3d = [False, False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(12, 12), (13, 12), (14, 12)]
assert observed3d.shape in [(12, 12, 3), (13, 12, 3), (14, 12, 3)]
if observed2d.shape == (12, 12):
seen2d[0] = True
elif observed2d.shape == (13, 12):
seen2d[1] = True
else:
seen2d[2] = True
if observed3d.shape == (12, 12, 3):
seen3d[0] = True
elif observed3d.shape == (13, 12, 3):
seen3d[1] = True
else:
seen3d[2] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale(2.0)
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape == (base_img2d.shape[0]*2, base_img2d.shape[1]*2)
assert observed3d.shape == (base_img3d.shape[0]*2, base_img3d.shape[1]*2, 3)
assert intensity_low < np.average(observed2d) < intensity_high
assert intensity_low < np.average(observed3d) < intensity_high
aug = iaa.Scale([2.0, 4.0])
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(base_img2d.shape[0]*2, base_img2d.shape[1]*2), (base_img2d.shape[0]*4, base_img2d.shape[1]*4)]
assert observed3d.shape in [(base_img3d.shape[0]*2, base_img3d.shape[1]*2, 3), (base_img3d.shape[0]*4, base_img3d.shape[1]*4, 3)]
if observed2d.shape == (base_img2d.shape[0]*2, base_img2d.shape[1]*2):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (base_img3d.shape[0]*2, base_img3d.shape[1]*2, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
aug = iaa.Scale(iap.Choice([2.0, 4.0]))
seen2d = [False, False]
seen3d = [False, False]
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in [(base_img2d.shape[0]*2, base_img2d.shape[1]*2), (base_img2d.shape[0]*4, base_img2d.shape[1]*4)]
assert observed3d.shape in [(base_img3d.shape[0]*2, base_img3d.shape[1]*2, 3), (base_img3d.shape[0]*4, base_img3d.shape[1]*4, 3)]
if observed2d.shape == (base_img2d.shape[0]*2, base_img2d.shape[1]*2):
seen2d[0] = True
else:
seen2d[1] = True
if observed3d.shape == (base_img3d.shape[0]*2, base_img3d.shape[1]*2, 3):
seen3d[0] = True
else:
seen3d[1] = True
if all(seen2d) and all(seen3d):
break
assert all(seen2d)
assert all(seen3d)
base_img2d = base_img2d[0:4, 0:4]
base_img3d = base_img3d[0:4, 0:4, :]
aug = iaa.Scale((0.76, 1.0))
not_seen2d = set()
not_seen3d = set()
for size in sm.xrange(3, 4+1):
not_seen2d.add((size, size))
for size in sm.xrange(3, 4+1):
not_seen3d.add((size, size, 3))
possible2d = set(list(not_seen2d))
possible3d = set(list(not_seen3d))
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in possible2d
assert observed3d.shape in possible3d
if observed2d.shape in not_seen2d:
not_seen2d.remove(observed2d.shape)
if observed3d.shape in not_seen3d:
not_seen3d.remove(observed3d.shape)
if not not_seen2d and not not_seen3d:
break
assert not not_seen2d
assert not not_seen3d
base_img2d = base_img2d[0:4, 0:4]
base_img3d = base_img3d[0:4, 0:4, :]
aug = iaa.Scale({"height": (0.76, 1.0), "width": (0.76, 1.0)})
not_seen2d = set()
not_seen3d = set()
for hsize in sm.xrange(3, 4+1):
for wsize in sm.xrange(3, 4+1):
not_seen2d.add((hsize, wsize))
#print(base_img3d.shape[0]//2, base_img3d.shape[1]+1)
for hsize in sm.xrange(3, 4+1):
for wsize in sm.xrange(3, 4+1):
not_seen3d.add((hsize, wsize, 3))
possible2d = set(list(not_seen2d))
possible3d = set(list(not_seen3d))
for _ in sm.xrange(100):
observed2d = aug.augment_image(base_img2d)
observed3d = aug.augment_image(base_img3d)
assert observed2d.shape in possible2d
assert observed3d.shape in possible3d
if observed2d.shape in not_seen2d:
not_seen2d.remove(observed2d.shape)
if observed3d.shape in not_seen3d:
not_seen3d.remove(observed3d.shape)
if not not_seen2d and not not_seen3d:
break
assert not not_seen2d
assert not not_seen3d
got_exception = False
try:
aug = iaa.Scale("foo")
observed2d = aug.augment_image(base_img2d)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
aug = iaa.Scale(size=1, interpolation="nearest")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == "nearest"
def test_Pad():
reseed()
base_img = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
heatmaps_arr = np.float32([[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]])
# test pad by 1 pixel on each side
pads = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for pad in pads:
top, right, bottom, left = pad
height, width = base_img.shape[0:2]
aug = iaa.Pad(px=pad, keep_size=False)
base_img_padded = np.pad(base_img, ((top, bottom), (left, right), (0, 0)),
mode="constant",
constant_values=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, np.array([base_img_padded]))
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, [base_img_padded])
keypoints_moved = [keypoints[0].shift(x=left, y=top)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
# heatmaps
height, width = heatmaps_arr.shape[0:2]
aug = iaa.Pad(px=pad, keep_size=False)
heatmaps_arr_padded = np.pad(heatmaps_arr, ((top, bottom), (left, right)),
mode="constant",
constant_values=0)
observed = aug.augment_heatmaps([ia.HeatmapsOnImage(heatmaps_arr, shape=base_img.shape)])[0]
assert observed.shape == base_img.shape
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), heatmaps_arr_padded)
# test pad by range of pixels
pads = [
((0, 2), 0, 0, 0),
(0, (0, 2), 0, 0),
(0, 0, (0, 2), 0),
(0, 0, 0, (0, 2)),
]
for pad in pads:
top, right, bottom, left = pad
height, width = base_img.shape[0:2]
aug = iaa.Pad(px=pad, keep_size=False)
aug_det = aug.to_deterministic()
images_padded = []
keypoints_padded = []
top_range = top if isinstance(top, tuple) else (top, top)
right_range = right if isinstance(right, tuple) else (right, right)
bottom_range = bottom if isinstance(bottom, tuple) else (bottom, bottom)
left_range = left if isinstance(left, tuple) else (left, left)
for top_val in sm.xrange(top_range[0], top_range[1]+1):
for right_val in sm.xrange(right_range[0], right_range[1]+1):
for bottom_val in sm.xrange(bottom_range[0], bottom_range[1]+1):
for left_val in sm.xrange(left_range[0], left_range[1]+1):
images_padded.append(np.pad(base_img, ((top_val, bottom_val), (left_val, right_val), (0, 0)), mode="constant", constant_values=0))
keypoints_padded.append(keypoints[0].shift(x=left_val, y=top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_padded])) else 0
for base_img_padded in images_padded]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_padded])) else 0
for base_img_padded in images_padded]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images(images_list)
assert any([array_equal_lists(observed, [base_img_padded])
for base_img_padded in images_padded])
observed = aug.augment_keypoints(keypoints)
assert any([keypoints_equal(observed, [kp]) for kp in keypoints_padded])
assert len(set(movements)) == 3
assert len(set(movements_det)) == 1
# test pad by list of exact pixel values
pads = [
([0, 2], 0, 0, 0),
(0, [0, 2], 0, 0),
(0, 0, [0, 2], 0),
(0, 0, 0, [0, 2]),
]
for pad in pads:
top, right, bottom, left = pad
height, width = base_img.shape[0:2]
aug = iaa.Pad(px=pad, keep_size=False)
aug_det = aug.to_deterministic()
images_padded = []
keypoints_padded = []
top_range = top if isinstance(top, list) else [top]
right_range = right if isinstance(right, list) else [right]
bottom_range = bottom if isinstance(bottom, list) else [bottom]
left_range = left if isinstance(left, list) else [left]
for top_val in top_range:
for right_val in right_range:
for bottom_val in bottom_range:
for left_val in left_range:
images_padded.append(np.pad(base_img, ((top_val, bottom_val), (left_val, right_val), (0, 0)), mode="constant", constant_values=0))
keypoints_padded.append(keypoints[0].shift(x=left_val, y=top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_padded])) else 0
for base_img_padded in images_padded]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_padded])) else 0
for base_img_padded in images_padded]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images(images_list)
assert any([array_equal_lists(observed, [base_img_padded])
for base_img_padded in images_padded])
observed = aug.augment_keypoints(keypoints)
assert any([keypoints_equal(observed, [kp]) for kp in keypoints_padded])
assert len(set(movements)) == 2
assert len(set(movements_det)) == 1
# pad modes
image = np.zeros((1, 2), dtype=np.uint8)
image[0, 0] = 100
image[0, 1] = 50
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode=iap.Choice(["constant", "maximum", "edge"]), pad_cval=0, keep_size=False)
seen = [0, 0, 0]
for _ in sm.xrange(300):
observed = aug.augment_image(image)
if observed[0, 2] == 0:
seen[0] += 1
elif observed[0, 2] == 100:
seen[1] += 1
elif observed[0, 2] == 50:
seen[2] += 1
else:
assert False
assert all([100 - 50 < v < 100 + 50 for v in seen])
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode=ia.ALL, pad_cval=0, keep_size=False)
expected = ["constant", "edge", "linear_ramp", "maximum", "median", "minimum", "reflect", "symmetric", "wrap"]
assert isinstance(aug.pad_mode, iap.Choice)
assert len(aug.pad_mode.a) == len(expected)
assert all([v in aug.pad_mode.a for v in expected])
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode=["constant", "maximum"], pad_cval=0, keep_size=False)
expected = ["constant", "maximum"]
assert isinstance(aug.pad_mode, iap.Choice)
assert len(aug.pad_mode.a) == len(expected)
assert all([v in aug.pad_mode.a for v in expected])
got_exception = False
try:
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode=False, pad_cval=0, keep_size=False)
except Exception as exc:
assert "Expected pad_mode to be " in str(exc)
got_exception = True
assert got_exception
# pad modes, heatmaps
heatmaps = ia.HeatmapsOnImage(np.ones((3, 3, 1), dtype=np.float32), shape=(3, 3, 3))
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="edge", pad_cval=0, keep_size=False)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum(observed.get_arr() <= 1e-4) == 3
# pad cvals
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="constant", pad_cval=100, keep_size=False)
observed = aug.augment_image(np.zeros((1, 1), dtype=np.uint8))
assert observed[0, 0] == 0
assert observed[0, 1] == 100
image = np.zeros((1, 1), dtype=np.uint8)
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="constant", pad_cval=iap.Choice([50, 100]), keep_size=False)
seen = [0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(image)
if observed[0, 1] == 50:
seen[0] += 1
elif observed[0, 1] == 100:
seen[1] += 1
else:
assert False
assert all([100 - 50 < v < 100 + 50 for v in seen])
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="constant", pad_cval=[50, 100], keep_size=False)
expected = [50, 100]
assert isinstance(aug.pad_cval, iap.Choice)
assert len(aug.pad_cval.a) == len(expected)
assert all([v in aug.pad_cval.a for v in expected])
image = np.zeros((1, 1), dtype=np.uint8)
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="constant", pad_cval=(50, 52), keep_size=False)
seen = [0, 0, 0]
for _ in sm.xrange(300):
observed = aug.augment_image(image)
if observed[0, 1] == 50:
seen[0] += 1
elif observed[0, 1] == 51:
seen[1] += 1
elif observed[0, 1] == 52:
seen[2] += 1
else:
assert False
assert all([100 - 50 < v < 100 + 50 for v in seen])
got_exception = False
try:
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="constant", pad_cval="test", keep_size=False)
except Exception as exc:
assert "Expected pad_cval " in str(exc)
got_exception = True
assert got_exception
# pad cvals, heatmaps
heatmaps = ia.HeatmapsOnImage(np.zeros((3, 3, 1), dtype=np.float32), shape=(3, 3, 3))
aug = iaa.Pad(px=(0, 1, 0, 0), pad_mode="constant", pad_cval=255, keep_size=False)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum(observed.get_arr() > 1e-4) == 0
# ------------------
# pad by percentages
# ------------------
# pad all sides by 100%
aug = iaa.Pad(percent=1.0, keep_size=False)
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 1)
assert observed.shape == (4+4+4, 4+4+4)
assert np.sum(observed[4:-4, 4:-4]) == 4*4
assert np.sum(observed) == 4*4
# pad all sides by StochasticParameter
aug = iaa.Pad(percent=iap.Deterministic(1.0), keep_size=False)
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 1)
assert observed.shape == (4+4+4, 4+4+4)
assert np.sum(observed[4:-4, 4:-4]) == 4*4
assert np.sum(observed) == 4*4
# pad all sides by 100-200%
aug = iaa.Pad(percent=(1.0, 2.0), sample_independently=False, keep_size=False)
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 1)
assert np.sum(observed) == 4*4
assert (observed.shape[0] - 4) % 2 == 0
assert (observed.shape[1] - 4) % 2 == 0
# pad by invalid value
got_exception = False
try:
aug = iaa.Pad(percent="test", keep_size=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# test pad by 100% on each side
image = np.zeros((4, 4), dtype=np.uint8)
image[0, 0] = 255
image[3, 0] = 255
image[0, 3] = 255
image[3, 3] = 255
height, width = image.shape[0:2]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=3),
ia.Keypoint(x=3, y=3)], shape=image.shape)]
pads = [
(1.0, 0, 0, 0),
(0, 1.0, 0, 0),
(0, 0, 1.0, 0),
(0, 0, 0, 1.0),
]
for pad in pads:
top, right, bottom, left = pad
top_px = int(top * height)
right_px = int(right * width)
bottom_px = int(bottom * height)
left_px = int(left * width)
aug = iaa.Pad(percent=pad, keep_size=False)
image_padded = np.pad(image, ((top_px, bottom_px), (left_px, right_px)),
mode="constant",
constant_values=0)
observed = aug.augment_image(image)
assert np.array_equal(observed, image_padded)
keypoints_moved = [keypoints[0].shift(x=left_px, y=top_px)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
# test pad by range of percentages
aug = iaa.Pad(percent=((0, 1.0), 0, 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[0, :] == 0):
n_padded += 1
observed = observed[1:, :]
seen[n_padded] += 1
# note that we cant just check for 100-50 < x < 100+50 here. The first and last value (0px
# and 4px) padding have half the probability of occuring compared to the other values.
# E.g. 0px is padded if sampled p falls in range [0, 0.125). 1px is padded if sampled p
# falls in range [0.125, 0.375].
assert all([v > 30 for v in seen])
aug = iaa.Pad(percent=(0, (0, 1.0), 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[:, -1] == 0):
n_padded += 1
observed = observed[:, 0:-1]
seen[n_padded] += 1
assert all([v > 30 for v in seen])
# test pad by list of percentages
aug = iaa.Pad(percent=([0.0, 1.0], 0, 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[0, :] == 0):
n_padded += 1
observed = observed[1:, :]
seen[n_padded] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
aug = iaa.Pad(percent=(0, [0.0, 1.0], 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((4, 4), dtype=np.uint8) + 255)
n_padded = 0
while np.all(observed[:, -1] == 0):
n_padded += 1
observed = observed[:, 0:-1]
seen[n_padded] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
def test_Crop():
reseed()
base_img = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
heatmaps_arr = np.float32([[0, 0, 0],
[0, 1.0, 0],
[0, 0, 0]])
# test crop by 1 pixel on each side
crops = [
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
]
for crop in crops:
top, right, bottom, left = crop
height, width = base_img.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
base_img_cropped = base_img[top:height-bottom, left:width-right, :]
observed = aug.augment_images(images)
assert np.array_equal(observed, np.array([base_img_cropped]))
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, [base_img_cropped])
keypoints_moved = [keypoints[0].shift(x=-left, y=-top)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
height, width = heatmaps_arr.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
heatmaps_arr_cropped = heatmaps_arr[top:height-bottom, left:width-right]
observed = aug.augment_heatmaps([ia.HeatmapsOnImage(heatmaps_arr, shape=base_img.shape)])[0]
assert np.array_equal(observed.get_arr(), heatmaps_arr_cropped)
# test crop by range of pixels
crops = [
((0, 2), 0, 0, 0),
(0, (0, 2), 0, 0),
(0, 0, (0, 2), 0),
(0, 0, 0, (0, 2)),
]
for crop in crops:
top, right, bottom, left = crop
height, width = base_img.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
aug_det = aug.to_deterministic()
images_cropped = []
keypoints_cropped = []
top_range = top if isinstance(top, tuple) else (top, top)
right_range = right if isinstance(right, tuple) else (right, right)
bottom_range = bottom if isinstance(bottom, tuple) else (bottom, bottom)
left_range = left if isinstance(left, tuple) else (left, left)
for top_val in sm.xrange(top_range[0], top_range[1]+1):
for right_val in sm.xrange(right_range[0], right_range[1]+1):
for bottom_val in sm.xrange(bottom_range[0], bottom_range[1]+1):
for left_val in sm.xrange(left_range[0], left_range[1]+1):
images_cropped.append(base_img[top_val:height-bottom_val, left_val:width-right_val, :])
keypoints_cropped.append(keypoints[0].shift(x=-left_val, y=-top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0
for base_img_cropped in images_cropped]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0
for base_img_cropped in images_cropped]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images(images_list)
assert any([array_equal_lists(observed, [base_img_cropped])
for base_img_cropped in images_cropped])
observed = aug.augment_keypoints(keypoints)
assert any([keypoints_equal(observed, [kp]) for kp in keypoints_cropped])
assert len(set(movements)) == 3
assert len(set(movements_det)) == 1
# test crop by list of exact pixel values
crops = [
([0, 2], 0, 0, 0),
(0, [0, 2], 0, 0),
(0, 0, [0, 2], 0),
(0, 0, 0, [0, 2]),
]
for crop in crops:
top, right, bottom, left = crop
height, width = base_img.shape[0:2]
aug = iaa.Crop(px=crop, keep_size=False)
aug_det = aug.to_deterministic()
images_cropped = []
keypoints_cropped = []
top_range = top if isinstance(top, list) else [top]
right_range = right if isinstance(right, list) else [right]
bottom_range = bottom if isinstance(bottom, list) else [bottom]
left_range = left if isinstance(left, list) else [left]
for top_val in top_range:
for right_val in right_range:
for bottom_val in bottom_range:
for left_val in left_range:
images_cropped.append(base_img[top_val:height-bottom_val, left_val:width-right_val, :])
keypoints_cropped.append(keypoints[0].shift(x=-left_val, y=-top_val))
movements = []
movements_det = []
for i in sm.xrange(100):
observed = aug.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0
for base_img_cropped in images_cropped]
movements.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug_det.augment_images(images)
matches = [1 if np.array_equal(observed, np.array([base_img_cropped])) else 0
for base_img_cropped in images_cropped]
movements_det.append(np.argmax(np.array(matches)))
assert any([val == 1 for val in matches])
observed = aug.augment_images(images_list)
assert any([array_equal_lists(observed, [base_img_cropped])
for base_img_cropped in images_cropped])
observed = aug.augment_keypoints(keypoints)
assert any([keypoints_equal(observed, [kp]) for kp in keypoints_cropped])
assert len(set(movements)) == 2
assert len(set(movements_det)) == 1
# ------------------
# crop by percentages
# ------------------
# crop all sides by 10%
aug = iaa.Crop(percent=0.1, keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.shape == (40, 40)
assert np.all(observed == image[5:-5, 5:-5])
# crop all sides by StochasticParameter
aug = iaa.Crop(percent=iap.Deterministic(0.1), keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.shape == (40, 40)
assert np.all(observed == image[5:-5, 5:-5])
# crop all sides by 10-20%
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
aug = iaa.Crop(percent=(0.1, 0.2), keep_size=False)
observed = aug.augment_image(image)
assert 30 <= observed.shape[0] <= 40
assert 30 <= observed.shape[1] <= 40
# crop by invalid value
got_exception = False
try:
aug = iaa.Crop(percent="test", keep_size=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# test crop by 10% on each side
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
height, width = image.shape[0:2]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=10, y=11), ia.Keypoint(x=20, y=21),
ia.Keypoint(x=30, y=31)], shape=image.shape)]
crops = [
(0.1, 0, 0, 0),
(0, 0.1, 0, 0),
(0, 0, 0.1, 0),
(0, 0, 0, 0.1),
]
for crop in crops:
top, right, bottom, left = crop
top_px = int(round(top * height))
right_px = int(round(right * width))
bottom_px = int(round(bottom * height))
left_px = int(round(left * width))
aug = iaa.Crop(percent=crop, keep_size=False)
image_cropped = image[top_px:50-bottom_px, left_px:50-right_px] # dont use :-bottom_px and ;-right_px here, because these values can be 0
observed = aug.augment_image(image)
assert np.array_equal(observed, image_cropped)
keypoints_moved = [keypoints[0].shift(x=-left_px, y=-top_px)]
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_moved)
# test crop by range of percentages
aug = iaa.Crop(percent=((0, 0.1), 0, 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((40, 40), dtype=np.uint8))
n_cropped = 40 - observed.shape[0]
seen[n_cropped] += 1
# note that we cant just check for 100-50 < x < 100+50 here. The first and last value (0px
# and 4px) have half the probability of occuring compared to the other values.
# E.g. 0px is cropped if sampled p falls in range [0, 0.125). 1px is cropped if sampled p
# falls in range [0.125, 0.375].
assert all([v > 30 for v in seen])
aug = iaa.Crop(percent=(0, (0, 0.1), 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((40, 40), dtype=np.uint8) + 255)
n_cropped = 40 - observed.shape[1]
seen[n_cropped] += 1
assert all([v > 30 for v in seen])
# test crop by list of percentages
aug = iaa.Crop(percent=([0.0, 0.1], 0, 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((40, 40), dtype=np.uint8) + 255)
n_cropped = 40 - observed.shape[0]
seen[n_cropped] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
aug = iaa.Crop(percent=(0, [0.0, 0.1], 0, 0), keep_size=False)
seen = [0, 0, 0, 0, 0]
for _ in sm.xrange(500):
observed = aug.augment_image(np.zeros((40, 40), dtype=np.uint8) + 255)
n_cropped = 40 - observed.shape[1]
seen[n_cropped] += 1
assert 250 - 50 < seen[0] < 250 + 50
assert seen[1] == 0
assert seen[2] == 0
assert seen[3] == 0
assert 250 - 50 < seen[4] < 250 + 50
def test_Fliplr():
reseed()
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
base_img_flipped = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8)
base_img_flipped = base_img_flipped[:, :, np.newaxis]
images = np.array([base_img])
images_flipped = np.array([base_img_flipped])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
keypoints_flipped = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=0, y=2)], shape=base_img.shape)]
# 0% chance of flip
aug = iaa.Fliplr(0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# 0% chance of flip, heatmaps
aug = iaa.Fliplr(0)
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0, 0.5, 0.75],
[0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = heatmaps.get_arr()
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), expected)
# 100% chance of flip
aug = iaa.Fliplr(1.0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
# 100% chance of flip, heatmaps
aug = iaa.Fliplr(1.0)
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0, 0.5, 0.75],
[0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = np.fliplr(heatmaps.get_arr())
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), expected)
# 50% chance of flip
aug = iaa.Fliplr(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_images_flipped = 0
nb_images_flipped_det = 0
nb_keypoints_flipped = 0
nb_keypoints_flipped_det = 0
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped += 1
observed = aug_det.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped_det += 1
observed = aug.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped += 1
observed = aug_det.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped_det += 1
assert int(nb_iterations * 0.3) <= nb_images_flipped <= int(nb_iterations * 0.7)
assert int(nb_iterations * 0.3) <= nb_keypoints_flipped <= int(nb_iterations * 0.7)
assert nb_images_flipped_det in [0, nb_iterations]
assert nb_keypoints_flipped_det in [0, nb_iterations]
# 50% chance of flipped, multiple images, list as input
images_multi = [base_img, base_img]
aug = iaa.Fliplr(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_flipped_by_pos = [0] * len(images_multi)
nb_flipped_by_pos_det = [0] * len(images_multi)
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos[i] += 1
observed = aug_det.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos_det[i] += 1
for val in nb_flipped_by_pos:
assert int(nb_iterations * 0.3) <= val <= int(nb_iterations * 0.7)
for val in nb_flipped_by_pos_det:
assert val in [0, nb_iterations]
# test StochasticParameter as p
aug = iaa.Fliplr(p=iap.Choice([0, 1], p=[0.7, 0.3]))
seen = [0, 0]
for _ in sm.xrange(1000):
observed = aug.augment_image(base_img)
if np.array_equal(observed, base_img):
seen[0] += 1
elif np.array_equal(observed, base_img_flipped):
seen[1] += 1
else:
assert False
assert 700 - 75 < seen[0] < 700 + 75
assert 300 - 75 < seen[1] < 300 + 75
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.Fliplr(p="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.Fliplr(p=1)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert params[0].p.value == 1
def test_Flipud():
reseed()
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
base_img_flipped = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8)
base_img_flipped = base_img_flipped[:, :, np.newaxis]
images = np.array([base_img])
images_flipped = np.array([base_img_flipped])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
keypoints_flipped = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=2), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=0)], shape=base_img.shape)]
# 0% chance of flip
aug = iaa.Flipud(0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# 0% chance of flip, heatmaps
aug = iaa.Flipud(0)
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0, 0.5, 0.75],
[0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = heatmaps.get_arr()
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), expected)
# 100% chance of flip
aug = iaa.Flipud(1.0)
aug_det = aug.to_deterministic()
for _ in sm.xrange(10):
observed = aug.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images_flipped
assert np.array_equal(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints_flipped
assert keypoints_equal(observed, expected)
# 100% chance of flip, heatmaps
aug = iaa.Flipud(1.0)
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0, 0.5, 0.75],
[0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = np.flipud(heatmaps.get_arr())
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), expected)
# 50% chance of flip
aug = iaa.Flipud(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_images_flipped = 0
nb_images_flipped_det = 0
nb_keypoints_flipped = 0
nb_keypoints_flipped_det = 0
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped += 1
observed = aug_det.augment_images(images)
if np.array_equal(observed, images_flipped):
nb_images_flipped_det += 1
observed = aug.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped += 1
observed = aug_det.augment_keypoints(keypoints)
if keypoints_equal(observed, keypoints_flipped):
nb_keypoints_flipped_det += 1
assert int(nb_iterations * 0.3) <= nb_images_flipped <= int(nb_iterations * 0.7)
assert int(nb_iterations * 0.3) <= nb_keypoints_flipped <= int(nb_iterations * 0.7)
assert nb_images_flipped_det in [0, nb_iterations]
assert nb_keypoints_flipped_det in [0, nb_iterations]
# 50% chance of flipped, multiple images, list as input
images_multi = [base_img, base_img]
aug = iaa.Flipud(0.5)
aug_det = aug.to_deterministic()
nb_iterations = 1000
nb_flipped_by_pos = [0] * len(images_multi)
nb_flipped_by_pos_det = [0] * len(images_multi)
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos[i] += 1
observed = aug_det.augment_images(images_multi)
for i in sm.xrange(len(images_multi)):
if np.array_equal(observed[i], base_img_flipped):
nb_flipped_by_pos_det[i] += 1
for val in nb_flipped_by_pos:
assert int(nb_iterations * 0.3) <= val <= int(nb_iterations * 0.7)
for val in nb_flipped_by_pos_det:
assert val in [0, nb_iterations]
# test StochasticParameter as p
aug = iaa.Flipud(p=iap.Choice([0, 1], p=[0.7, 0.3]))
seen = [0, 0]
for _ in sm.xrange(1000):
observed = aug.augment_image(base_img)
if np.array_equal(observed, base_img):
seen[0] += 1
elif np.array_equal(observed, base_img_flipped):
seen[1] += 1
else:
assert False
assert 700 - 75 < seen[0] < 700 + 75
assert 300 - 75 < seen[1] < 300 + 75
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.Flipud(p="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.Flipud(p=1)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert params[0].p.value == 1
def test_GaussianBlur():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no blur, shouldnt change anything
aug = iaa.GaussianBlur(sigma=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
#np.set_printoptions(formatter={'float_kind': lambda x: "%.6f" % x})
#from scipy import ndimage
#images2 = np.copy(images).astype(np.float32)
#images2[0, ...] = ndimage.gaussian_filter(images2[0, ...], 0.4)
#print(images2)
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# keypoints shouldnt be changed
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying blur sigmas
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
def test_AverageBlur():
reseed()
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[5, 5, 0] = 200
base_img[4, 5, 0] = 100
base_img[6, 5, 0] = 100
base_img[5, 4, 0] = 100
base_img[5, 6, 0] = 100
blur3x3 = np.copy(base_img)
blur3x3 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
blur4x4 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
blur5x5 = np.copy(base_img)
blur5x5 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no blur, shouldnt change anything
aug = iaa.AverageBlur(k=0)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, base_img)
# k=3
aug = iaa.AverageBlur(k=3)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur3x3)
# k=5
aug = iaa.AverageBlur(k=5)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur5x5)
# k as (3, 4)
aug = iaa.AverageBlur(k=(3, 4))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur4x4):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@1")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
# k as (3, 5)
aug = iaa.AverageBlur(k=(3, 5))
nb_iterations = 100
nb_seen = [0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur4x4):
nb_seen[1] += 1
elif np.array_equal(observed, blur5x5):
nb_seen[2] += 1
else:
raise Exception("Unexpected result in AverageBlur@2")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.23 <= p_seen[0] <= 0.43
assert 0.23 <= p_seen[1] <= 0.43
assert 0.23 <= p_seen[2] <= 0.43
# k as stochastic parameter
aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur5x5):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@3")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
# k as ((3, 5), (3, 5))
aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
possible = dict()
for kh in [3, 4, 5]:
for kw in [3, 4, 5]:
key = (kh, kw)
if kh == 0 or kw == 0:
possible[key] = np.copy(base_img)
else:
possible[key] = cv2.blur(base_img, (kh, kw))[..., np.newaxis]
nb_iterations = 250
#nb_seen = [0] * len(possible.keys())
nb_seen = dict([(key, 0) for key, val in possible.items()])
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
for key, img_aug in possible.items():
if np.array_equal(observed, img_aug):
nb_seen[key] += 1
# dont check sum here, because 0xX and Xx0 are all the same, i.e. much
# higher sum than nb_iterations
assert all([v > 0 for v in nb_seen.values()])
# keypoints shouldnt be changed
aug = iaa.AverageBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_MedianBlur():
reseed()
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[3:8, 3:8, 0] = 1
base_img[4:7, 4:7, 0] = 2
base_img[5:6, 5:6, 0] = 3
blur3x3 = np.zeros_like(base_img)
blur3x3[3:8, 3:8, 0] = 1
blur3x3[4:7, 4:7, 0] = 2
blur3x3[4, 4, 0] = 1
blur3x3[4, 6, 0] = 1
blur3x3[6, 4, 0] = 1
blur3x3[6, 6, 0] = 1
blur3x3[3, 3, 0] = 0
blur3x3[3, 7, 0] = 0
blur3x3[7, 3, 0] = 0
blur3x3[7, 7, 0] = 0
blur5x5 = np.copy(blur3x3)
blur5x5[4, 3, 0] = 0
blur5x5[3, 4, 0] = 0
blur5x5[6, 3, 0] = 0
blur5x5[7, 4, 0] = 0
blur5x5[4, 7, 0] = 0
blur5x5[3, 6, 0] = 0
blur5x5[6, 7, 0] = 0
blur5x5[7, 6, 0] = 0
blur5x5[blur5x5 > 1] = 1
#blur5x5 = np.zeros_like(base_img)
#blur5x5[2:9, 2:9, 0] = 1
#blur5x5[3:8, 3:8, 0] = 1
#blur5x5[4:7, 4:7, 0] = 1
#blur5x5[5:6, 5:6, 0] = 1
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no blur, shouldnt change anything
aug = iaa.MedianBlur(k=1)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, base_img)
# k=3
aug = iaa.MedianBlur(k=3)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur3x3)
# k=5
aug = iaa.MedianBlur(k=5)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur5x5)
# k as (3, 5)
aug = iaa.MedianBlur(k=(3, 5))
seen = [False, False]
for i in sm.xrange(100):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
seen[0] = True
elif np.array_equal(observed, blur5x5):
seen[1] = True
else:
raise Exception("Unexpected result in MedianBlur@1")
if all(seen):
break
assert all(seen)
# k as stochastic parameter
aug = iaa.MedianBlur(k=iap.Choice([3, 5]))
seen = [False, False]
for i in sm.xrange(100):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
seen[0] += True
elif np.array_equal(observed, blur5x5):
seen[1] += True
else:
raise Exception("Unexpected result in MedianBlur@2")
if all(seen):
break
assert all(seen)
# keypoints shouldnt be changed
aug = iaa.MedianBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_AddToHueAndSaturation():
reseed()
# interestingly, when using this RGB2HSV and HSV2RGB conversion from skimage, the results
# differ quite a bit from the cv2 ones
"""
def _add_hue_saturation(img, value):
img_hsv = color.rgb2hsv(img / 255.0)
img_hsv[..., 0:2] += (value / 255.0)
return color.hsv2rgb(img_hsv) * 255
"""
def _add_hue_saturation(img, value):
img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
img_hsv[..., 0:2] += value
return cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB)
base_img = np.zeros((2, 2, 3), dtype=np.uint8)
base_img[..., 0] += 20
base_img[..., 1] += 40
base_img[..., 2] += 60
aug = iaa.AddToHueAndSaturation(0)
observed = aug.augment_image(base_img)
expected = base_img
assert np.allclose(observed, expected)
aug = iaa.AddToHueAndSaturation(30)
observed = aug.augment_image(base_img)
expected = _add_hue_saturation(base_img, 30)
diff = np.abs(observed.astype(np.float32) - expected)
assert np.all(diff <= 3)
aug = iaa.AddToHueAndSaturation((0, 2))
base_img = base_img[0:1, 0:1, :]
expected_imgs = [
iaa.AddToHueAndSaturation(0).augment_image(base_img),
iaa.AddToHueAndSaturation(1).augment_image(base_img),
iaa.AddToHueAndSaturation(2).augment_image(base_img)
]
assert not np.array_equal(expected_imgs[0], expected_imgs[1])
assert not np.array_equal(expected_imgs[1], expected_imgs[2])
assert not np.array_equal(expected_imgs[0], expected_imgs[2])
nb_iterations = 300
seen = dict([(i, 0) for i, _ in enumerate(expected_imgs)])
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
for i, expected_img in enumerate(expected_imgs):
if np.allclose(observed, expected_img):
seen[i] += 1
assert np.sum(list(seen.values())) == nb_iterations
n_exp = nb_iterations / 3
n_exp_tol = nb_iterations * 0.1
assert all([n_exp - n_exp_tol < v < n_exp + n_exp_tol for v in seen.values()])
def test_Grayscale():
reseed()
def _compute_luminosity(r, g, b):
return 0.21 * r + 0.72 * g + 0.07 * b
base_img = np.zeros((4, 4, 3), dtype=np.uint8)
base_img[..., 0] += 10
base_img[..., 1] += 20
base_img[..., 2] += 30
aug = iaa.Grayscale(0.0)
observed = aug.augment_image(base_img)
expected = np.copy(base_img)
assert np.allclose(observed, expected)
aug = iaa.Grayscale(1.0)
observed = aug.augment_image(base_img)
luminosity = _compute_luminosity(10, 20, 30)
expected = np.zeros_like(base_img) + luminosity
assert np.allclose(observed, expected.astype(np.uint8))
aug = iaa.Grayscale(0.5)
observed = aug.augment_image(base_img)
luminosity = _compute_luminosity(10, 20, 30)
expected = 0.5 * base_img + 0.5 * luminosity
assert np.allclose(observed, expected.astype(np.uint8))
aug = iaa.Grayscale((0.0, 1.0))
base_img = base_img[0:1, 0:1, :]
base_img_gray = iaa.Grayscale(1.0).augment_image(base_img)
distance_max = np.average(np.abs(base_img_gray.astype(np.int32) - base_img.astype(np.int32)))
nb_iterations = 1000
distances = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
distance = np.average(np.abs(observed.astype(np.int32) - base_img.astype(np.int32))) / distance_max
distances.append(distance)
assert 0 - 1e-4 < min(distances) < 0.1
assert 0.4 < np.average(distances) < 0.6
assert 0.9 < max(distances) < 1.0 + 1e-4
nb_bins = 5
hist, _ = np.histogram(distances, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / nb_iterations
assert density_expected - density_tolerance < density < density_expected + density_tolerance
def test_Convolve():
reseed()
img = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
img = np.uint8(img)
# matrix is None
aug = iaa.Convolve(matrix=None)
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: [None])
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
# matrix is [[1]]
aug = iaa.Convolve(matrix=np.float32([[1]]))
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: np.float32([[1]]))
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
# matrix is [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
m = np.float32([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
])
aug = iaa.Convolve(matrix=m)
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: m)
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
# matrix is [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
m = np.float32([
[0, 0, 0],
[0, 2, 0],
[0, 0, 0]
])
aug = iaa.Convolve(matrix=m)
observed = aug.augment_image(img)
assert np.array_equal(observed, 2*img)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: m)
observed = aug.augment_image(img)
assert np.array_equal(observed, 2*img)
# matrix is [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
# with 3 channels
m = np.float32([
[0, 0, 0],
[0, 2, 0],
[0, 0, 0]
])
img3 = np.tile(img[..., np.newaxis], (1, 1, 3))
aug = iaa.Convolve(matrix=m)
observed = aug.augment_image(img3)
assert np.array_equal(observed, 2*img3)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: m)
observed = aug.augment_image(img3)
assert np.array_equal(observed, 2*img3)
# matrix is [[0, -1, 0], [0, 10, 0], [0, 0, 0]]
m = np.float32([
[0, -1, 0],
[0, 10, 0],
[0, 0, 0]
])
expected = np.uint8([
[10*1+(-1)*4, 10*2+(-1)*5, 10*3+(-1)*6],
[10*4+(-1)*1, 10*5+(-1)*2, 10*6+(-1)*3],
[10*7+(-1)*4, 10*8+(-1)*5, 10*9+(-1)*6]
])
aug = iaa.Convolve(matrix=m)
observed = aug.augment_image(img)
assert np.array_equal(observed, expected)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: m)
observed = aug.augment_image(img)
assert np.array_equal(observed, expected)
# changing matrices when using callable
expected = []
for i in sm.xrange(5):
expected.append(img * i)
aug = iaa.Convolve(matrix=lambda _img, nb_channels, random_state: np.float32([[random_state.randint(0, 5)]]))
seen = [False] * 5
for _ in sm.xrange(200):
observed = aug.augment_image(img)
found = False
for i, expected_i in enumerate(expected):
if np.array_equal(observed, expected_i):
seen[i] = True
found = True
break
assert found
if all(seen):
break
assert all(seen)
# bad datatype for matrix
got_exception = False
try:
aug = iaa.Convolve(matrix=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# get_parameters()
matrix = np.int32([[1]])
aug = iaa.Convolve(matrix=matrix)
params = aug.get_parameters()
assert np.array_equal(params[0], matrix)
assert params[1] == "constant"
# TODO add test for keypoints once their handling was improved in Convolve
def test_Sharpen():
reseed()
def _compute_sharpened_base_img(lightness, m):
base_img_sharpened = np.zeros((3, 3), dtype=np.float32)
k = 1
# note that cv2 uses reflection padding by default
base_img_sharpened[0, 0] = (m[1, 1] + lightness)/k * 10 + 4 * (m[0, 0]/k) * 10 + 4 * (m[2, 2]/k) * 20
base_img_sharpened[0, 2] = base_img_sharpened[0, 0]
base_img_sharpened[2, 0] = base_img_sharpened[0, 0]
base_img_sharpened[2, 2] = base_img_sharpened[0, 0]
base_img_sharpened[0, 1] = (m[1, 1] + lightness)/k * 10 + 6 * (m[0, 1]/k) * 10 + 2 * (m[2, 2]/k) * 20
base_img_sharpened[1, 0] = base_img_sharpened[0, 1]
base_img_sharpened[1, 2] = base_img_sharpened[0, 1]
base_img_sharpened[2, 1] = base_img_sharpened[0, 1]
base_img_sharpened[1, 1] = (m[1, 1] + lightness)/k * 20 + 8 * (m[0, 1]/k) * 10
#print("A", base_img_sharpened, "Am", m)
base_img_sharpened = np.clip(base_img_sharpened, 0, 255).astype(np.uint8)
return base_img_sharpened
base_img = [[10, 10, 10],
[10, 20, 10],
[10, 10, 10]]
base_img = np.uint8(base_img)
m = np.float32([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
m_noop = np.float32([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
base_img_sharpened = _compute_sharpened_base_img(1, m)
aug = iaa.Sharpen(alpha=0, lightness=1)
observed = aug.augment_image(base_img)
expected = base_img
assert np.allclose(observed, expected)
aug = iaa.Sharpen(alpha=1.0, lightness=1)
observed = aug.augment_image(base_img)
expected = base_img_sharpened
assert np.allclose(observed, expected)
aug = iaa.Sharpen(alpha=0.5, lightness=1)
observed = aug.augment_image(base_img)
expected = _compute_sharpened_base_img(0.5*1, 0.5 * m_noop + 0.5 * m)
assert np.allclose(observed, expected.astype(np.uint8))
aug = iaa.Sharpen(alpha=0.75, lightness=1)
observed = aug.augment_image(base_img)
expected = _compute_sharpened_base_img(0.75*1, 0.25 * m_noop + 0.75 * m)
assert np.allclose(observed, expected)
aug = iaa.Sharpen(alpha=iap.Choice([0.5, 1.0]), lightness=1)
observed = aug.augment_image(base_img)
expected1 = _compute_sharpened_base_img(0.5*1, m)
expected2 = _compute_sharpened_base_img(1.0*1, m)
assert np.allclose(observed, expected1) or np.allclose(observed, expected2)
got_exception = False
try:
aug = iaa.Sharpen(alpha="test", lightness=1)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
aug = iaa.Sharpen(alpha=1.0, lightness=2)
observed = aug.augment_image(base_img)
expected = _compute_sharpened_base_img(1.0*2, m)
assert np.allclose(observed, expected)
aug = iaa.Sharpen(alpha=1.0, lightness=3)
observed = aug.augment_image(base_img)
expected = _compute_sharpened_base_img(1.0*3, m)
assert np.allclose(observed, expected)
aug = iaa.Sharpen(alpha=1.0, lightness=iap.Choice([1.0, 1.5]))
observed = aug.augment_image(base_img)
expected1 = _compute_sharpened_base_img(1.0*1.0, m)
expected2 = _compute_sharpened_base_img(1.0*1.5, m)
assert np.allclose(observed, expected1) or np.allclose(observed, expected2)
got_exception = False
try:
aug = iaa.Sharpen(alpha=1.0, lightness="test")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# this part doesnt really work so far due to nonlinearities resulting from clipping to uint8
"""
# alpha range
aug = iaa.Sharpen(alpha=(0.0, 1.0), lightness=1)
base_img = np.copy(base_img)
base_img_sharpened_min = _compute_sharpened_base_img(0.0*1, 1.0 * m_noop + 0.0 * m)
base_img_sharpened_max = _compute_sharpened_base_img(1.0*1, 0.0 * m_noop + 1.0 * m)
#distance_max = np.average(np.abs(base_img_sharpened.astype(np.float32) - base_img.astype(np.float32)))
distance_max = np.average(np.abs(base_img_sharpened_max - base_img_sharpened_min))
nb_iterations = 250
distances = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
distance = np.average(np.abs(observed.astype(np.float32) - base_img_sharpened_max.astype(np.float32))) / distance_max
distances.append(distance)
print(distances)
print(min(distances), np.average(distances), max(distances))
assert 0 - 1e-4 < min(distances) < 0.1
assert 0.4 < np.average(distances) < 0.6
assert 0.9 < max(distances) < 1.0 + 1e-4
nb_bins = 5
hist, _ = np.histogram(distances, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / nb_iterations
assert density_expected - density_tolerance < density < density_expected + density_tolerance
# lightness range
aug = iaa.Sharpen(alpha=1.0, lightness=(0.5, 2.0))
base_img = np.copy(base_img)
base_img_sharpened = _compute_sharpened_base_img(1.0*2.0, m)
distance_max = np.average(np.abs(base_img_sharpened.astype(np.int32) - base_img.astype(np.int32)))
nb_iterations = 250
distances = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
distance = np.average(np.abs(observed.astype(np.int32) - base_img.astype(np.int32))) / distance_max
distances.append(distance)
assert 0 - 1e-4 < min(distances) < 0.1
assert 0.4 < np.average(distances) < 0.6
assert 0.9 < max(distances) < 1.0 + 1e-4
nb_bins = 5
hist, _ = np.histogram(distances, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / nb_iterations
assert density_expected - density_tolerance < density < density_expected + density_tolerance
"""
def test_Emboss():
reseed()
base_img = [[10, 10, 10],
[10, 20, 10],
[10, 10, 15]]
base_img = np.uint8(base_img)
def _compute_embossed_base_img(img, alpha, strength):
img = np.copy(img)
base_img_embossed = np.zeros((3, 3), dtype=np.float32)
m = np.float32([[-1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
strength_matrix = strength * np.float32([
[-1, -1, 0],
[-1, 0, 1],
[0, 1, 1]
])
ms = m + strength_matrix
#print(ms)
for i in range(base_img_embossed.shape[0]):
for j in range(base_img_embossed.shape[1]):
for u in range(ms.shape[0]):
for v in range(ms.shape[1]):
weight = ms[u, v]
inputs_i = abs(i + (u - (ms.shape[0]-1)//2))
inputs_j = abs(j + (v - (ms.shape[1]-1)//2))
#print("in1", inputs_i, inputs_j)
#print("A", i, j, u, v, "|", inputs_i, inputs_j, "|", None, weight, "->", None)
if inputs_i >= img.shape[0]:
diff = inputs_i - (img.shape[0]-1)
inputs_i = img.shape[0] - 1 - diff
if inputs_j >= img.shape[1]:
diff = inputs_j - (img.shape[1]-1)
inputs_j = img.shape[1] - 1 - diff
#print("in2", inputs_i, inputs_j)
inputs = img[inputs_i, inputs_j]
#print("B", i, j, u, v, "|", inputs_i, inputs_j, "|", inputs, weight, "->", inputs * weight)
base_img_embossed[i, j] += inputs * weight
#print(ms)
#print(base_img_embossed)
return np.clip((1-alpha) * img + alpha * base_img_embossed, 0, 255).astype(np.uint8)
def _allclose(a, b):
return np.max(a.astype(np.float32) - b.astype(np.float32)) <= 2.1
aug = iaa.Emboss(alpha=0, strength=1)
observed = aug.augment_image(base_img)
expected = base_img
assert _allclose(observed, expected)
aug = iaa.Emboss(alpha=1.0, strength=1)
observed = aug.augment_image(base_img)
expected = _compute_embossed_base_img(base_img, alpha=1.0, strength=1)
assert _allclose(observed, expected)
aug = iaa.Emboss(alpha=0.5, strength=1)
observed = aug.augment_image(base_img)
expected = _compute_embossed_base_img(base_img, alpha=0.5, strength=1)
assert _allclose(observed, expected.astype(np.uint8))
aug = iaa.Emboss(alpha=0.75, strength=1)
observed = aug.augment_image(base_img)
expected = _compute_embossed_base_img(base_img, alpha=0.75, strength=1)
assert _allclose(observed, expected)
aug = iaa.Emboss(alpha=iap.Choice([0.5, 1.0]), strength=1)
observed = aug.augment_image(base_img)
expected1 = _compute_embossed_base_img(base_img, alpha=0.5, strength=1)
expected2 = _compute_embossed_base_img(base_img, alpha=1.0, strength=1)
assert _allclose(observed, expected1) or np.allclose(observed, expected2)
got_exception = False
try:
aug = iaa.Emboss(alpha="test", strength=1)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
aug = iaa.Emboss(alpha=1.0, strength=2)
observed = aug.augment_image(base_img)
expected = _compute_embossed_base_img(base_img, alpha=1.0, strength=2)
assert _allclose(observed, expected)
aug = iaa.Emboss(alpha=1.0, strength=3)
observed = aug.augment_image(base_img)
expected = _compute_embossed_base_img(base_img, alpha=1.0, strength=3)
assert _allclose(observed, expected)
aug = iaa.Emboss(alpha=1.0, strength=6)
observed = aug.augment_image(base_img)
expected = _compute_embossed_base_img(base_img, alpha=1.0, strength=6)
assert _allclose(observed, expected)
aug = iaa.Emboss(alpha=1.0, strength=iap.Choice([1.0, 1.5]))
observed = aug.augment_image(base_img)
expected1 = _compute_embossed_base_img(base_img, alpha=1.0, strength=1.0)
expected2 = _compute_embossed_base_img(base_img, alpha=1.0, strength=1.5)
assert _allclose(observed, expected1) or np.allclose(observed, expected2)
got_exception = False
try:
aug = iaa.Emboss(alpha=1.0, strength="test")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_AdditiveGaussianNoise():
reseed()
#base_img = np.array([[128, 128, 128],
# [128, 128, 128],
# [128, 128, 128]], dtype=np.uint8)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
#base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no noise, shouldnt change anything
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
# zero-centered noise
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
# std correct?
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
# non-zero loc
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
# varying locs
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
# varying locs by stochastic param
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
# varying stds
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
# varying stds by stochastic param
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
#def test_MultiplicativeGaussianNoise():
# pass
#def test_ReplacingGaussianNoise():
# pass
def test_Dropout():
reseed()
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no dropout, shouldnt change anything
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
# 50% dropout
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exception for wrong parameter datatype
got_exception = False
try:
aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_CoarseDropout():
reseed()
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exception for bad parameters
got_exception = False
try:
aug = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_Multiply():
reseed()
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no multiply, shouldnt change anything
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# multiply >1.0
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
# multiply <1.0
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
# keypoints shouldnt be changed
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying multiply factors
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
# test channelwise
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_MultiplyElementwise():
reseed()
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no multiply, shouldnt change anything
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# multiply >1.0
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
# multiply <1.0
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
# keypoints shouldnt be changed
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying multiply factors
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
# values should change between pixels
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_ReplaceElementwise():
reseed()
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no replace, shouldnt change anything
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# replace at 100 percent prob., should change everything
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
"""
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
"""
# keypoints shouldnt be changed
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=1, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert params[0].p.value >= 1 - 1e-8
assert params[1].value == 2
assert params[2].value == 0
def test_SaltAndPepper():
reseed()
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
# not more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
def test_CoarseSaltAndPepper():
reseed()
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert density - tolerance < density < density + tolerance
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_Salt():
reseed()
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed >= 127) # Salt() occasionally replaces with 127,
# which probably should be the center-point here anyways
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
# not more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
def test_CoarseSalt():
reseed()
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert density - tolerance < density < density + tolerance
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_Pepper():
reseed()
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
# not more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
def test_CoarsePepper():
reseed()
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
assert density - tolerance < density < density + tolerance
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_Add():
reseed()
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no add, shouldnt change anything
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# add > 0
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
# add < 0
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
# test other parameters
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
# keypoints shouldnt be changed
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying values
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_AddElementwise():
reseed()
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no add, shouldnt change anything
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# add > 0
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
# add < 0
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
# test other parameters
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
# keypoints shouldnt be changed
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying values
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
# values should change between pixels
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_Invert():
reseed()
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
expected = zeros
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
expected = zeros + 255
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
expected = zeros
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 200)
expected = zeros + 100
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 100)
expected = zeros + 200
assert np.array_equal(observed, expected)
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=0.8)
img = np.zeros((1, 1, 1), dtype=np.uint8) + 256
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=iap.Binomial(0.8))
img = np.zeros((1, 1, 1), dtype=np.uint8) + 256
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=0.5, per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 256
observed = aug.augment_image(img)
assert len(np.unique(observed)) == 2
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=iap.Binomial(0.8), per_channel=0.7)
img = np.zeros((1, 1, 20), dtype=np.uint8) + 256
seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) == 2:
seen[1] += 1
else:
assert False
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
# keypoints shouldnt be changed
aug = iaa.Invert(p=1.0)
aug_det = iaa.Invert(p=1.0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.Invert(p="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.Invert(p=0.5, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.Invert(p=1, per_channel=False, min_value=10, max_value=20)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].p.value == 1
assert params[1].value == 0
assert params[2] == 10
assert params[3] == 20
def test_ContrastNormalization():
reseed()
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
# contrast stays the same
observed = iaa.ContrastNormalization(alpha=1.0).augment_image(zeros + 50)
expected = zeros + 50
assert np.array_equal(observed, expected)
# image with mean intensity (ie 128), contrast cannot be changed
observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128)
expected = zeros + 128
assert np.array_equal(observed, expected)
# increase contrast
observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128 + 10)
expected = zeros + 128 + 20
assert np.array_equal(observed, expected)
observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128 - 10)
expected = zeros + 128 - 20
assert np.array_equal(observed, expected)
# decrease contrast
observed = iaa.ContrastNormalization(alpha=0.5).augment_image(zeros + 128 + 10)
expected = zeros + 128 + 5
assert np.array_equal(observed, expected)
observed = iaa.ContrastNormalization(alpha=0.5).augment_image(zeros + 128 - 10)
expected = zeros + 128 - 5
assert np.array_equal(observed, expected)
# increase contrast by stochastic parameter
observed = iaa.ContrastNormalization(alpha=iap.Choice([2.0, 3.0])).augment_image(zeros + 128 + 10)
expected1 = zeros + 128 + 20
expected2 = zeros + 128 + 30
assert np.array_equal(observed, expected1) or np.array_equal(observed, expected2)
# change contrast by tuple
nb_iterations = 1000
nb_changed = 0
last = None
for i in sm.xrange(nb_iterations):
observed = iaa.ContrastNormalization(alpha=(0.5, 2.0)).augment_image(zeros + 128 + 40)
if last is None:
last = observed
else:
if not np.array_equal(observed, last):
nb_changed += 1
p_changed = nb_changed / (nb_iterations-1)
assert p_changed > 0.5
# per_channel=True
aug = iaa.ContrastNormalization(alpha=(1.0, 6.0), per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 128 + 10
observed = aug.augment_image(img)
uq = np.unique(observed)
assert len(uq) > 5
# per_channel with probability
aug = iaa.ContrastNormalization(alpha=(1.0, 4.0), per_channel=0.7)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 128 + 10
seen = [0, 0]
for _ in sm.xrange(1000):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) >= 2:
seen[1] += 1
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
# keypoints shouldnt be changed
aug = iaa.ContrastNormalization(alpha=2.0)
aug_det = iaa.ContrastNormalization(alpha=2.0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# test exceptions for wrong parameter types
got_exception = False
try:
aug = iaa.ContrastNormalization(alpha="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
aug = iaa.ContrastNormalization(alpha=1.5, per_channel="test")
except Exception:
got_exception = True
assert got_exception
# test get_parameters()
aug = iaa.ContrastNormalization(alpha=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_Affine():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works with that
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)],
shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)],
shape=image.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage")
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage")
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage, order=ALL
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage", order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage", order=[0, 1, 3])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = cv2, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="cv2", order=[0, 1, 3])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = cv2, order=StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="cv2", order=iap.Choice([0, 1, 3]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 45 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
#assert len(observed_aug[0].nonzero()[0]) == 1
#assert len(observed_aug_det[0].nonzero()[0]) == 1
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))).all()
assert (pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))).all()
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
assert len(set(averages)) > 200
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode="edge")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "edge"
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=["constant", "edge"])
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "constant" in aug.mode.a and "edge" in aug.mode.a
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=iap.Choice(["constant", "edge"]))
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "constant" in aug.mode.a and "edge" in aug.mode.a
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
aug = iaa.Affine(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
aug = iaa.Affine(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
aug = iaa.Affine(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=False, shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order in case of backend=cv2
got_exception = False
try:
aug = iaa.Affine(backend="cv2", order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype in case of backend=cv2
got_exception = False
try:
aug = iaa.Affine(backend="cv2", order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.Affine(scale=1, translate_px=2, rotate=3, shear=4, order=1, cval=0, mode="constant", backend="cv2")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
assert params[7] == "cv2" # backend
def test_AffineCv2():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.AffineCv2(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.AffineCv2(scale={"x": 1.75, "y": 1.0}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.AffineCv2(scale={"x": 1.0, "y": 1.75}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works with that
aug = iaa.AffineCv2(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)],
shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)],
shape=image.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.AffineCv2(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.AffineCv2(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=ALL
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=list
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, order=[0, 1, 2])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, order=iap.Choice([0, 1, 2]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0.3333, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0, "y": 0.3333}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.AffineCv2(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.AffineCv2(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 45 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
#assert len(observed_aug[0].nonzero()[0]) == 1
#assert len(observed_aug_det[0].nonzero()[0]) == 1
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))).all()
assert (pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))).all()
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
assert len(set(averages)) > 200
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=["replicate", "reflect"])
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "replicate" in aug.mode.a and "reflect" in aug.mode.a
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=iap.Choice(["replicate", "reflect"]))
assert isinstance(aug.mode, iap.Choice)
assert len(aug.mode.a) == 2 and "replicate" in aug.mode.a and "reflect" in aug.mode.a
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
aug = iaa.AffineCv2(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
aug = iaa.AffineCv2(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
aug = iaa.AffineCv2(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=False, shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order
got_exception = False
try:
aug = iaa.AffineCv2(order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype
got_exception = False
try:
aug = iaa.AffineCv2(order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.AffineCv2(scale=1, translate_px=2, rotate=3, shear=4, order=1, cval=0, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
def test_PiecewiseAffine():
reseed()
img = np.zeros((60, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
mask = img > 0
heatmaps = ia.HeatmapsOnImage((img / 255.0).astype(np.float32), shape=(60, 80, 3))
heatmaps_arr = heatmaps.get_arr()
# -----
# scale
# -----
# basic test
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_image(img)
assert 100.0 < np.average(observed[mask]) < np.average(img[mask])
assert 75.0 > np.average(observed[~mask]) > np.average(img[~mask])
# basic test, heatmaps
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert 100.0/255.0 < np.average(observed_arr[mask]) < np.average(heatmaps_arr[mask])
assert 75.0/255.0 > np.average(observed_arr[~mask]) > np.average(heatmaps_arr[~mask])
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_image(img)
assert np.array_equal(observed, img)
# scale 0, heatmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed_arr, heatmaps_arr)
# stronger scale should lead to stronger changes
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
assert np.average(observed1[~mask]) < np.average(observed2[~mask])
# stronger scale should lead to stronger changes, heatmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_heatmaps([heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2 = aug2.augment_heatmaps([heatmaps])[0]
observed2_arr = observed2.get_arr()
assert observed1.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed1.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed1.max_value < heatmaps.max_value + 1e-6
assert observed2.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed2.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed2.max_value < heatmaps.max_value + 1e-6
assert np.average(observed1_arr[~mask]) < np.average(observed2_arr[~mask])
# scale as list
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
assert isinstance(aug.scale, iap.Choice)
assert 0.01 - 1e-8 < aug.scale.a[0] < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.scale.a[1] < 0.10 + 1e-8
avg1 = np.average([np.average(aug1.augment_image(img) * (~mask).astype(np.float32)) for _ in sm.xrange(3)])
avg2 = np.average([np.average(aug2.augment_image(img) * (~mask).astype(np.float32)) for _ in sm.xrange(3)])
seen = [0, 0]
for _ in sm.xrange(15):
observed = aug.augment_image(img)
avg = np.average(observed * (~mask).astype(np.float32))
diff1 = abs(avg - avg1)
diff2 = abs(avg - avg2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# scale as tuple
aug = iaa.PiecewiseAffine(scale=(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
# scale as StochasticParameter
aug = iaa.PiecewiseAffine(scale=iap.Uniform(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
# bad datatype for scale
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=False, nb_rows=12, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# rows and cols
# -----
# verify effects of rows/cols
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = []
std2 = []
for _ in sm.xrange(3):
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
grad_vert1 = observed1[1:, :].astype(np.float32) - observed1[:-1, :].astype(np.float32)
grad_vert2 = observed2[1:, :].astype(np.float32) - observed2[:-1, :].astype(np.float32)
grad_vert1 = grad_vert1 * (~mask[1:, :]).astype(np.float32)
grad_vert2 = grad_vert2 * (~mask[1:, :]).astype(np.float32)
std1.append(np.std(grad_vert1))
std2.append(np.std(grad_vert2))
std1 = np.average(std1)
std2 = np.average(std2)
assert std1 < std2
# -----
# rows
# -----
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
assert isinstance(aug.nb_rows, iap.Choice)
assert aug.nb_rows.a[0] == 4
assert aug.nb_rows.a[1] == 20
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_image(img)
grad_vert = observed[1:, :].astype(np.float32) - observed[:-1, :].astype(np.float32)
grad_vert = grad_vert * (~mask[1:, :]).astype(np.float32)
std = np.std(grad_vert)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# rows as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
# rows as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=iap.DiscreteUniform(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
# bad datatype for rows
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=False, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# nb_cols
# -----
# cols as list
img_cols = img.T
mask_cols = mask.T
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
assert isinstance(aug.nb_cols, iap.Choice)
assert aug.nb_cols.a[0] == 4
assert aug.nb_cols.a[1] == 20
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=20)
std1 = []
std2 = []
for _ in sm.xrange(3):
observed1 = aug1.augment_image(img_cols)
observed2 = aug2.augment_image(img_cols)
grad_hori1 = observed1[:, 1:].astype(np.float32) - observed1[:, :-1].astype(np.float32)
grad_hori2 = observed2[:, 1:].astype(np.float32) - observed2[:, :-1].astype(np.float32)
grad_hori1 = grad_hori1 * (~mask_cols[:, 1:]).astype(np.float32)
grad_hori2 = grad_hori2 * (~mask_cols[:, 1:]).astype(np.float32)
std1.append(np.std(grad_hori1))
std2.append(np.std(grad_hori2))
std1 = np.average(std1)
std2 = np.average(std2)
seen = [0, 0]
for _ in sm.xrange(15):
observed = aug.augment_image(img_cols)
grad_hori = observed[:, 1:].astype(np.float32) - observed[:, :-1].astype(np.float32)
grad_hori = grad_hori * (~mask_cols[:, 1:]).astype(np.float32)
std = np.std(grad_hori)
diff1 = abs(std - std1)
diff2 = abs(std - std2)
if diff1 < diff2:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 0
assert seen[1] > 0
# cols as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
# cols as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=iap.DiscreteUniform(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
# bad datatype for cols
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# order
# -----
# single int for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=0)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 0
# list for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=[0, 1, 3])
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
# StochasticParameter for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=iap.Choice([0, 1, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
# ALL for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3, 4, 5]])
# bad datatype for order
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# cval
# -----
# cval as deterministic
img = np.zeros((50, 50, 3), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10, mode="constant", cval=0)
observed = aug.augment_image(img)
assert np.sum([observed[:, :] == [0, 0, 0]]) > 0
# cval as deterministic, heatmaps should always use cval=0
heatmaps = ia.HeatmapsOnImage(np.zeros((50, 50, 1), dtype=np.float32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10, mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum([observed.get_arr()[:, :] >= 0.01]) == 0
# cval as list
img = np.zeros((20, 20), dtype=np.uint8) + 255
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5, mode="constant", cval=[0, 10])
assert isinstance(aug.cval, iap.Choice)
assert aug.cval.a[0] == 0
assert aug.cval.a[1] == 10
seen = [0, 0, 0]
for _ in sm.xrange(30):
observed = aug.augment_image(img)
nb_0 = np.sum([observed[:, :] == 0])
nb_10 = np.sum([observed[:, :] == 10])
if nb_0 > 0:
seen[0] += 1
elif nb_10 > 0:
seen[1] += 1
else:
seen[2] += 1
assert seen[0] > 5
assert seen[1] > 5
assert seen[2] <= 4
# cval as tuple
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="constant", cval=(0, 10))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
# cval as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="constant", cval=iap.DiscreteUniform(0, 10))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
# ALL as cval
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="constant", cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
# bas datatype for cval
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# mode
# -----
# single string for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
# list for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=["nearest", "edge", "symmetric"])
assert isinstance(aug.mode, iap.Choice)
assert all([v in aug.mode.a for v in ["nearest", "edge", "symmetric"]])
# StochasticParameter for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=iap.Choice(["nearest", "edge", "symmetric"]))
assert isinstance(aug.mode, iap.Choice)
assert all([v in aug.mode.a for v in ["nearest", "edge", "symmetric"]])
# ALL for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([v in aug.mode.a for v in ["constant", "edge", "symmetric", "reflect", "wrap"]])
# bad datatype for mode
got_exception = False
try:
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# ---------
# keypoints
# ---------
# basic test
img = np.zeros((100, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
mask = img > 0
kps = [ia.Keypoint(x=10, y=20), ia.Keypoint(x=10, y=40),
ia.Keypoint(x=70, y=20), ia.Keypoint(x=70, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
aug_det = aug.to_deterministic()
observed_img = aug_det.augment_image(img)
observed_kpsoi = aug_det.augment_keypoints([kpsoi])
assert not keypoints_equal([kpsoi], observed_kpsoi)
for kp in observed_kpsoi[0].keypoints:
assert observed_img[int(kp.y), int(kp.x)] > 0
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=10, nb_cols=10)
observed = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], observed)
# keypoints outside of image
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kps = [ia.Keypoint(x=-10, y=-20)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
observed = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], observed)
# ---------
# get_parameters
# ---------
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=10, order=1, cval=2, mode="nearest", absolute_scale=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert isinstance(params[5], iap.Deterministic)
assert params[6] == False
assert 0.1 - 1e-8 < params[0].value < 0.1 + 1e-8
assert params[1].value == 8
assert params[2].value == 10
assert params[3].value == 1
assert params[4].value == 2
assert params[5].value == "nearest"
def test_PerspectiveTransform():
reseed()
img = np.zeros((30, 30), dtype=np.uint8)
img[10:20, 10:20] = 255
heatmaps = ia.HeatmapsOnImage((img / 255.0).astype(np.float32), shape=img.shape)
# without keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(img)
expected = img[int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8)]
assert all([abs(s1-s2)<=1 for s1, s2 in zip(observed.shape, expected.shape)])
if observed.shape != expected.shape:
observed = ia.imresize_single_image(observed, expected.shape[0:2], interpolation="cubic")
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
"""
from scipy import misc
misc.imshow(
np.hstack([
observed,
expected,
np.abs(observed.astype(np.int32) - expected.astype(np.int32)).astype(np.uint8)
])
)
print(np.average(np.abs(observed.astype(np.int32) - expected.astype(np.int32))))
"""
assert np.average(np.abs(observed.astype(np.int32) - expected.astype(np.int32))) < 30.0
# with keep_size
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_image(img)
expected = img[int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(expected, img.shape[0:2], interpolation="cubic")
assert observed.shape == img.shape
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed.astype(np.int32) - expected.astype(np.int32))) < 30.0
#expected = ia.imresize_single_image(expected, (30, 30))
# with keep_size, heatmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_heatmaps([heatmaps])[0]
expected = heatmaps.get_arr()[int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image((expected*255).astype(np.uint8), img.shape[0:2], interpolation="cubic")
expected = (expected / 255.0).astype(np.float32)
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed.get_arr() - expected)) < 30.0
#expected = ia.imresize_single_image(expected, (30, 30))
# with keep_size, RGB images
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
imgs = np.tile(img[np.newaxis, :, :, np.newaxis], (2, 1, 1, 3))
observed = aug.augment_images(imgs)
for img_idx in sm.xrange(2):
for c in sm.xrange(3):
observed_i = observed[img_idx, :, :, c]
expected = imgs[img_idx, int(30*0.2):int(30*0.8), int(30*0.2):int(30*0.8), c]
expected = ia.imresize_single_image(expected, imgs.shape[1:3], interpolation="cubic")
assert observed_i.shape == imgs.shape[1:3]
# differences seem to mainly appear around the border of the inner rectangle, possibly
# due to interpolation
assert np.average(np.abs(observed_i.astype(np.int32) - expected.astype(np.int32))) < 30.0
#expected = ia.imresize_single_image(expected, (30, 30))
# tuple for scale
aug = iaa.PerspectiveTransform(scale=(0.1, 0.2))
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.1 - 1e-8 < aug.jitter.scale.a.value < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.b.value < 0.2 + 1e-8
# list for scale
aug = iaa.PerspectiveTransform(scale=[0.1, 0.2, 0.3])
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
# StochasticParameter for scale
aug = iaa.PerspectiveTransform(scale=iap.Choice([0.1, 0.2, 0.3]))
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
# bad datatype for scale
got_exception = False
try:
aug = iaa.PerspectiveTransform(scale=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# keypoint augmentation without keep_size
# TODO deviations of around 0.4-0.7 in this and the next test (between expected and observed
# coordinates) -- why?
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=10-0.2*30, y=10-0.2*30),
ia.Keypoint(x=14-0.2*30, y=11-0.2*30)
]
for kp_observed, kp_expected in zip(observed[0].keypoints, kps_expected):
assert kp_expected.x - 1.5 < kp_observed.x < kp_expected.x + 1.5
assert kp_expected.y - 1.5 < kp_observed.y < kp_expected.y + 1.5
# keypoint augmentation with keep_size
kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_keypoints([kpsoi])
kps_expected = [
ia.Keypoint(x=((10-0.2*30)/(30*0.6))*30, y=((10-0.2*30)/(30*0.6))*30),
ia.Keypoint(x=((14-0.2*30)/(30*0.6))*30, y=((11-0.2*30)/(30*0.6))*30)
]
for kp_observed, kp_expected in zip(observed[0].keypoints, kps_expected):
assert kp_expected.x - 1.5 < kp_observed.x < kp_expected.x + 1.5
assert kp_expected.y - 1.5 < kp_observed.y < kp_expected.y + 1.5
# get_parameters
aug = iaa.PerspectiveTransform(scale=0.1, keep_size=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Normal)
assert isinstance(params[0].scale, iap.Deterministic)
assert 0.1 - 1e-8 < params[0].scale.value < 0.1 + 1e-8
assert params[1] == False
def test_ElasticTransformation():
reseed()
img = np.zeros((50, 50), dtype=np.uint8) + 255
img = np.pad(img, ((100, 100), (100, 100)), mode="constant", constant_values=0)
mask = img > 0
heatmaps = ia.HeatmapsOnImage((img / 255.0).astype(np.float32), shape=img.shape)
# test basic funtionality
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_image(img)
# assume that some white/255 pixels have been moved away from the center and replaced by black/0 pixels
assert np.sum(observed[mask]) < np.sum(img[mask])
# assume that some black/0 pixels have been moved away from the outer area and replaced by white/255 pixels
assert np.sum(observed[~mask]) > np.sum(img[~mask])
# test basic funtionality, heatmaps
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.sum(observed.get_arr()[mask]) < np.sum(heatmaps.get_arr()[mask])
assert np.sum(observed.get_arr()[~mask]) > np.sum(heatmaps.get_arr()[~mask])
# test effects of increased alpha strength
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
# assume that the inner area has become more black-ish when using high alphas (more white pixels were moved out of the inner area)
assert np.sum(observed1[mask]) > np.sum(observed2[mask])
# assume that the outer area has become more white-ish when using high alphas (more black pixels were moved into the inner area)
assert np.sum(observed1[~mask]) < np.sum(observed2[~mask])
# test effects of increased alpha strength, heatmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_heatmaps([heatmaps])[0]
observed2 = aug2.augment_heatmaps([heatmaps])[0]
assert observed1.shape == heatmaps.shape
assert observed2.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed1.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed1.max_value < heatmaps.max_value + 1e-6
assert heatmaps.min_value - 1e-6 < observed2.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed2.max_value < heatmaps.max_value + 1e-6
assert np.sum(observed1.get_arr()[mask]) > np.sum(observed2.get_arr()[mask])
assert np.sum(observed1.get_arr()[~mask]) < np.sum(observed2.get_arr()[~mask])
# test effectsof increased sigmas
aug1 = iaa.ElasticTransformation(alpha=3.0, sigma=0.1)
aug2 = iaa.ElasticTransformation(alpha=3.0, sigma=3.0)
observed1 = aug1.augment_image(img)
observed2 = aug2.augment_image(img)
observed1_std_hori = np.std(observed1.astype(np.float32)[:, 1:] - observed1.astype(np.float32)[:, :-1])
observed2_std_hori = np.std(observed2.astype(np.float32)[:, 1:] - observed2.astype(np.float32)[:, :-1])
observed1_std_vert = np.std(observed1.astype(np.float32)[1:, :] - observed1.astype(np.float32)[:-1, :])
observed2_std_vert = np.std(observed2.astype(np.float32)[1:, :] - observed2.astype(np.float32)[:-1, :])
observed1_std = (observed1_std_hori + observed1_std_vert) / 2
observed2_std = (observed2_std_hori + observed2_std_vert) / 2
assert observed1_std > observed2_std
# test alpha being iap.Choice
aug = iaa.ElasticTransformation(alpha=iap.Choice([0.001, 5.0]), sigma=0.25)
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(img)
diff = np.average(np.abs(img.astype(np.float32) - observed.astype(np.float32)))
if diff < 1.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
# test alpha being tuple
aug = iaa.ElasticTransformation(alpha=(1.0, 2.0), sigma=0.25)
assert isinstance(aug.alpha, iap.Uniform)
assert isinstance(aug.alpha.a, iap.Deterministic)
assert isinstance(aug.alpha.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.alpha.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.alpha.b.value < 2.0 + 1e-8
# test alpha having bad datatype
got_exception = False
try:
aug = iaa.ElasticTransformation(alpha=False, sigma=0.25)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# test sigma being iap.Choice
aug = iaa.ElasticTransformation(alpha=3.0, sigma=iap.Choice([0.01, 5.0]))
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_image(img)
observed_std_hori = np.std(observed.astype(np.float32)[:, 1:] - observed.astype(np.float32)[:, :-1])
observed_std_vert = np.std(observed.astype(np.float32)[1:, :] - observed.astype(np.float32)[:-1, :])
observed_std = (observed_std_hori + observed_std_vert) / 2
if observed_std > 10.0:
seen[0] += 1
else:
seen[1] += 1
assert seen[0] > 10
assert seen[1] > 10
# test sigma being tuple
aug = iaa.ElasticTransformation(alpha=0.25, sigma=(1.0, 2.0))
assert isinstance(aug.sigma, iap.Uniform)
assert isinstance(aug.sigma.a, iap.Deterministic)
assert isinstance(aug.sigma.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.sigma.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.sigma.b.value < 2.0 + 1e-8
# test sigma having bad datatype
got_exception = False
try:
aug = iaa.ElasticTransformation(alpha=0.25, sigma=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# order
# no proper tests here, because unclear how to test
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3, 4, 5]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=1)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 1
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=[0, 1, 2])
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=iap.Choice([0, 1, 2, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3]])
got_exception = False
try:
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# cval
# few proper tests here, because unclear how to test
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=128)
assert isinstance(aug.cval, iap.Deterministic)
assert aug.cval.value == 128
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=(128, 255))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 128
assert aug.cval.b.value == 255
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=[16, 32, 64])
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=iap.Choice([16, 32, 64]))
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant", cval=255)
img = np.zeros((50, 50), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) > 0
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant", cval=0)
img = np.zeros((50, 50), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) == 0
got_exception = False
try:
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# cval with heatmaps
heatmaps = ia.HeatmapsOnImage(np.zeros((32, 32, 1), dtype=np.float32), shape=(32, 32, 3))
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.sum(observed.get_arr() > 0.01) == 0
# mode
# no proper tests here, because unclear how to test
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest", "reflect", "wrap"]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=["constant", "nearest"])
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=iap.Choice(["constant", "nearest"]))
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
got_exception = False
try:
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# keypoints
# currently shouldnt change
kps = [ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4)]
kpsoi = ia.KeypointsOnImage(kps, shape=(10, 10))
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0)
observed = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], observed)
# get_parameters()
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=2, cval=10, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert 0.25 - 1e-8 < params[0].value < 0.25 + 1e-8
assert 1.0 - 1e-8 < params[1].value < 1.0 + 1e-8
assert params[2].value == 2
assert params[3].value == 10
assert params[4].value == "constant"
def test_copy_dtypes_for_restore():
# TODO using dtype=np.bool is causing this to fail as it ends up being <type bool> instead of
# <type 'numpy.bool_'>. Any problems from that for the library?
images = [
np.zeros((1, 1, 3), dtype=np.uint8),
np.zeros((10, 16, 3), dtype=np.float32),
np.zeros((20, 10, 6), dtype=np.int32)
]
dtypes_copy = iaa.copy_dtypes_for_restore(images, force_list=False)
assert all([dtype_i.type == dtype_j for dtype_i, dtype_j in zip(dtypes_copy, [np.uint8, np.float32, np.int32])])
dts = [np.uint8, np.float32, np.int32]
for dt in dts:
images = np.zeros((10, 16, 32, 3), dtype=dt)
dtypes_copy = iaa.copy_dtypes_for_restore(images)
assert isinstance(dtypes_copy, np.dtype)
assert dtypes_copy.type == dt
dtypes_copy = iaa.copy_dtypes_for_restore(images, force_list=True)
assert isinstance(dtypes_copy, list)
assert all([dtype_i.type == dt for dtype_i in dtypes_copy])
def test_restore_augmented_image_dtype_():
image = np.zeros((16, 32, 3), dtype=np.uint8)
image_result = iaa.restore_augmented_image_dtype_(image, np.int32)
assert image_result.dtype.type == np.int32
def test_restore_augmented_image_dtype():
image = np.zeros((16, 32, 3), dtype=np.uint8)
image_result = iaa.restore_augmented_image_dtype(image, np.int32)
assert image_result.dtype.type == np.int32
def test_restore_augmented_images_dtypes_():
images = np.zeros((10, 16, 32, 3), dtype=np.int32)
dtypes = iaa.copy_dtypes_for_restore(images)
images = images.astype(np.uint8)
assert images.dtype.type == np.uint8
images_result = iaa.restore_augmented_images_dtypes_(images, dtypes)
assert images_result.dtype.type == np.int32
images = [np.zeros((16, 32, 3), dtype=np.int32) for _ in sm.xrange(10)]
dtypes = iaa.copy_dtypes_for_restore(images)
images = [image.astype(np.uint8) for image in images]
assert all([image.dtype.type == np.uint8 for image in images])
images_result = iaa.restore_augmented_images_dtypes_(images, dtypes)
assert all([image_result.dtype.type == np.int32 for image_result in images_result])
def test_restore_augmented_images_dtypes():
images = np.zeros((10, 16, 32, 3), dtype=np.int32)
dtypes = iaa.copy_dtypes_for_restore(images)
images = images.astype(np.uint8)
assert images.dtype.type == np.uint8
images_restored = iaa.restore_augmented_images_dtypes(images, dtypes)
assert images_restored.dtype.type == np.int32
images = [np.zeros((16, 32, 3), dtype=np.int32) for _ in sm.xrange(10)]
dtypes = iaa.copy_dtypes_for_restore(images)
images = [image.astype(np.uint8) for image in images]
assert all([image.dtype.type == np.uint8 for image in images])
images_restored = iaa.restore_augmented_images_dtypes(images, dtypes)
assert all([image_restored.dtype.type == np.int32 for image_restored in images_restored])
def test_clip_augmented_image_():
image = np.zeros((1, 3), dtype=np.uint8)
image[0, 0] = 10
image[0, 1] = 20
image[0, 2] = 30
image_clipped = iaa.clip_augmented_image_(image, min_value=15, max_value=25)
assert image_clipped[0, 0] == 15
assert image_clipped[0, 1] == 20
assert image_clipped[0, 2] == 25
def test_clip_augmented_image():
image = np.zeros((1, 3), dtype=np.uint8)
image[0, 0] = 10
image[0, 1] = 20
image[0, 2] = 30
image_clipped = iaa.clip_augmented_image(image, min_value=15, max_value=25)
assert image_clipped[0, 0] == 15
assert image_clipped[0, 1] == 20
assert image_clipped[0, 2] == 25
def test_clip_augmented_images_():
images = np.zeros((2, 1, 3), dtype=np.uint8)
images[:, 0, 0] = 10
images[:, 0, 1] = 20
images[:, 0, 2] = 30
images_clipped = iaa.clip_augmented_images_(images, min_value=15, max_value=25)
assert np.all(images_clipped[:, 0, 0] == 15)
assert np.all(images_clipped[:, 0, 1] == 20)
assert np.all(images_clipped[:, 0, 2] == 25)
images = [np.zeros((1, 3), dtype=np.uint8) for _ in sm.xrange(2)]
for i in sm.xrange(len(images)):
images[i][0, 0] = 10
images[i][0, 1] = 20
images[i][0, 2] = 30
images_clipped = iaa.clip_augmented_images_(images, min_value=15, max_value=25)
assert isinstance(images_clipped, list)
assert all([images_clipped[i][0, 0] == 15 for i in sm.xrange(len(images))])
assert all([images_clipped[i][0, 1] == 20 for i in sm.xrange(len(images))])
assert all([images_clipped[i][0, 2] == 25 for i in sm.xrange(len(images))])
def test_clip_augmented_images():
images = np.zeros((2, 1, 3), dtype=np.uint8)
images[:, 0, 0] = 10
images[:, 0, 1] = 20
images[:, 0, 2] = 30
images_clipped = iaa.clip_augmented_images(images, min_value=15, max_value=25)
assert np.all(images_clipped[:, 0, 0] == 15)
assert np.all(images_clipped[:, 0, 1] == 20)
assert np.all(images_clipped[:, 0, 2] == 25)
images = [np.zeros((1, 3), dtype=np.uint8) for _ in sm.xrange(2)]
for i in sm.xrange(len(images)):
images[i][0, 0] = 10
images[i][0, 1] = 20
images[i][0, 2] = 30
images_clipped = iaa.clip_augmented_images(images, min_value=15, max_value=25)
assert isinstance(images_clipped, list)
assert all([images_clipped[i][0, 0] == 15 for i in sm.xrange(len(images))])
assert all([images_clipped[i][0, 1] == 20 for i in sm.xrange(len(images))])
assert all([images_clipped[i][0, 2] == 25 for i in sm.xrange(len(images))])
def test_Augmenter():
reseed()
class DummyAugmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return []
# --------
# __init__
# --------
# TODO incomplete tests, handle only cases that were missing in code coverage report
aug = DummyAugmenter()
assert aug.random_state == ia.CURRENT_RANDOM_STATE
aug = DummyAugmenter(deterministic=True)
assert aug.random_state != ia.CURRENT_RANDOM_STATE
rs = np.random.RandomState(123)
aug = DummyAugmenter(random_state=rs)
assert aug.random_state == rs
aug = DummyAugmenter(random_state=123)
assert aug.random_state.randint(0, 10**6) == np.random.RandomState(123).randint(0, 10**6)
# --------
# augment_batches
# --------
# TODO incomplete tests, handle only cases that were missing in code coverage report
aug = DummyAugmenter()
batches_aug = list(aug.augment_batches([[]]))
assert isinstance(batches_aug, list)
assert len(batches_aug) == 1
assert isinstance(batches_aug[0], list)
aug = DummyAugmenter()
image_batches = [np.zeros((1, 2, 2, 3), dtype=np.uint8)]
batches_aug = list(aug.augment_batches(image_batches))
assert isinstance(batches_aug, list)
assert len(batches_aug) == 1
assert array_equal_lists(batches_aug, image_batches)
aug = DummyAugmenter()
image_batches = [[np.zeros((2, 2, 3), dtype=np.uint8), np.zeros((2, 3, 3))]]
batches_aug = list(aug.augment_batches(image_batches))
assert isinstance(batches_aug, list)
assert len(batches_aug) == 1
assert array_equal_lists(batches_aug[0], image_batches[0])
aug = DummyAugmenter()
got_exception = False
try:
batches_aug = list(aug.augment_batches(None))
except Exception:
got_exception = True
assert got_exception
aug = DummyAugmenter()
got_exception = False
try:
batches_aug = list(aug.augment_batches([None]))
except Exception as exc:
got_exception = True
assert "Unknown datatype of batch" in str(exc)
assert got_exception
aug = DummyAugmenter()
got_exception = False
try:
batches_aug = list(aug.augment_batches([[None]]))
except Exception as exc:
got_exception = True
assert "Unknown datatype in batch[0]" in str(exc)
assert got_exception
# --------
# augment_images
# --------
# TODO incomplete tests, handle only cases that were missing in code coverage report
aug = DummyAugmenter()
with warnings.catch_warnings(record=True) as caught_warnings:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
images_aug = aug.augment_images(np.zeros((16, 32, 3), dtype=np.uint8))
# Verify some things
assert len(caught_warnings) == 1
assert "indicates that you provided a single image with shape (H, W, C)" in str(caught_warnings[-1].message)
aug = DummyAugmenter()
got_exception = False
try:
images_aug = aug.augment_images(None)
except Exception:
got_exception = True
assert got_exception
# behaviour when getting arrays as input and lists as output of augmenter
aug = iaa.Crop(((1, 8), (1, 8), (1, 8), (1, 8)), keep_size=False)
images = np.zeros((16, 64, 64, 3), dtype=np.uint8)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_images(images)
if ia.is_np_array(observed):
seen[0] += 1
else:
seen[1] += 1
assert all([image.ndim == 3 and 48 <= image.shape[0] <= 62 and 48 <= image.shape[1] <= 62 and image.shape[2] == 3 for image in observed])
assert seen[0] <= 3
assert seen[1] >= 17
# same as above but image's channel axis is now 1
aug = iaa.Crop(((1, 8), (1, 8), (1, 8), (1, 8)), keep_size=False)
images = np.zeros((16, 64, 64, 1), dtype=np.uint8)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_images(images)
if ia.is_np_array(observed):
seen[0] += 1
else:
seen[1] += 1
assert all([image.ndim == 3 and 48 <= image.shape[0] <= 62 and 48 <= image.shape[1] <= 62 and image.shape[2] == 1 for image in observed])
assert seen[0] <= 3
assert seen[1] >= 17
# same as above but now with 2D images
aug = iaa.Crop(((1, 8), (1, 8), (1, 8), (1, 8)), keep_size=False)
images = np.zeros((16, 64, 64), dtype=np.uint8)
seen = [0, 0]
for _ in sm.xrange(20):
observed = aug.augment_images(images)
if ia.is_np_array(observed):
seen[0] += 1
else:
seen[1] += 1
assert all([image.ndim == 2 and 48 <= image.shape[0] <= 62 and 48 <= image.shape[1] <= 62 for image in observed])
assert seen[0] <= 3
assert seen[1] >= 17
# same as above but image's channel axis now varies between [None, 1, 3, 4, 9]
aug = iaa.Crop(((1, 8), (1, 8), (1, 8), (1, 8)), keep_size=False)
seen = [0, 0]
for _ in sm.xrange(20):
channels = np.random.choice([None, 1, 3, 4, 9], size=(16,))
images = [np.zeros((64, 64), dtype=np.uint8) if c is None else np.zeros((64, 64, c), dtype=np.uint8) for c in channels]
observed = aug.augment_images(images)
if ia.is_np_array(observed):
seen[0] += 1
else:
seen[1] += 1
for image, c in zip(observed, channels):
if c is None:
assert image.ndim == 2
else:
assert image.ndim == 3
assert image.shape[2] == c
assert 48 <= image.shape[0] <= 62
assert 48 <= image.shape[1] <= 62
assert seen[0] == 0
assert seen[1] == 20
# --------
# _augment_images
# --------
# TODO incomplete tests, handle only cases that were missing in code coverage report
class DummyAugmenterCallsParent(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return super(DummyAugmenterCallsParent, self)._augment_images(images, random_state, parents, hooks)
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return super(DummyAugmenterCallsParent, self)._augment_heatmaps(heatmaps, random_state, parents, hooks)
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return super(DummyAugmenterCallsParent, self)._augment_keypoints(keypoints_on_images, random_state, parents, hooks)
def get_parameters(self):
return super(DummyAugmenterCallsParent, self).get_parameters()
aug = DummyAugmenterCallsParent()
got_exception = False
try:
images_aug = aug.augment_images(np.zeros((2, 4, 4, 3), dtype=np.uint8))
except NotImplementedError:
got_exception = True
assert got_exception
# --------
# _augment_heatmaps
# --------
# TODO incomplete tests, handle only cases that were missing in code coverage report
heatmaps = ia.HeatmapsOnImage(np.zeros((3, 3, 1), dtype=np.float32), shape=(3, 3, 3))
got_exception = False
try:
heatmaps_aug = aug.augment_heatmaps([heatmaps])
except NotImplementedError:
got_exception = True
assert got_exception
# --------
# _augment_keypoints
# --------
# TODO incomplete tests, handle only cases that were missing in code coverage report
aug = DummyAugmenterCallsParent()
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)], shape=(4, 4, 3))]
got_exception = False
try:
keypoints_aug = aug.augment_keypoints(keypoints)
except NotImplementedError:
got_exception = True
assert got_exception
# --------
# augment_bounding_boxes
# --------
class DummyAugmenterBBs(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return [keypoints_on_images_i.shift(x=1) for keypoints_on_images_i in keypoints_on_images]
def get_parameters(self):
return []
aug = DummyAugmenterBBs()
bb = ia.BoundingBox(x1=1, y1=4, x2=2, y2=5)
bbs = [bb]
bbsois = [ia.BoundingBoxesOnImage(bbs, shape=(10, 10, 3))]
bbsois_aug = aug.augment_bounding_boxes(bbsois)
bb_aug = bbsois_aug[0].bounding_boxes[0]
assert bb_aug.x1 == 1+1
assert bb_aug.y1 == 4
assert bb_aug.x2 == 2+1
assert bb_aug.y2 == 5
# --------
# draw_grid
# --------
aug = DummyAugmenter()
image = np.zeros((3, 3, 3), dtype=np.uint8)
image[0, 0, :] = 10
image[0, 1, :] = 50
image[1, 1, :] = 255
# list, shape (3, 3, 3)
grid = aug.draw_grid([image], rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, grid_expected)
# list, shape (3, 3)
grid = aug.draw_grid([image[..., 0]], rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image[..., 0:1], image[..., 0:1]]),
np.hstack([image[..., 0:1], image[..., 0:1]])
])
grid_expected = np.tile(grid_expected, (1, 1, 3))
assert np.array_equal(grid, grid_expected)
# list, shape (2,)
got_exception = False
try:
grid = aug.draw_grid([np.zeros((2,), dtype=np.uint8)], rows=2, cols=2)
except Exception:
got_exception = True
assert got_exception
# array, shape (1, 3, 3, 3)
grid = aug.draw_grid(np.uint8([image]), rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, grid_expected)
# array, shape (3, 3, 3)
grid = aug.draw_grid(image, rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, grid_expected)
# array, shape (3, 3)
grid = aug.draw_grid(image[..., 0], rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image[..., 0:1], image[..., 0:1]]),
np.hstack([image[..., 0:1], image[..., 0:1]])
])
grid_expected = np.tile(grid_expected, (1, 1, 3))
assert np.array_equal(grid, grid_expected)
# array, shape (2,)
got_exception = False
try:
grid = aug.draw_grid(np.zeros((2,), dtype=np.uint8), rows=2, cols=2)
except Exception:
got_exception = True
assert got_exception
# --------
# localize_random_state
# --------
aug = DummyAugmenter()
assert aug.random_state == ia.CURRENT_RANDOM_STATE
aug_localized = aug.localize_random_state()
assert aug_localized.random_state != ia.CURRENT_RANDOM_STATE
# --------
# reseed
# --------
def _same_rs(rs1, rs2):
rs1_copy = copy.deepcopy(rs1)
rs2_copy = copy.deepcopy(rs2)
rnd1 = rs1_copy.randint(0, 10**6)
rnd2 = rs2_copy.randint(0, 10**6)
return rnd1 == rnd2
aug1 = DummyAugmenter()
aug2 = DummyAugmenter(deterministic=True)
aug0 = iaa.Sequential([aug1, aug2])
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.reseed()
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.reseed(deterministic_too=True)
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert not _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.reseed(random_state=123)
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
assert aug0_copy.random_state.randint(0, 10**6) == np.random.RandomState(np.random.RandomState(123).randint(0, 10**6)).randint(0, 10**6)
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.reseed(random_state=np.random.RandomState(123))
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
assert aug0_copy.random_state.randint(0, 10**6) == np.random.RandomState(np.random.RandomState(123).randint(0, 10**6)).randint(0, 10**6)
# --------
# get_parameters
# --------
aug = DummyAugmenterCallsParent()
got_exception = False
try:
aug.get_parameters()
except NotImplementedError:
got_exception = True
assert got_exception
# --------
# get_all_children
# --------
aug1 = DummyAugmenter()
aug21 = DummyAugmenter()
aug2 = iaa.Sequential([aug21])
aug0 = iaa.Sequential([aug1, aug2])
children = aug0.get_all_children(flat=True)
assert isinstance(children, list)
assert children[0] == aug1
assert children[1] == aug2
assert children[2] == aug21
children = aug0.get_all_children(flat=False)
assert isinstance(children, list)
assert children[0] == aug1
assert children[1] == aug2
assert isinstance(children[2], list)
assert children[2][0] == aug21
# --------
# __repr__, __str__
# --------
class DummyAugmenterRepr(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return ["A", "B", "C"]
aug = DummyAugmenterRepr(name="Example")
assert aug.__repr__() == aug.__str__() == "DummyAugmenterRepr(name=Example, parameters=[A, B, C], deterministic=False)"
aug = DummyAugmenterRepr(name="Example", deterministic=True)
assert aug.__repr__() == aug.__str__() == "DummyAugmenterRepr(name=Example, parameters=[A, B, C], deterministic=True)"
def test_Augmenter_find():
reseed()
noop1 = iaa.Noop(name="Noop")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Noop(name="Noop2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
augs = seq1.find_augmenters_by_name("Seq")
assert len(augs) == 1
assert augs[0] == seq1
augs = seq1.find_augmenters_by_name("Seq2")
assert len(augs) == 1
assert augs[0] == seq2
augs = seq1.find_augmenters_by_names(["Seq", "Seq2"])
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
augs = seq1.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
augs = seq1.find_augmenters(lambda aug, parents: aug.name in ["Seq", "Seq2"])
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
augs = seq1.find_augmenters(lambda aug, parents: aug.name in ["Seq", "Seq2"] and len(parents) > 0)
assert len(augs) == 1
assert augs[0] == seq2
augs = seq1.find_augmenters(lambda aug, parents: aug.name in ["Seq", "Seq2"], flat=False)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == [seq2]
def test_Augmenter_remove():
reseed()
def get_seq():
noop1 = iaa.Noop(name="Noop")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Noop(name="Noop2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
return seq1
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: aug.name == "Seq2")
seqs = augs.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(seqs) == 1
assert seqs[0].name == "Seq"
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: aug.name == "Seq2" and len(parents) == 0)
seqs = augs.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(seqs) == 2
assert seqs[0].name == "Seq"
assert seqs[1].name == "Seq2"
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: True)
assert augs is not None
assert isinstance(augs, iaa.Noop)
augs = get_seq()
got_exception = False
try:
augs = augs.remove_augmenters(lambda aug, parents: aug.name == "Seq", copy=False)
except Exception as exc:
got_exception = True
assert "Inplace removal of topmost augmenter requested, which is currently not possible" in str(exc)
assert got_exception
augs = get_seq()
augs = augs.remove_augmenters(lambda aug, parents: True, noop_if_topmost=False)
assert augs is None
def test_Augmenter_hooks():
# TODO these tests change the input type from list to array. Might be reasnoable to change
# and test that scenario separetely
reseed()
image = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
image_lr = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8)
image_ud = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8)
image_lrud = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8)
image = image[:, :, np.newaxis]
image_lr = image_lr[:, :, np.newaxis]
image_ud = image_ud[:, :, np.newaxis]
image_lrud = image_lrud[:, :, np.newaxis]
seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
# preprocessing
def preprocessor(images, augmenter, parents):
img = np.copy(images)
img[0][1, 1, 0] += 1
return img
hooks = ia.HooksImages(preprocessor=preprocessor)
images_aug = seq.augment_images([image], hooks=hooks)
expected = np.copy(image_lrud)
expected[1, 1, 0] = 3
assert np.array_equal(images_aug[0], expected)
# postprocessing
def postprocessor(images, augmenter, parents):
img = np.copy(images)
img[0][1, 1, 0] += 1
return img
hooks = ia.HooksImages(postprocessor=postprocessor)
images_aug = seq.augment_images([image], hooks=hooks)
expected = np.copy(image_lrud)
expected[1, 1, 0] = 3
assert np.array_equal(images_aug[0], expected)
# propagating
def propagator(images, augmenter, parents, default):
if "Seq" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(propagator=propagator)
images_aug = seq.augment_images([image], hooks=hooks)
assert np.array_equal(images_aug[0], image)
# activation
def activator(images, augmenter, parents, default):
if "Flipud" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(activator=activator)
images_aug = seq.augment_images([image], hooks=hooks)
assert np.array_equal(images_aug[0], image_lr)
# keypoint aug deactivated
aug = iaa.Affine(translate_px=1)
def activator(keypoints_on_images, augmenter, parents, default):
return False
hooks = ia.HooksKeypoints(activator=activator)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)], shape=image.shape)]
keypoints_aug = seq.augment_keypoints(keypoints, hooks=hooks)
assert keypoints_equal(keypoints_aug, keypoints)
def test_Augmenter_copy_random_state():
image = ia.quokka_square(size=(128, 128))
images = np.array([image] * 64, dtype=np.uint8)
source = iaa.Sequential([
iaa.Fliplr(0.5, name="hflip"),
iaa.Dropout(0.05, name="dropout"),
iaa.Affine(translate_px=(-10, 10), name="translate", random_state=3),
iaa.GaussianBlur(1.0, name="blur", random_state=4)
], random_state=5)
target = iaa.Sequential([
iaa.Fliplr(0.5, name="hflip"),
iaa.Dropout(0.05, name="dropout"),
iaa.Affine(translate_px=(-10, 10), name="translate")
])
source.localize_random_state_()
target_cprs = target.copy_random_state(source, matching="position")
source_alt = source.remove_augmenters(lambda aug, parents: aug.name == "blur")
images_aug_source = source_alt.augment_images(images)
images_aug_target = target_cprs.augment_images(images)
#misc.imshow(np.hstack([images_aug_source[0], images_aug_source[1], images_aug_target[0], images_aug_target[1]]))
assert np.array_equal(images_aug_source, images_aug_target)
source[0].deterministic = True
target_cprs = target.copy_random_state(source, matching="position", copy_determinism=True)
source_alt = source.remove_augmenters(lambda aug, parents: aug.name == "blur")
images_aug_source = source_alt.augment_images(images)
images_aug_target = target_cprs.augment_images(images)
assert target_cprs[0].deterministic == True
assert np.array_equal(images_aug_source, images_aug_target)
source[0].deterministic = False
target[0].deterministic = False
target_cprs = target.copy_random_state(source, matching="name")
source_alt = source.remove_augmenters(lambda aug, parents: aug.name == "blur")
images_aug_source = source_alt.augment_images(images)
images_aug_target = target_cprs.augment_images(images)
assert np.array_equal(images_aug_source, images_aug_target)
source_alt = source.remove_augmenters(lambda aug, parents: aug.name == "blur")
source_det = source_alt.to_deterministic()
target_cprs_det = target.copy_random_state(source_det, matching="name",
copy_determinism=True)
images_aug_source1 = source_det.augment_images(images)
images_aug_target1 = target_cprs_det.augment_images(images)
images_aug_source2 = source_det.augment_images(images)
images_aug_target2 = target_cprs_det.augment_images(images)
assert np.array_equal(images_aug_source1, images_aug_source2)
assert np.array_equal(images_aug_target1, images_aug_target2)
assert np.array_equal(images_aug_source1, images_aug_target1)
assert np.array_equal(images_aug_source2, images_aug_target2)
source = iaa.Fliplr(0.5, name="hflip")
target = iaa.Fliplr(0.5, name="hflip")
got_exception = False
try:
target_cprs = target.copy_random_state(source, matching="name")
except Exception as exc:
got_exception = True
assert "localize_random_state" in str(exc)
assert got_exception
source = iaa.Fliplr(0.5, name="hflip-other-name")
target = iaa.Fliplr(0.5, name="hflip")
source.localize_random_state_()
got_exception = False
try:
target_cprs = target.copy_random_state(source, matching="name", matching_tolerant=False)
except Exception as exc:
got_exception = True
assert "not found among source augmenters" in str(exc)
assert got_exception
source = iaa.Fliplr(0.5, name="hflip")
target = iaa.Fliplr(0.5, name="hflip")
got_exception = False
try:
target_cprs = target.copy_random_state(source, matching="position")
except Exception as exc:
got_exception = True
assert "localize_random_state" in str(exc)
assert got_exception
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"), iaa.Fliplr(0.5, name="hflip2")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
got_exception = False
try:
target_cprs = target.copy_random_state(source, matching="position", matching_tolerant=False)
except Exception as exc:
got_exception = True
assert "different lengths" in str(exc)
assert got_exception
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"), iaa.Fliplr(0.5, name="hflip2")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
got_exception = False
try:
target_cprs = target.copy_random_state(source, matching="test")
except Exception as exc:
got_exception = True
assert "Unknown matching method" in str(exc)
assert got_exception
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"), iaa.Fliplr(0.5, name="hflip")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
with warnings.catch_warnings(record=True) as caught_warnings:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
target_cprs = target.copy_random_state(source, matching="name")
# Verify some things
assert len(caught_warnings) == 1
assert "contains multiple augmenters with the same name" in str(caught_warnings[-1].message)
def test_Sequential():
reseed()
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
image_lr = image_lr[:, :, np.newaxis]
images_lr = np.array([image_lr])
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
image_ud = image_ud[:, :, np.newaxis]
images_ud = np.array([image_ud])
image_lr_ud = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8) * 255
image_lr_ud = image_lr_ud[:, :, np.newaxis]
images_lr_ud_list = [image_lr_ud]
images_lr_ud = np.array([image_lr_ud])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)], shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2), ia.Keypoint(x=0, y=2),
ia.Keypoint(x=0, y=1)], shape=image.shape)]
aug = iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
])
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert np.array_equal(observed, images_lr_ud)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_lr_ud)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_lr_ud_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_lr_ud_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# heatmaps
heatmaps_arr = np.float32([[0, 0, 1.0],
[0, 0, 1.0],
[0, 1.0, 1.0]])
heatmaps_arr_expected = np.float32([[1.0, 1.0, 0.0],
[1.0, 0, 0],
[1.0, 0, 0]])
observed = aug.augment_heatmaps([ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))])[0]
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1.0 - 1e-6 < observed.max_value < 1.0 + 1e-6
assert np.array_equal(observed.get_arr(), heatmaps_arr_expected)
# 50% horizontal flip, 50% vertical flip
aug = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5)
])
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 200
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert np.array_equal(observed_aug, images) \
or np.array_equal(observed_aug, images_lr) \
or np.array_equal(observed_aug, images_ud) \
or np.array_equal(observed_aug, images_lr_ud)
assert np.array_equal(observed_aug_det, images) \
or np.array_equal(observed_aug_det, images_lr) \
or np.array_equal(observed_aug_det, images_ud) \
or np.array_equal(observed_aug_det, images_lr_ud)
assert (0.25 - 0.10) <= (1 - (nb_changed_aug / nb_iterations)) <= (0.25 + 0.10) # should be the same in roughly 25% of all cases
assert nb_changed_aug_det == 0
# random order
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8)
image = image[:, :, np.newaxis]
images = np.array([image])
images_first_second = (images + 10) * 10
images_second_first = (images * 10) + 10
heatmaps_arr = np.float32([[0.0, 0.5, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5]])
heatmaps_arr_first_second = (heatmaps_arr + 0.1) * 0.5
heatmaps_arr_second_first = (heatmaps_arr * 0.5) + 0.1
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=image.shape)]
keypoints_first_second = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=image.shape)]
keypoints_second_first = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0)], shape=image.shape)]
def images_first(images, random_state, parents, hooks):
return images + 10
def images_second(images, random_state, parents, hooks):
return images * 10
def heatmaps_first(heatmaps, random_state, parents, hooks):
for heatmaps_i in heatmaps:
heatmaps_i.arr_0to1 += 0.1
return heatmaps
def heatmaps_second(heatmaps, random_state, parents, hooks):
for heatmaps_i in heatmaps:
heatmaps_i.arr_0to1 *= 0.5
return heatmaps
def keypoints_first(keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for keypoint in keypoints_on_image.keypoints:
keypoint.x = keypoint.x + 1
return keypoints_on_images
def keypoints_second(keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for keypoint in keypoints_on_image.keypoints:
keypoint.y = keypoint.y + keypoint.x
return keypoints_on_images
aug_unrandom = iaa.Sequential([
iaa.Lambda(images_first, heatmaps_first, keypoints_first),
iaa.Lambda(images_second, heatmaps_second, keypoints_second)
], random_order=False)
aug_unrandom_det = aug_unrandom.to_deterministic()
aug_random = iaa.Sequential([
iaa.Lambda(images_first, heatmaps_first, keypoints_first),
iaa.Lambda(images_second, heatmaps_second, keypoints_second)
], random_order=True)
aug_random_det = aug_random.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 200
nb_images_first_second_unrandom = 0
nb_images_second_first_unrandom = 0
nb_images_first_second_random = 0
nb_images_second_first_random = 0
nb_heatmaps_first_second_unrandom = 0
nb_heatmaps_second_first_unrandom = 0
nb_heatmaps_first_second_random = 0
nb_heatmaps_second_first_random = 0
nb_keypoints_first_second_unrandom = 0
nb_keypoints_second_first_unrandom = 0
nb_keypoints_first_second_random = 0
nb_keypoints_second_first_random = 0
for i in sm.xrange(nb_iterations):
observed_aug_unrandom = aug_unrandom.augment_images(images)
observed_aug_unrandom_det = aug_unrandom_det.augment_images(images)
observed_aug_random = aug_random.augment_images(images)
observed_aug_random_det = aug_random_det.augment_images(images)
heatmaps_aug_unrandom = aug_unrandom.augment_heatmaps([heatmaps])[0]
heatmaps_aug_random = aug_random.augment_heatmaps([heatmaps])[0]
keypoints_aug_unrandom = aug_unrandom.augment_keypoints(keypoints)
keypoints_aug_random = aug_random.augment_keypoints(keypoints)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
if np.array_equal(observed_aug_unrandom, images_first_second):
nb_images_first_second_unrandom += 1
elif np.array_equal(observed_aug_unrandom, images_second_first):
nb_images_second_first_unrandom += 1
else:
raise Exception("Received output doesnt match any expected output.")
if np.array_equal(observed_aug_random, images_first_second):
nb_images_first_second_random += 1
elif np.array_equal(observed_aug_random, images_second_first):
nb_images_second_first_random += 1
else:
raise Exception("Received output doesnt match any expected output.")
if np.allclose(heatmaps_aug_unrandom.get_arr(), heatmaps_arr_first_second):
nb_heatmaps_first_second_unrandom += 1
elif np.allclose(heatmaps_aug_unrandom.get_arr(), heatmaps_arr_second_first):
nb_heatmaps_second_first_unrandom += 1
else:
raise Exception("Received output doesnt match any expected output.")
if np.allclose(heatmaps_aug_random.get_arr(), heatmaps_arr_first_second):
nb_heatmaps_first_second_random += 1
elif np.allclose(heatmaps_aug_random.get_arr(), heatmaps_arr_second_first):
nb_heatmaps_second_first_random += 1
else:
raise Exception("Received output doesnt match any expected output.")
if keypoints_equal(keypoints_aug_unrandom, keypoints_first_second):
nb_keypoints_first_second_unrandom += 1
elif keypoints_equal(keypoints_aug_unrandom, keypoints_second_first):
nb_keypoints_second_first_unrandom += 1
else:
raise Exception("Received output doesnt match any expected output.")
if keypoints_equal(keypoints_aug_random, keypoints_first_second):
nb_keypoints_first_second_random += 1
elif keypoints_equal(keypoints_aug_random, keypoints_second_first):
nb_keypoints_second_first_random += 1
else:
raise Exception("Received output doesnt match any expected output.")
assert nb_changed_aug == 0
assert nb_changed_aug_det == 0
assert nb_images_first_second_unrandom == nb_iterations
assert nb_images_second_first_unrandom == 0
assert nb_heatmaps_first_second_unrandom == nb_iterations
assert nb_heatmaps_second_first_unrandom == 0
assert nb_keypoints_first_second_unrandom == nb_iterations
assert nb_keypoints_second_first_unrandom == 0
assert (0.50 - 0.1) <= nb_images_first_second_random / nb_iterations <= (0.50 + 0.1)
assert (0.50 - 0.1) <= nb_images_second_first_random / nb_iterations <= (0.50 + 0.1)
assert (0.50 - 0.1) <= nb_keypoints_first_second_random / nb_iterations <= (0.50 + 0.1)
assert (0.50 - 0.1) <= nb_keypoints_second_first_random / nb_iterations <= (0.50 + 0.1)
# random order for heatmaps
# TODO this is now already tested above via lamdba functions?
aug = iaa.Sequential([
iaa.Affine(translate_px={"x": 1}),
iaa.Fliplr(1.0)
], random_order=True)
heatmaps_arr = np.float32([[0, 0, 1.0],
[0, 0, 1.0],
[0, 1.0, 1.0]])
heatmaps_arr_expected1 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0]])
heatmaps_arr_expected2 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
seen = [False, False]
for _ in sm.xrange(100):
observed = aug.augment_heatmaps([ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))])[0]
if np.allclose(observed.get_arr(), heatmaps_arr_expected1):
seen[0] = True
elif np.allclose(observed.get_arr(), heatmaps_arr_expected2):
seen[1] = True
else:
assert False
if all(seen):
break
assert all(seen)
# None as children
aug = iaa.Sequential(children=None)
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
aug = iaa.Sequential()
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
# Single child
aug = iaa.Sequential(iaa.Fliplr(1.0))
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
# Sequential of Sequential
aug = iaa.Sequential(iaa.Sequential(iaa.Fliplr(1.0)))
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
# Sequential of list of Sequentials
aug = iaa.Sequential([iaa.Sequential(iaa.Flipud(1.0)), iaa.Sequential(iaa.Fliplr(1.0))])
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(np.flipud(image)))
# add
aug = iaa.Sequential()
aug.add(iaa.Fliplr(1.0))
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
aug = iaa.Sequential(iaa.Fliplr(1.0))
aug.add(iaa.Flipud(1.0))
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(np.flipud(image)))
# get_parameters
aug = iaa.Sequential(iaa.Fliplr(1.0), random_order=False)
assert aug.get_parameters() == [False]
aug = iaa.Sequential(iaa.Fliplr(1.0), random_order=True)
assert aug.get_parameters() == [True]
# get_children_lists
flip = iaa.Fliplr(1.0)
aug = iaa.Sequential(flip)
assert aug.get_children_lists() == [aug]
# str/repr
flip = iaa.Fliplr(1.0)
aug = iaa.Sequential(flip, random_order=True)
expected = "Sequential(name=%s, random_order=%s, children=[%s], deterministic=%s)" % (aug.name, "True", str(flip), "False")
assert aug.__str__() == aug.__repr__() == expected
def test_SomeOf():
reseed()
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)], shape=zeros.shape)]
# no child augmenters
observed = iaa.SomeOf(n=0, children=[]).augment_image(zeros)
assert np.array_equal(observed, zeros)
observed = iaa.SomeOf(n=0).augment_image(zeros)
assert np.array_equal(observed, zeros)
# up to three child augmenters
augs = [iaa.Add(1), iaa.Add(2), iaa.Add(3)]
observed = iaa.SomeOf(n=0, children=augs).augment_image(zeros)
assert np.array_equal(observed, zeros)
observed = iaa.SomeOf(n=1, children=augs).augment_image(zeros)
assert np.sum(observed) in [9*1, 9*2, 9*3]
observed = iaa.SomeOf(n=2, children=augs).augment_image(zeros)
assert np.sum(observed) in [9*1+9*2, 9*1+9*3, 9*2+9*3]
observed = iaa.SomeOf(n=3, children=augs).augment_image(zeros)
assert np.sum(observed) in [9*1+9*2+9*3]
observed = iaa.SomeOf(n=4, children=augs).augment_image(zeros)
assert np.sum(observed) in [9*1+9*2+9*3]
# basic heatmaps test
augs = [iaa.Affine(translate_px={"x":1}), iaa.Affine(translate_px={"x":1}), iaa.Affine(translate_px={"x":1})]
heatmaps_arr = np.float32([[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0]])
heatmaps_arr0 = np.float32([[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0]])
heatmaps_arr1 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0]])
heatmaps_arr2 = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0]])
heatmaps_arr3 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
observed0 = iaa.SomeOf(n=0, children=augs).augment_heatmaps([heatmaps])[0]
observed1 = iaa.SomeOf(n=1, children=augs).augment_heatmaps([heatmaps])[0]
observed2 = iaa.SomeOf(n=2, children=augs).augment_heatmaps([heatmaps])[0]
observed3 = iaa.SomeOf(n=3, children=augs).augment_heatmaps([heatmaps])[0]
assert all([obs.shape == (3, 3, 3) for obs in [observed0, observed1, observed2, observed3]])
assert all([0 - 1e-6 < obs.min_value < 0 + 1e-6 for obs in [observed0, observed1, observed2, observed3]])
assert all([1 - 1e-6 < obs.max_value < 1 + 1e-6 for obs in [observed0, observed1, observed2, observed3]])
for obs, exp in zip([observed0, observed1, observed2, observed3], [heatmaps_arr0, heatmaps_arr1, heatmaps_arr2, heatmaps_arr3]):
assert np.array_equal(obs.get_arr(), exp)
# n as tuple
augs = [iaa.Add(1), iaa.Add(2), iaa.Add(4)]
nb_iterations = 1000
nb_observed = [0, 0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = iaa.SomeOf(n=(0, 3), children=augs).augment_image(zeros)
s = observed[0, 0, 0]
if s == 0:
nb_observed[0] += 1
if s & 1 > 0:
nb_observed[1] += 1
if s & 2 > 0:
nb_observed[2] += 1
if s & 4 > 0:
nb_observed[3] += 1
p_observed = [n/nb_iterations for n in nb_observed]
assert 0.25-0.1 <= p_observed[0] <= 0.25+0.1
assert 0.5-0.1 <= p_observed[1] <= 0.5+0.1
assert 0.5-0.1 <= p_observed[2] <= 0.5+0.1
assert 0.5-0.1 <= p_observed[3] <= 0.5+0.1
# in-order vs random order
augs = [iaa.Multiply(2.0), iaa.Add(100)]
observed = iaa.SomeOf(n=2, children=augs, random_order=False).augment_image(zeros)
assert np.sum(observed) == 9*100
nb_iterations = 1000
nb_observed = [0, 0]
for i in sm.xrange(nb_iterations):
augs = [iaa.Multiply(2.0), iaa.Add(100)]
observed = iaa.SomeOf(n=2, children=augs, random_order=True).augment_image(zeros)
s = np.sum(observed)
if s == 9*100:
nb_observed[0] += 1
elif s == 9*200:
nb_observed[1] += 1
else:
raise Exception("Unexpected sum: %.8f (@2)" % (s,))
p_observed = [n/nb_iterations for n in nb_observed]
assert 0.5-0.1 <= p_observed[0] <= 0.5+0.1
assert 0.5-0.1 <= p_observed[1] <= 0.5+0.1
# invalid argument for children
got_exception = False
try:
aug = iaa.SomeOf(1, children=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# n is None
aug = iaa.SomeOf(None, children=[iaa.Fliplr(1.0), iaa.Flipud(1.0)])
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.flipud(np.fliplr(image)))
# n is (x, None)
children = [iaa.Fliplr(1.0), iaa.Flipud(1.0), iaa.Add(5)]
image = np.random.randint(0, 255-5, size=(16, 16), dtype=np.uint8)
expected = [iaa.Sequential(children).augment_image(image)]
for _, aug in enumerate(children):
children_i = [child for child in children if child != aug]
expected.append(iaa.Sequential(children_i).augment_image(image))
aug = iaa.SomeOf((2, None), children)
seen = [0, 0, 0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(image)
found = 0
for i, expected_i in enumerate(expected):
if np.array_equal(observed, expected_i):
seen[i] += 1
found += 1
assert found == 1
assert 200 - 50 < seen[0] < 200 + 50
assert 200 - 50 < seen[1] + seen[2] + seen[3] < 200 + 50
# n is bad (int, "test")
got_exception = False
try:
aug = iaa.SomeOf((2, "test"), children=iaa.Fliplr(1.0))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# n is stochastic param
aug = iaa.SomeOf(iap.Choice([0, 1]), children=iaa.Fliplr(1.0))
image = np.random.randint(0, 255-5, size=(16, 16), dtype=np.uint8)
seen = [0, 1]
for _ in sm.xrange(100):
observed = aug.augment_image(image)
if np.array_equal(observed, image):
seen[0] += 1
elif np.array_equal(observed, np.fliplr(image)):
seen[1] += 1
else:
assert False
assert seen[0] > 10
assert seen[1] > 10
# bad datatype for n
got_exception = False
try:
aug = iaa.SomeOf(False, children=iaa.Fliplr(1.0))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# test for https://github.com/aleju/imgaug/issues/143
# (shapes change in child augmenters, leading to problems if input arrays are assumed to
# stay input arrays)
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.SomeOf(1, [
iaa.Crop((2, 0, 2, 0), keep_size=False),
iaa.Crop((1, 0, 1, 0), keep_size=False)
])
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image, image, image, image]))
assert isinstance(observed, list)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert isinstance(observed, list)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(image)
assert observed.shape in [(4, 8, 3), (6, 8, 3)]
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.SomeOf(1, [
iaa.Crop((2, 0, 2, 0), keep_size=True),
iaa.Crop((1, 0, 1, 0), keep_size=True)
])
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image, image, image, image]))
assert ia.is_np_array(observed)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert ia.is_np_array(observed)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
assert observed.shape in [(8, 8, 3)]
def test_OneOf():
reseed()
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
# one child augmenter
observed = iaa.OneOf(children=iaa.Add(1)).augment_image(zeros)
assert np.array_equal(observed, zeros + 1)
observed = iaa.OneOf(children=iaa.Sequential([iaa.Add(1)])).augment_image(zeros)
assert np.array_equal(observed, zeros + 1)
observed = iaa.OneOf(children=[iaa.Add(1)]).augment_image(zeros)
assert np.array_equal(observed, zeros + 1)
# up to three child augmenters
augs = [iaa.Add(1), iaa.Add(2), iaa.Add(3)]
aug = iaa.OneOf(augs)
results = {9*1: 0, 9*2: 0, 9*3: 0}
nb_iterations = 1000
for _ in sm.xrange(nb_iterations):
result = aug.augment_image(zeros)
s = np.sum(result)
results[s] += 1
expected = int(nb_iterations / len(augs))
expected_tolerance = int(nb_iterations * 0.05)
for key, val in results.items():
assert expected - expected_tolerance < val < expected + expected_tolerance
def test_Sometimes():
reseed()
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
image_lr = image_lr[:, :, np.newaxis]
images_lr_list = [image_lr]
images_lr = np.array([image_lr])
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
image_ud = image_ud[:, :, np.newaxis]
images_ud_list = [image_ud]
images_ud = np.array([image_ud])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)], shape=image.shape)]
keypoints_lr = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=0, y=0),
ia.Keypoint(x=0, y=1)], shape=image.shape)]
keypoints_ud = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=2),
ia.Keypoint(x=2, y=1)], shape=image.shape)]
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps_arr_lr = np.fliplr(heatmaps_arr)
heatmaps_arr_ud = np.flipud(heatmaps_arr)
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
# 100% chance of if-branch
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert np.array_equal(observed, images_lr)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_lr)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_lr_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_lr_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_lr)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_lr)
# 100% chance of if-branch, heatmaps
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), heatmaps_arr_lr)
# 100% chance of else-branch
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert np.array_equal(observed, images_ud)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_ud)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_ud_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_ud_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_ud)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_ud)
# 100% chance of else-branch, heatmaps
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), heatmaps_arr_ud)
# 50% if branch, 50% else branch
aug = iaa.Sometimes(0.5, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
nb_images_if_branch = 0
nb_images_else_branch = 0
nb_keypoints_if_branch = 0
nb_keypoints_else_branch = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
keypoints_aug = aug.augment_keypoints(keypoints)
keypoints_aug_det = aug.augment_keypoints(keypoints)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
if np.array_equal(observed_aug, images_lr):
nb_images_if_branch += 1
elif np.array_equal(observed_aug, images_ud):
nb_images_else_branch += 1
else:
raise Exception("Received output doesnt match any expected output.")
if keypoints_equal(keypoints_aug, keypoints_lr):
nb_keypoints_if_branch += 1
elif keypoints_equal(keypoints_aug, keypoints_ud):
nb_keypoints_else_branch += 1
else:
raise Exception("Received output doesnt match any expected output.")
assert (0.50 - 0.10) <= nb_images_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_images_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= (1 - (nb_changed_aug / nb_iterations)) <= (0.50 + 0.10) # should be the same in roughly 50% of all cases
assert nb_changed_aug_det == 0
# 50% if branch, otherwise no change
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
nb_images_if_branch = 0
nb_images_else_branch = 0
nb_keypoints_if_branch = 0
nb_keypoints_else_branch = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
keypoints_aug = aug.augment_keypoints(keypoints)
keypoints_aug_det = aug.augment_keypoints(keypoints)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
if np.array_equal(observed_aug, images_lr):
nb_images_if_branch += 1
elif np.array_equal(observed_aug, images):
nb_images_else_branch += 1
else:
raise Exception("Received output doesnt match any expected output.")
if keypoints_equal(keypoints_aug, keypoints_lr):
nb_keypoints_if_branch += 1
elif keypoints_equal(keypoints_aug, keypoints):
nb_keypoints_else_branch += 1
else:
raise Exception("Received output doesnt match any expected output.")
assert (0.50 - 0.10) <= nb_images_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_images_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_if_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= nb_keypoints_else_branch / nb_iterations <= (0.50 + 0.10)
assert (0.50 - 0.10) <= (1 - (nb_changed_aug / nb_iterations)) <= (0.50 + 0.10) # should be the same in roughly 50% of all cases
assert nb_changed_aug_det == 0
# p as stochastic parameter
image = np.zeros((1, 1), dtype=np.uint8) + 100
images = [image] * 10
aug = iaa.Sometimes(p=iap.Binomial(iap.Choice([0.0, 1.0])), then_list=iaa.Add(10))
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_images(images)
uq = np.unique(np.uint8(observed))
assert len(uq) == 1
if uq[0] == 100:
seen[0] += 1
elif uq[0] == 110:
seen[1] += 1
else:
assert False
assert seen[0] > 20
assert seen[1] > 20
# bad datatype for p
got_exception = False
try:
aug = iaa.Sometimes(p=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# both lists none
aug = iaa.Sometimes(0.2, then_list=None, else_list=None)
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
# then_list bad datatype
got_exception = False
try:
aug = iaa.Sometimes(p=0.2, then_list=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# else_list bad datatype
got_exception = False
try:
aug = iaa.Sometimes(p=0.2, then_list=None, else_list=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# deactivated propagation via hooks
image = np.random.randint(0, 255-10, size=(16, 16), dtype=np.uint8)
aug = iaa.Sometimes(1.0, iaa.Add(10))
observed1 = aug.augment_image(image)
observed2 = aug.augment_image(image, hooks=ia.HooksImages(propagator=lambda images, augmenter, parents, default: False if augmenter == aug else default))
assert np.array_equal(observed1, image + 10)
assert np.array_equal(observed2, image)
# get_parameters
aug = iaa.Sometimes(0.75)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert 0.75 - 1e-8 < params[0].p.value < 0.75 + 1e-8
# str/repr
then_list = iaa.Add(1)
else_list = iaa.Add(2)
aug = iaa.Sometimes(0.5, then_list=then_list, else_list=else_list, name="SometimesTest")
expected = "Sometimes(p=%s, name=%s, then_list=%s, else_list=%s, deterministic=%s)" % (
"Binomial(Deterministic(float 0.50000000))",
"SometimesTest",
"Sequential(name=SometimesTest-then, random_order=False, children=[%s], deterministic=False)" % (str(then_list),),
"Sequential(name=SometimesTest-else, random_order=False, children=[%s], deterministic=False)" % (str(else_list),),
"False"
)
assert aug.__repr__() == aug.__str__() == expected
aug = iaa.Sometimes(0.5, then_list=None, else_list=None, name="SometimesTest")
expected = "Sometimes(p=%s, name=%s, then_list=%s, else_list=%s, deterministic=%s)" % (
"Binomial(Deterministic(float 0.50000000))",
"SometimesTest",
"Sequential(name=SometimesTest-then, random_order=False, children=[], deterministic=False)",
"Sequential(name=SometimesTest-else, random_order=False, children=[], deterministic=False)",
"False"
)
assert aug.__repr__() == aug.__str__() == expected
# Test for https://github.com/aleju/imgaug/issues/143
# (shapes change in child augmenters, leading to problems if input arrays are assumed to
# stay input arrays)
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop((2, 0, 2, 0), keep_size=False),
iaa.Crop((1, 0, 1, 0), keep_size=False)
)
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image, image, image, image]))
assert isinstance(observed, list) or (ia.is_np_array(observed) and len(set([img.shape for img in observed])) == 1)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert isinstance(observed, list) or (ia.is_np_array(observed) and len(set([img.shape for img in observed])) == 1)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert all([img.shape in [(4, 8, 3), (6, 8, 3)] for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(image)
assert observed.shape in [(4, 8, 3), (6, 8, 3)]
image = np.zeros((32, 32, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop(((1, 4), 0, (1, 4), 0), keep_size=False),
iaa.Crop(((4, 8), 0, (4, 8), 0), keep_size=False)
)
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image, image, image, image]))
assert isinstance(observed, list) or (ia.is_np_array(observed) and len(set([img.shape for img in observed])) == 1)
assert all([16 <= img.shape[0] <= 30 and img.shape[1:] == (32, 3) for img in observed])
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
assert all([16 <= img.shape[0] <= 30 and img.shape[1:] == (32, 3) for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert isinstance(observed, list) or (ia.is_np_array(observed) and len(set([img.shape for img in observed])) == 1)
assert all([16 <= img.shape[0] <= 30 and img.shape[1:] == (32, 3) for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert all([16 <= img.shape[0] <= 30 and img.shape[1:] == (32, 3) for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(image)
assert 16 <= observed.shape[0] <= 30 and observed.shape[1:] == (32, 3)
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop((2, 0, 2, 0), keep_size=True),
iaa.Crop((1, 0, 1, 0), keep_size=True)
)
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image, image, image, image]))
assert ia.is_np_array(observed)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert ia.is_np_array(observed)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
assert observed.shape in [(8, 8, 3)]
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop(((1, 4), 0, (1, 4), 0), keep_size=True),
iaa.Crop(((4, 8), 0, (4, 8), 0), keep_size=True)
)
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image, image, image, image]))
assert ia.is_np_array(observed)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert ia.is_np_array(observed)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert all([img.shape in [(8, 8, 3)] for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
assert observed.shape in [(8, 8, 3)]
def test_WithChannels():
base_img = np.zeros((3, 3, 2), dtype=np.uint8)
base_img[..., 0] += 100
base_img[..., 1] += 200
aug = iaa.WithChannels(None, iaa.Add(10))
observed = aug.augment_image(base_img)
expected = base_img + 10
assert np.allclose(observed, expected)
aug = iaa.WithChannels(0, iaa.Add(10))
observed = aug.augment_image(base_img)
expected = np.copy(base_img)
expected[..., 0] += 10
assert np.allclose(observed, expected)
aug = iaa.WithChannels(1, iaa.Add(10))
observed = aug.augment_image(base_img)
expected = np.copy(base_img)
expected[..., 1] += 10
assert np.allclose(observed, expected)
base_img = np.zeros((3, 3, 2), dtype=np.uint8)
base_img[..., 0] += 5
base_img[..., 1] += 10
aug = iaa.WithChannels(1, [iaa.Add(10), iaa.Multiply(2.0)])
observed = aug.augment_image(base_img)
expected = np.copy(base_img)
expected[..., 1] += 10
expected[..., 1] *= 2
assert np.allclose(observed, expected)
# multiple images, given as array
images = np.concatenate([base_img[np.newaxis, ...], base_img[np.newaxis, ...]], axis=0)
aug = iaa.WithChannels(1, iaa.Add(10))
observed = aug.augment_images(images)
expected = np.copy(images)
expected[..., 1] += 10
assert np.allclose(observed, expected)
# multiple images, given as list
images = [base_img, base_img]
aug = iaa.WithChannels(1, iaa.Add(10))
observed = aug.augment_images(images)
expected = np.copy(base_img)
expected[..., 1] += 10
expected = [expected, expected]
assert array_equal_lists(observed, expected)
# children list is empty
aug = iaa.WithChannels(1, children=None)
observed = aug.augment_image(base_img)
expected = np.copy(base_img)
assert np.array_equal(observed, expected)
# channel list is empty
aug = iaa.WithChannels([], iaa.Add(10))
observed = aug.augment_image(base_img)
expected = np.copy(base_img)
assert np.array_equal(observed, expected)
# invalid datatype for channels
got_exception = False
try:
aug = iaa.WithChannels(False, iaa.Add(10))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# invalid datatype for children
got_exception = False
try:
aug = iaa.WithChannels(1, False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# get_parameters
aug = iaa.WithChannels([1], iaa.Add(10))
params = aug.get_parameters()
assert len(params) == 1
assert params[0] == [1]
# get_children_lists
children = iaa.Sequential([iaa.Add(10)])
aug = iaa.WithChannels(1, children)
assert aug.get_children_lists() == [children]
# repr/str
children = iaa.Sequential([iaa.Noop()])
aug = iaa.WithChannels(1, children, name="WithChannelsTest")
expected = "WithChannels(channels=[1], name=WithChannelsTest, children=%s, deterministic=False)" % (str(children),)
assert aug.__repr__() == aug.__str__() == expected
def test_2d_inputs():
"""Test whether inputs of 2D-images (i.e. (H, W) instead of (H, W, C)) work.
"""
reseed()
base_img1 = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 1, 1, 1]], dtype=np.uint8)
base_img2 = np.array([[0, 0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 0]], dtype=np.uint8)
base_img1_flipped = np.array([[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]], dtype=np.uint8)
base_img2_flipped = np.array([[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
images = np.array([base_img1, base_img2])
images_flipped = np.array([base_img1_flipped, base_img2_flipped])
images_list = [base_img1, base_img2]
images_flipped_list = [base_img1_flipped, base_img2_flipped]
images_list2d3d = [base_img1, base_img2[:, :, np.newaxis]]
images_flipped_list2d3d = [base_img1_flipped, base_img2_flipped[:, :, np.newaxis]]
aug = iaa.Fliplr(1.0)
noaug = iaa.Fliplr(0.0)
# one numpy array as input
observed = aug.augment_images(images)
assert np.array_equal(observed, images_flipped)
observed = noaug.augment_images(images)
assert np.array_equal(observed, images)
# list of 2d images
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_flipped_list)
observed = noaug.augment_images(images_list)
assert array_equal_lists(observed, images_list)
# list of images, one 2d and one 3d
observed = aug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_flipped_list2d3d)
observed = noaug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_list2d3d)
def test_Augmenter_augment_batches():
reseed()
image = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 1, 1, 1]], dtype=np.uint8)
image_flipped = np.fliplr(image)
keypoint = ia.Keypoint(x=2, y=1)
keypoints = [ia.KeypointsOnImage([keypoint], shape=image.shape + (1,))]
kp_flipped = ia.Keypoint(
x=image.shape[1]-1-keypoint.x,
y=keypoint.y
)
seq = iaa.Fliplr(0.5)
"""
# with images as list, background=False
nb_flipped_images = 0
nb_flipped_keypoints = 0
nb_iterations = 1000
batches = [ia.Batch(images=[np.copy(image)], keypoints=[keypoints[0].deepcopy()]) for _ in sm.xrange(nb_iterations)]
batches_aug = list(seq.augment_batches(batches, background=False))
for batch_aug in batches_aug:
image_aug = batch_aug.images_aug[0]
keypoint_aug = batch_aug.keypoints_aug[0].keypoints[0]
assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
if np.array_equal(image_aug, image_flipped):
nb_flipped_images += 1
assert (keypoint_aug.x == keypoint.x and keypoint_aug.y == keypoint.y) \
or (keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y)
if keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y:
nb_flipped_keypoints += 1
assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
assert nb_flipped_images == nb_flipped_keypoints
"""
for bg in [False, True]:
# with images as list
nb_flipped_images = 0
nb_flipped_keypoints = 0
nb_iterations = 1000
batches = [ia.Batch(images=[np.copy(image)], keypoints=[keypoints[0].deepcopy()]) for _ in sm.xrange(nb_iterations)]
batches_aug = list(seq.augment_batches(batches, background=bg))
for batch_aug in batches_aug:
image_aug = batch_aug.images_aug[0]
keypoint_aug = batch_aug.keypoints_aug[0].keypoints[0]
assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
if np.array_equal(image_aug, image_flipped):
nb_flipped_images += 1
assert (keypoint_aug.x == keypoint.x and keypoint_aug.y == keypoint.y) \
or (keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y)
if keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y:
nb_flipped_keypoints += 1
assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
assert nb_flipped_images == nb_flipped_keypoints
# with images as array
nb_flipped_images = 0
nb_flipped_keypoints = 0
nb_iterations = 1000
batches = [ia.Batch(images=np.array([np.copy(image)], dtype=np.uint8), keypoints=None) for _ in sm.xrange(nb_iterations)]
batches_aug = list(seq.augment_batches(batches, background=bg))
for batch_aug in batches_aug:
#batch = ia.Batch(images=np.array([image], dtype=np.uint8), keypoints=keypoints)
#batches_aug = list(seq.augment_batches([batch], background=True))
#batch_aug = batches_aug[0]
image_aug = batch_aug.images_aug[0]
assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
if np.array_equal(image_aug, image_flipped):
nb_flipped_images += 1
assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
# array (N, H, W) as input
nb_flipped_images = 0
nb_iterations = 1000
batches = [np.array([np.copy(image)], dtype=np.uint8) for _ in sm.xrange(nb_iterations)]
batches_aug = list(seq.augment_batches(batches, background=bg))
for batch_aug in batches_aug:
#batch = np.array([image], dtype=np.uint8)
#batches_aug = list(seq.augment_batches([batch], background=True))
#image_aug = batches_aug[0][0]
image_aug = batch_aug[0]
assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
if np.array_equal(image_aug, image_flipped):
nb_flipped_images += 1
assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
# list of list of KeypointsOnImage as input
nb_flipped_keypoints = 0
nb_iterations = 1000
#batches = [ia.Batch(images=[np.copy(image)], keypoints=None) for _ in sm.xrange(nb_iterations)]
batches = [[keypoints[0].deepcopy()] for _ in sm.xrange(nb_iterations)]
batches_aug = list(seq.augment_batches(batches, background=bg))
for batch_aug in batches_aug:
#batch = [keypoints]
#batches_aug = list(seq.augment_batches([batch], background=True))
#batch_aug = batches_aug[0]
#keypoint_aug = batches_aug[0].keypoints[0].keypoints[0]
keypoint_aug = batch_aug[0].keypoints[0]
assert (keypoint_aug.x == keypoint.x and keypoint_aug.y == keypoint.y) \
or (keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y)
if keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y:
nb_flipped_keypoints += 1
assert 0.4*nb_iterations <= nb_flipped_keypoints <= 0.6*nb_iterations
# test all augmenters
# this test is currently skipped by default as it takes around 40s on its own,
# probably because of having to start background processes
"""
augs = [
iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.SomeOf(1, [iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.OneOf([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.Sometimes(1.0, iaa.Fliplr(1)),
iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
iaa.WithChannels([0], iaa.Add((-50, 50))),
iaa.Noop(name="Noop-nochange"),
iaa.Lambda(
func_images=lambda images, random_state, parents, hooks: images,
func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
name="Lambda-nochange"
),
iaa.AssertLambda(
func_images=lambda images, random_state, parents, hooks: True,
func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
name="AssertLambda-nochange"
),
iaa.AssertShape(
(None, 64, 64, 3),
check_keypoints=False,
name="AssertShape-nochange"
),
iaa.Scale((0.5, 0.9)),
iaa.CropAndPad(px=(-50, 50)),
iaa.Pad(px=(1, 50)),
iaa.Crop(px=(1, 50)),
iaa.Fliplr(1.0),
iaa.Flipud(1.0),
iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
iaa.ChangeColorspace(to_colorspace="GRAY"),
iaa.Grayscale(alpha=(0.1, 1.0)),
iaa.GaussianBlur(1.0),
iaa.AverageBlur(5),
iaa.MedianBlur(5),
iaa.Convolve(np.array([[0, 1, 0],
[1, -4, 1],
[0, 1, 0]])),
iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
iaa.EdgeDetect(alpha=(0.1, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
iaa.Add((-50, 50)),
iaa.AddElementwise((-50, 50)),
iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
iaa.Multiply((0.6, 1.4)),
iaa.MultiplyElementwise((0.6, 1.4)),
iaa.Dropout((0.3, 0.5)),
iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
iaa.Invert(0.5),
iaa.ContrastNormalization((0.6, 1.4)),
iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1), rotate=(-20, 20),
shear=(-20, 20), order=ia.ALL, mode=ia.ALL, cval=(0, 255)),
iaa.PiecewiseAffine(scale=(0.1, 0.3)),
iaa.ElasticTransformation(alpha=0.5)
]
nb_iterations = 100
image = ia.quokka(size=(64, 64))
batch = ia.Batch(images=np.array([image]), keypoints=keypoints)
batches = [ia.Batch(images=[np.copy(image)], keypoints=[keypoints[0].deepcopy()])
for _ in sm.xrange(nb_iterations)]
for aug in augs:
nb_changed = 0
batches_aug = list(aug.augment_batches(batches, background=True))
for batch_aug in batches_aug:
image_aug = batch_aug.images_aug[0]
if image.shape != image_aug.shape or not np.array_equal(image, image_aug):
nb_changed += 1
if nb_changed > 10:
break
if "-nochange" not in aug.name:
assert nb_changed > 0
else:
assert nb_changed == 0
"""
def test_determinism():
reseed()
images = [
ia.quokka(size=(128, 128)),
ia.quokka(size=(64, 64)),
misc.imresize(data.astronaut(), (128, 256))
]
keypoints = [
ia.KeypointsOnImage([
ia.Keypoint(x=20, y=10), ia.Keypoint(x=5, y=5), ia.Keypoint(x=10, y=43)
], shape=(50, 60, 3))
]
augs = [
iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.SomeOf(1, [iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.OneOf([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.Sometimes(1.0, iaa.Fliplr(1)),
iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
iaa.WithChannels([0], iaa.Add((-50, 50))),
iaa.Noop(name="Noop-nochange"),
iaa.Lambda(
func_images=lambda images, random_state, parents, hooks: images,
func_heatmaps=lambda heatmaps, random_state, parents, hooks: heatmaps,
func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
name="Lambda-nochange"
),
iaa.AssertLambda(
func_images=lambda images, random_state, parents, hooks: True,
func_heatmaps=lambda heatmaps, random_state, parents, hooks: True,
func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
name="AssertLambda-nochange"
),
iaa.AssertShape(
(None, None, None, 3),
check_keypoints=False,
name="AssertShape-nochange"
),
iaa.Scale((0.5, 0.9)),
iaa.CropAndPad(px=(-50, 50)),
iaa.Pad(px=(1, 50)),
iaa.Crop(px=(1, 50)),
iaa.Fliplr(1.0),
iaa.Flipud(1.0),
iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
iaa.ChangeColorspace(to_colorspace="GRAY"),
iaa.Grayscale(alpha=(0.1, 1.0)),
iaa.GaussianBlur(1.0),
iaa.AverageBlur(5),
iaa.MedianBlur(5),
iaa.Convolve(np.array([[0, 1, 0],
[1, -4, 1],
[0, 1, 0]])),
iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
iaa.EdgeDetect(alpha=(0.1, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
iaa.Add((-50, 50)),
iaa.AddElementwise((-50, 50)),
iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
iaa.Multiply((0.6, 1.4)),
iaa.MultiplyElementwise((0.6, 1.4)),
iaa.Dropout((0.3, 0.5)),
iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
iaa.Invert(0.5),
iaa.ContrastNormalization((0.6, 1.4)),
iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1),
rotate=(-20, 20), shear=(-20, 20), order=ia.ALL,
mode=ia.ALL, cval=(0, 255)),
iaa.PiecewiseAffine(scale=(0.1, 0.3)),
iaa.ElasticTransformation(alpha=0.5)
]
for aug in augs:
aug_det = aug.to_deterministic()
images_aug1 = aug_det.augment_images(images)
images_aug2 = aug_det.augment_images(images)
kps_aug1 = aug_det.augment_keypoints(keypoints)
kps_aug2 = aug_det.augment_keypoints(keypoints)
assert array_equal_lists(images_aug1, images_aug2), \
"Images not identical for %s" % (aug.name,)
assert keypoints_equal(kps_aug1, kps_aug2), \
"Keypoints not identical for %s" % (aug.name,)
def test_keypoint_augmentation():
ia.seed(1)
keypoints = []
for y in range(40//5):
for x in range(60//5):
keypoints.append(ia.Keypoint(y=y*5, x=x*5))
keypoints_oi = ia.KeypointsOnImage(keypoints, shape=(40, 60, 3))
augs = [
iaa.Add((-5, 5), name="Add"),
iaa.AddElementwise((-5, 5), name="AddElementwise"),
iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
iaa.Multiply((0.95, 1.05), name="Multiply"),
iaa.Dropout(0.01, name="Dropout"),
iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
iaa.Invert(0.01, per_channel=True, name="Invert"),
iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
iaa.AverageBlur((3, 5), name="AverageBlur"),
iaa.MedianBlur((3, 5), name="MedianBlur"),
#iaa.BilateralBlur((3, 5), name="BilateralBlur"),
# WithColorspace ?
#iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"),
# ChangeColorspace ?
# Grayscale cannot be tested, input not RGB
# Convolve ?
iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"),
iaa.Fliplr(0.5, name="Fliplr"),
iaa.Flipud(0.5, name="Flipud"),
iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
iaa.Affine(shear=(-20, 20), name="Affine-shear"),
iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
#iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"),
# Sequential
# SomeOf
# OneOf
# Sometimes
# WithChannels
# Noop
# Lambda
# AssertLambda
# AssertShape
iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
name="SimplexNoiseAlpha"),
iaa.Superpixels(p_replace=0.01, n_segments=64),
iaa.Scale(0.5, name="Scale"),
iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
iaa.Pad(px=(0, 10), name="Pad"),
iaa.Crop(px=(0, 10), name="Crop")
]
for aug in augs:
#if aug.name != "PiecewiseAffine":
# continue
dss = []
for i in range(10):
aug_det = aug.to_deterministic()
kp_image = keypoints_oi.to_keypoint_image(size=5)
kp_image_aug = aug_det.augment_image(kp_image)
kp_image_aug_rev = ia.KeypointsOnImage.from_keypoint_image(
kp_image_aug,
if_not_found_coords={"x": -9999, "y": -9999},
nb_channels=1
)
kp_aug = aug_det.augment_keypoints([keypoints_oi])[0]
ds = []
assert len(kp_image_aug_rev.keypoints) == len(kp_aug.keypoints),\
"Lost keypoints for '%s' (%d vs expected %d)" \
% (aug.name, len(kp_aug.keypoints), len(kp_image_aug_rev.keypoints))
for kp_pred, kp_pred_img in zip(kp_aug.keypoints, kp_image_aug_rev.keypoints):
kp_pred_lost = (kp_pred.x == -9999 and kp_pred.y == -9999)
kp_pred_img_lost = (kp_pred_img.x == -9999 and kp_pred_img.y == -9999)
#if kp_pred_lost and not kp_pred_img_lost:
# print("lost kp_pred", kp_pred_img)
#elif not kp_pred_lost and kp_pred_img_lost:
# print("lost kp_pred_img", kp_pred)
#elif kp_pred_lost and kp_pred_img_lost:
# print("lost both keypoints")
if not kp_pred_lost and not kp_pred_img_lost:
d = np.sqrt((kp_pred.x - kp_pred_img.x) ** 2
+ (kp_pred.y - kp_pred_img.y) ** 2)
ds.append(d)
#print(aug.name, np.average(ds), ds)
dss.extend(ds)
if len(ds) == 0:
print("[INFO] No valid keypoints found for '%s' "
"in test_keypoint_augmentation()" % (str(aug),))
assert np.average(dss) < 5.0, \
"Average distance too high (%.2f, with ds: %s)" \
% (np.average(dss), str(dss))
def test_unusual_channel_numbers():
ia.seed(1)
images = [
(0, create_random_images((4, 16, 16))),
(1, create_random_images((4, 16, 16, 1))),
(2, create_random_images((4, 16, 16, 2))),
(4, create_random_images((4, 16, 16, 4))),
(5, create_random_images((4, 16, 16, 5))),
(10, create_random_images((4, 16, 16, 10))),
(20, create_random_images((4, 16, 16, 20)))
]
augs = [
iaa.Add((-5, 5), name="Add"),
iaa.AddElementwise((-5, 5), name="AddElementwise"),
iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
iaa.Multiply((0.95, 1.05), name="Multiply"),
iaa.Dropout(0.01, name="Dropout"),
iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
iaa.Invert(0.01, per_channel=True, name="Invert"),
iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
iaa.AverageBlur((3, 5), name="AverageBlur"),
iaa.MedianBlur((3, 5), name="MedianBlur"),
#iaa.BilateralBlur((3, 5), name="BilateralBlur"), # works only with 3/RGB channels
# WithColorspace ?
#iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with 3/RGB channels
# ChangeColorspace ?
#iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with 3 channels
# Convolve ?
iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0,
name="DirectedEdgeDetect"),
iaa.Fliplr(0.5, name="Fliplr"),
iaa.Flipud(0.5, name="Flipud"),
iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
iaa.Affine(shear=(-20, 20), name="Affine-shear"),
iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2),
name="ElasticTransformation"),
iaa.Sequential([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
iaa.SomeOf(1, [iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
iaa.OneOf(iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))),
iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"),
# WithChannels
iaa.Noop(name="Noop"),
# Lambda
# AssertLambda
# AssertShape
iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
name="SimplexNoiseAlpha"),
iaa.Superpixels(p_replace=0.01, n_segments=64),
iaa.Scale({"height": 4, "width": 4}, name="Scale"),
iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
iaa.Pad(px=(0, 10), name="Pad"),
iaa.Crop(px=(0, 10), name="Crop")
]
for aug in augs:
for (nb_channels, images_c) in images:
#print("shape", images_c.shape, aug.name)
if aug.name != "Scale":
images_aug = aug.augment_images(images_c)
assert images_aug.shape == images_c.shape
image_aug = aug.augment_image(images_c[0])
assert image_aug.shape == images_c[0].shape
else:
images_aug = aug.augment_images(images_c)
image_aug = aug.augment_image(images_c[0])
if images_c.ndim == 3:
assert images_aug.shape == (4, 4, 4)
assert image_aug.shape == (4, 4)
else:
assert images_aug.shape == (4, 4, 4, images_c.shape[3])
assert image_aug.shape == (4, 4, images_c.shape[3])
#@attr("now")
def test_dtype_preservation():
ia.seed(1)
size = (4, 16, 16, 3)
images = [
np.random.uniform(0, 255, size).astype(np.uint8),
np.random.uniform(0, 65535, size).astype(np.uint16),
np.random.uniform(0, 4294967295, size).astype(np.uint32), # not supported by cv2.blur in AverageBlur
np.random.uniform(-128, 127, size).astype(np.int16),
np.random.uniform(-32768, 32767, size).astype(np.int32),
np.random.uniform(0.0, 1.0, size).astype(np.float32),
np.random.uniform(-1000.0, 1000.0, size).astype(np.float16), # not supported by scipy.ndimage.filter in GaussianBlur
np.random.uniform(-1000.0, 1000.0, size).astype(np.float32),
np.random.uniform(-1000.0, 1000.0, size).astype(np.float64)
]
default_dtypes = set([arr.dtype for arr in images])
# Some dtypes are here removed per augmenter, because the respective
# augmenter does not support them. This test currently only checks whether
# dtypes are preserved from in- to output for all dtypes that are supported
# per augmenter.
# dtypes are here removed via list comprehension instead of
# `default_dtypes - set([dtype])`, because the latter one simply never
# removed the dtype(s) for some reason?!
augs = [
(iaa.Add((-5, 5), name="Add"), default_dtypes),
(iaa.AddElementwise((-5, 5), name="AddElementwise"), default_dtypes),
(iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"), default_dtypes),
(iaa.Multiply((0.95, 1.05), name="Multiply"), default_dtypes),
(iaa.Dropout(0.01, name="Dropout"), default_dtypes),
(iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"), default_dtypes),
(iaa.Invert(0.01, per_channel=True, name="Invert"), default_dtypes),
(iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"), default_dtypes),
(iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"), [dt for dt in default_dtypes if dt not in [np.float16]]),
(iaa.AverageBlur((3, 5), name="AverageBlur"), [dt for dt in default_dtypes if dt not in [np.uint32, np.float16]]),
(iaa.MedianBlur((3, 5), name="MedianBlur"), [dt for dt in default_dtypes if dt not in [np.uint32, np.int32, np.float16, np.float64]]),
(iaa.BilateralBlur((3, 5), name="BilateralBlur"), [dt for dt in default_dtypes if dt not in [np.uint16, np.uint32, np.int16, np.int32, np.float16, np.float64]]),
# WithColorspace ?
#iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with RGB/uint8
# ChangeColorspace ?
#iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with RGB/uint8
# Convolve ?
(iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"), [dt for dt in default_dtypes if dt not in [np.uint32, np.int32, np.float16, np.uint32]]),
(iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"), [dt for dt in default_dtypes if dt not in [np.uint32, np.int32, np.float16, np.uint32]]),
(iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"), [dt for dt in default_dtypes if dt not in [np.uint32, np.int32, np.float16, np.uint32]]),
(iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"), [dt for dt in default_dtypes if dt not in [np.uint32, np.int32, np.float16, np.uint32]]),
(iaa.Fliplr(0.5, name="Fliplr"), default_dtypes),
(iaa.Flipud(0.5, name="Flipud"), default_dtypes),
(iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"), default_dtypes),
(iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"), default_dtypes),
(iaa.Affine(rotate=(-20, 20), name="Affine-rotate"), default_dtypes),
(iaa.Affine(shear=(-20, 20), name="Affine-shear"), default_dtypes),
(iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"), default_dtypes),
(iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"), default_dtypes),
#(iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"), [dt for dt in default_dtypes if dt not in [np.uint32]]),
(iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"), [dt for dt in default_dtypes if dt not in [np.float16]]),
(iaa.Sequential([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]), default_dtypes),
(iaa.SomeOf(1, [iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]), default_dtypes),
(iaa.OneOf(iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))), default_dtypes),
(iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"), default_dtypes),
# WithChannels
(iaa.Noop(name="Noop"), default_dtypes),
# Lambda
# AssertLambda
# AssertShape
(iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"), default_dtypes),
(iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"), default_dtypes),
(iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"), default_dtypes),
(iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10), name="SimplexNoiseAlpha"), default_dtypes),
(iaa.Superpixels(p_replace=0.01, n_segments=64), [dt for dt in default_dtypes if dt not in [np.float16, np.float32]]),
(iaa.Scale({"height": 4, "width": 4}, name="Scale"), [dt for dt in default_dtypes if dt not in [np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64]]),
(iaa.CropAndPad(px=(-10, 10), name="CropAndPad"), [dt for dt in default_dtypes if dt not in [np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64]]),
(iaa.Pad(px=(0, 10), name="Pad"), [dt for dt in default_dtypes if dt not in [np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64]]),
(iaa.Crop(px=(0, 10), name="Crop"), [dt for dt in default_dtypes if dt not in [np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64]])
]
for (aug, allowed_dtypes) in augs:
#print(aug.name, allowed_dtypes)
for images_i in images:
if images_i.dtype in allowed_dtypes:
#print("shape", images_i.shape, images_i.dtype, aug.name)
images_aug = aug.augment_images(images_i)
#assert images_aug.shape == images_i.shape
assert images_aug.dtype == images_i.dtype
else:
#print("Skipped dtype %s for augmenter %s" % (images_i.dtype, aug.name))
pass
def test_parameters_handle_continuous_param():
# value without value range
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test1]", value_range=None, tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test1]" in str(e)
assert got_exception == False
# value without value range as (None, None)
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test1b]", value_range=(None, None), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test1b]" in str(e)
assert got_exception == False
# stochastic parameter
got_exception = False
try:
result = iap.handle_continuous_param(iap.Deterministic(1), "[test2]", value_range=None, tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test2]" in str(e)
assert got_exception == False
# value within value range
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test3]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test3]" in str(e)
assert got_exception == False
# value outside of value range
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test4]", value_range=(2, 12), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test4]" in str(e)
assert got_exception == True
# value within value range (without lower bound)
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test5]", value_range=(None, 12), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test5]" in str(e)
assert got_exception == False
# value outside of value range (without lower bound)
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test6]", value_range=(None, 0), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test6]" in str(e)
assert got_exception == True
# value within value range (without upper bound)
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test7]", value_range=(-1, None), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test7]" in str(e)
assert got_exception == False
# value outside of value range (without upper bound)
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test8]", value_range=(2, None), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test8]" in str(e)
assert got_exception == True
# tuple as value, but no tuples allowed
got_exception = False
try:
result = iap.handle_continuous_param((1, 2), "[test9]", value_range=None, tuple_to_uniform=False, list_to_choice=True)
assert isinstance(result, iap.Uniform)
except Exception as e:
got_exception = True
assert "[test9]" in str(e)
assert got_exception == True
# tuple as value and tuple allowed
got_exception = False
try:
result = iap.handle_continuous_param((1, 2), "[test10]", value_range=None, tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Uniform)
except Exception as e:
got_exception = True
assert "[test10]" in str(e)
assert got_exception == False
# tuple as value and tuple allowed and tuple within value range
got_exception = False
try:
result = iap.handle_continuous_param((1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Uniform)
except Exception as e:
got_exception = True
assert "[test11]" in str(e)
assert got_exception == False
# tuple as value and tuple allowed and tuple partially outside of value range
got_exception = False
try:
result = iap.handle_continuous_param((1, 2), "[test12]", value_range=(1.5, 13), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Uniform)
except Exception as e:
got_exception = True
assert "[test12]" in str(e)
assert got_exception == True
# tuple as value and tuple allowed and tuple fully outside of value range
got_exception = False
try:
result = iap.handle_continuous_param((1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Uniform)
except Exception as e:
got_exception = True
assert "[test13]" in str(e)
assert got_exception == True
# list as value, but no list allowed
got_exception = False
try:
result = iap.handle_continuous_param([1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True, list_to_choice=False)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test14]" in str(e)
assert got_exception == True
# list as value and list allowed
got_exception = False
try:
result = iap.handle_continuous_param([1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test15]" in str(e)
assert got_exception == False
# list as value and list allowed and list partially outside of value range
got_exception = False
try:
result = iap.handle_continuous_param([1, 2], "[test16]", value_range=(1.5, 13), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test16]" in str(e)
assert got_exception == True
# list as value and list allowed and list fully outside of value range
got_exception = False
try:
result = iap.handle_continuous_param([1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test17]" in str(e)
assert got_exception == True
# single value within value range given as callable
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test18]", value_range=lambda x: -1 < x < 1, tuple_to_uniform=True, list_to_choice=True)
except Exception as e:
got_exception = True
assert "[test18]" in str(e)
assert got_exception == False
# bad datatype for value range
got_exception = False
try:
result = iap.handle_continuous_param(1, "[test19]", value_range=False, tuple_to_uniform=True, list_to_choice=True)
except Exception as e:
got_exception = True
assert "Unexpected input for value_range" in str(e)
assert got_exception == True
def test_parameters_handle_discrete_param():
# float value without value range when no float value is allowed
got_exception = False
try:
result = iap.handle_discrete_param(1.5, "[test0]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test0]" in str(e)
assert got_exception == True
# value without value range
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test1]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test1]" in str(e)
assert got_exception == False
# value without value range as (None, None)
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test1b]", value_range=(None, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test1b]" in str(e)
assert got_exception == False
# stochastic parameter
got_exception = False
try:
result = iap.handle_discrete_param(iap.Deterministic(1), "[test2]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test2]" in str(e)
assert got_exception == False
# value within value range
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test3]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test3]" in str(e)
assert got_exception == False
# value outside of value range
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test4]", value_range=(2, 12), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test4]" in str(e)
assert got_exception == True
# value within value range (without lower bound)
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test5]", value_range=(None, 12), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test5]" in str(e)
assert got_exception == False
# value outside of value range (without lower bound)
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test6]", value_range=(None, 0), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test6]" in str(e)
assert got_exception == True
# value within value range (without upper bound)
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test7]", value_range=(-1, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test7]" in str(e)
assert got_exception == False
# value outside of value range (without upper bound)
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test8]", value_range=(2, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Deterministic)
except Exception as e:
got_exception = True
assert "[test8]" in str(e)
assert got_exception == True
# tuple as value, but no tuples allowed
got_exception = False
try:
result = iap.handle_discrete_param((1, 2), "[test9]", value_range=None, tuple_to_uniform=False, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.DiscreteUniform)
except Exception as e:
got_exception = True
assert "[test9]" in str(e)
assert got_exception == True
# tuple as value and tuple allowed
got_exception = False
try:
result = iap.handle_discrete_param((1, 2), "[test10]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.DiscreteUniform)
except Exception as e:
got_exception = True
assert "[test10]" in str(e)
assert got_exception == False
# tuple as value and tuple allowed and tuple within value range
got_exception = False
try:
result = iap.handle_discrete_param((1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.DiscreteUniform)
except Exception as e:
got_exception = True
assert "[test11]" in str(e)
assert got_exception == False
# tuple as value and tuple allowed and tuple within value range with allow_floats=False
got_exception = False
try:
result = iap.handle_discrete_param((1, 2), "[test11b]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
assert isinstance(result, iap.DiscreteUniform)
except Exception as e:
got_exception = True
assert "[test11b]" in str(e)
assert got_exception == False
# tuple as value and tuple allowed and tuple partially outside of value range
got_exception = False
try:
result = iap.handle_discrete_param((1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.DiscreteUniform)
except Exception as e:
got_exception = True
assert "[test12]" in str(e)
assert got_exception == True
# tuple as value and tuple allowed and tuple fully outside of value range
got_exception = False
try:
result = iap.handle_discrete_param((1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.DiscreteUniform)
except Exception as e:
got_exception = True
assert "[test13]" in str(e)
assert got_exception == True
# list as value, but no list allowed
got_exception = False
try:
result = iap.handle_discrete_param([1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True, list_to_choice=False, allow_floats=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test14]" in str(e)
assert got_exception == True
# list as value and list allowed
got_exception = False
try:
result = iap.handle_discrete_param([1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test15]" in str(e)
assert got_exception == False
# list as value and list allowed and list partially outside of value range
got_exception = False
try:
result = iap.handle_discrete_param([1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test16]" in str(e)
assert got_exception == True
# list as value and list allowed and list fully outside of value range
got_exception = False
try:
result = iap.handle_discrete_param([1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
assert isinstance(result, iap.Choice)
except Exception as e:
got_exception = True
assert "[test17]" in str(e)
assert got_exception == True
# single value within value range given as callable
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test18]", value_range=lambda x: -1 < x < 1, tuple_to_uniform=True, list_to_choice=True)
except Exception as e:
got_exception = True
assert "[test18]" in str(e)
assert got_exception == False
# bad datatype for value range
got_exception = False
try:
result = iap.handle_discrete_param(1, "[test19]", value_range=False, tuple_to_uniform=True, list_to_choice=True)
except Exception as e:
got_exception = True
assert "Unexpected input for value_range" in str(e)
assert got_exception == True
def test_parameters_handle_probability_param():
for val in [True, False, 0, 1, 0.0, 1.0]:
p = iap.handle_probability_param(val, "[test1]")
assert isinstance(p, iap.Deterministic)
assert p.value == int(val)
for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
p = iap.handle_probability_param(val, "[test2]")
assert isinstance(p, iap.Binomial)
assert isinstance(p.p, iap.Deterministic)
assert val-1e-8 < p.p.value < val+1e-8
det = iap.Deterministic(1)
p = iap.handle_probability_param(det, "[test3]")
assert p == det
got_exception = False
try:
p = iap.handle_probability_param("test", "[test4]")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
p = iap.handle_probability_param(-0.01, "[test5]")
except AssertionError:
got_exception = True
assert got_exception
got_exception = False
try:
p = iap.handle_probability_param(1.01, "[test6]")
except AssertionError:
got_exception = True
assert got_exception
def test_parameters_force_np_float_dtype():
dtypes = [
(np.float16, np.float16),
(np.float32, np.float32),
(np.float64, np.float64),
(np.uint8, np.float64),
(np.int32, np.float64)
]
for i, (dtype_in, dtype_out) in enumerate(dtypes):
assert iap.force_np_float_dtype(np.zeros((1,), dtype=dtype_in)).dtype == dtype_out,\
"force_np_float_dtype() failed at %d" % (i,)
def test_parameters_both_np_float_if_one_is_float():
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.float32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float16, a2.dtype.type
assert b2.dtype.type == np.float32, b2.dtype.type
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.int32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float16, a2.dtype.type
assert b2.dtype.type == np.float64, b2.dtype.type
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.float16)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float64, a2.dtype.type
assert b2.dtype.type == np.float16, b2.dtype.type
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.uint8)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.type == np.float64, a2.dtype.type
assert b2.dtype.type == np.float64, b2.dtype.type
def test_parameters_draw_distribution_grid():
params = [iap.Deterministic(1), iap.Uniform(0, 1.0)]
graph1 = params[0].draw_distribution_graph(size=(100000,))
graph2 = params[1].draw_distribution_graph(size=(100000,))
graph1_rs = ia.imresize_many_images(np.array([graph1]), sizes=(100, 100))[0]
graph2_rs = ia.imresize_many_images(np.array([graph2]), sizes=(100, 100))[0]
grid_expected = ia.draw_grid([graph1_rs, graph2_rs])
grid_observed = iap.draw_distributions_grid(
params,
rows=None,
cols=None,
graph_sizes=(100, 100),
sample_sizes=[(100000,), (100000,)],
titles=None
)
diff = np.abs(grid_expected.astype(np.int32) - grid_observed.astype(np.int32))
#from scipy import misc
#misc.imshow(np.vstack([grid_expected, grid_observed, diff]))
#print(diff.flatten()[0:100])
assert np.average(diff) < 10
def test_parameters_draw_distribution_graph():
# this test is very rough as we get a not-very-well-defined image out of the function
param = iap.Uniform(0.0, 1.0)
graph_img = param.draw_distribution_graph(title=None, size=(10000,), bins=100)
assert graph_img.ndim == 3
assert graph_img.shape[2] == 3
# at least 10% of the image should be white-ish (background)
nb_white = np.sum(graph_img[..., :] > [200, 200, 200])
nb_all = np.prod(graph_img.shape)
assert nb_white > 0.1 * nb_all
graph_img_title = param.draw_distribution_graph(title="test", size=(10000,), bins=100)
assert graph_img_title.ndim == 3
assert graph_img_title.shape[2] == 3
assert not np.array_equal(graph_img_title, graph_img)
def test_parameters_Biomial():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Binomial(0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
assert param.__str__() == param.__repr__() == "Binomial(Deterministic(int 0))"
param = iap.Binomial(1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
assert param.__str__() == param.__repr__() == "Binomial(Deterministic(float 1.00000000))"
param = iap.Binomial(0.5)
sample = param.draw_sample()
samples = param.draw_samples((10000))
assert sample.shape == tuple()
assert samples.shape == (10000,)
assert sample in [0, 1]
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 5000 - 500 < count < 5000 + 500
elif val == 1:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
param = iap.Binomial(iap.Choice([0.25, 0.75]))
for _ in sm.xrange(10):
samples = param.draw_samples((1000,))
p = np.sum(samples) / samples.size
assert (0.25 - 0.05 < p < 0.25 + 0.05) or (0.75 - 0.05 < p < 0.75 + 0.05)
param = iap.Binomial((0.0, 1.0))
last_p = 0.5
diffs = []
for _ in sm.xrange(30):
samples = param.draw_samples((1000,))
p = np.sum(samples).astype(np.float32) / samples.size
diffs.append(abs(p - last_p))
last_p = p
nb_p_changed = sum([diff > 0.05 for diff in diffs])
assert nb_p_changed > 15
param = iap.Binomial(0.5)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
def test_parameters_Choice():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Choice([0, 1, 2])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(np.logical_or(np.logical_or(samples == 0, samples == 1), samples==2))
assert param.__str__() == param.__repr__() == "Choice(a=[0, 1, 2], replace=True, p=None)"
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert expected - expected_tolerance < count < expected + expected_tolerance
param = iap.Choice([-1, 1])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 1]
assert np.all(np.logical_or(samples == -1, samples == 1))
param = iap.Choice([-1.2, 1.7])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.2 - eps < sample < -1.2 + eps or 1.7 - eps < sample < 1.7 + eps
assert np.all(
np.logical_or(
np.logical_and(-1.2 - eps < samples, samples < -1.2 + eps),
np.logical_and(1.7 - eps < samples, samples < 1.7 + eps)
)
)
param = iap.Choice(["first", "second", "third"])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in ["first", "second", "third"]
assert np.all(
np.logical_or(
np.logical_or(
samples == "first",
samples == "second"
),
samples == "third"
)
)
param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
samples = param.draw_samples((50,))
seen = [0 for _ in sm.xrange(100)]
for sample in samples:
seen[sample-1] += 1
assert all([count in [0, 1] for count in seen])
param = iap.Choice([0, 1], p=[0.25, 0.75])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 2500 - 500 < count < 2500 + 500
elif val == 1:
assert 7500 - 500 < count < 7500 + 500
else:
assert False
param = iap.Choice([iap.Choice([0, 1]), 2])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 3
for val, count in zip(unique, counts):
if val in [0, 1]:
assert 2500 - 500 < count < 2500 + 500
elif val == 2:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
param = iap.Choice([-1, 0, 1, 2, 3])
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
got_exception = False
try:
param = iap.Choice(123)
except Exception as exc:
assert "Expected a to be an iterable" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param = iap.Choice([1, 2], p=123)
except Exception as exc:
assert "Expected p to be" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param = iap.Choice([1, 2], p=[1])
except Exception as exc:
assert "Expected lengths of" in str(exc)
got_exception = True
assert got_exception
def test_parameters_DiscreteUniform():
reseed()
eps = np.finfo(np.float32).eps
param = iap.DiscreteUniform(0, 2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(np.logical_or(np.logical_or(samples == 0, samples == 1), samples==2))
assert param.__str__() == param.__repr__() == "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert expected - expected_tolerance < count < expected + expected_tolerance
param = iap.DiscreteUniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(np.logical_or(np.logical_or(samples == -1, samples == 0), samples==1))
param = iap.DiscreteUniform(-1.2, 1.2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(np.logical_or(np.logical_or(samples == -1, samples == 0), samples==1))
param = iap.DiscreteUniform(1, -1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(np.logical_or(np.logical_or(samples == -1, samples == 0), samples==1))
param = iap.DiscreteUniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((100,))
assert sample == 1
assert np.all(samples == 1)
param = iap.Uniform(-1, 1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
def test_parameters_Poisson():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Poisson(1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).poisson(lam=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 < sample
assert param.__str__() == param.__repr__() == "Poisson(Deterministic(int 1))"
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
count_direct = np.sum(samples_direct == i)
count = np.sum(samples == i)
tolerance = max(count_direct * 0.1, 250)
assert count_direct - tolerance < count < count_direct + tolerance
param = iap.Poisson(1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
def test_parameters_Normal():
reseed()
param = iap.Normal(0, 1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).normal(loc=0, scale=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert param.__str__() == param.__repr__() == "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Normal(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
param1 = iap.Normal(0, 1)
param2 = iap.Normal(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert 100 - 10 < np.std(samples2) < 100 + 10
param = iap.Normal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Laplace():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Laplace(0, 1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).laplace(loc=0, scale=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert param.__str__() == param.__repr__() == "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Laplace(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
param1 = iap.Laplace(0, 1)
param2 = iap.Laplace(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
param1 = iap.Laplace(1, 0)
samples = param1.draw_samples((100,))
assert np.all(np.logical_and(
samples > 1 - eps,
samples < 1 + eps
))
param = iap.Laplace(0, 1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_ChiSquare():
reseed()
param = iap.ChiSquare(1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).chisquare(df=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 <= sample
assert np.all(0 <= samples)
assert param.__str__() == param.__repr__() == "ChiSquare(df=Deterministic(int 1))"
samples = np.clip(samples, 0, 3)
samples_direct = np.clip(samples_direct, 0, 3)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 3.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.ChiSquare(iap.Choice([1, 10]))
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if 1 - 1.0 < exp < 1 + 1.0:
seen[0] += 1
elif 10 - 4.0 < exp < 10 + 4.0:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
param1 = iap.ChiSquare(1)
param2 = iap.ChiSquare(10)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
param = iap.ChiSquare(1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Weibull():
reseed()
param = iap.Weibull(1)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).weibull(a=1, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 <= sample
assert np.all(0 <= samples)
assert param.__str__() == param.__repr__() == "Weibull(a=Deterministic(int 1))"
samples = np.clip(samples, 0, 2)
samples_direct = np.clip(samples_direct, 0, 2)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 2.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Weibull(iap.Choice([1, 0.5]))
expected_first = scipy.special.gamma(1 + 1/1)
expected_second = scipy.special.gamma(1 + 1/0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((50000,))
observed = np.mean(samples)
if expected_first - 0.2 * expected_first < observed < expected_first + 0.2 * expected_first:
seen[0] += 1
elif expected_second - 0.2 * expected_second < observed < expected_second + 0.2 * expected_second:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
param1 = iap.Weibull(1)
param2 = iap.Weibull(0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
assert np.var(samples1) < np.var(samples2)
expected_first = scipy.special.gamma(1 + 2/1) - (scipy.special.gamma(1 + 1/1))**2
expected_second = scipy.special.gamma(1 + 2/0.5) - (scipy.special.gamma(1 + 1/0.5))**2
assert expected_first - 0.2 * expected_first < np.var(samples1) < expected_first + 0.2 * expected_first
assert expected_second - 0.2 * expected_second < np.var(samples2) < expected_second + 0.2 * expected_second
param = iap.Weibull(1)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Uniform():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Uniform(0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(0 - eps < samples, samples < 1.0 + eps))
assert param.__str__() == param.__repr__() == "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
samples = param.draw_samples((10000,))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0), density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / samples.size
assert density_expected - density_tolerance < density < density_expected + density_tolerance
param = iap.Uniform(-1.0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(-1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(1.0, -1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(-1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(-1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 1.0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(1.0 - eps < samples, samples < 1.0 + eps))
param = iap.Uniform(-1.0, 1.0)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Beta():
def _mean(alpha, beta):
return alpha / (alpha + beta)
def _var(alpha, beta):
return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
reseed()
eps = np.finfo(np.float32).eps
param = iap.Beta(0.5, 0.5)
sample = param.draw_sample()
samples = param.draw_samples((100, 1000))
samples_direct = np.random.RandomState(1234).beta(a=0.5, b=0.5, size=(100, 1000))
assert sample.shape == tuple()
assert samples.shape == (100, 1000)
assert 0 - eps < sample < 1.0 + eps
assert np.all(np.logical_and(0 - eps <= samples, samples <= 1.0 + eps))
assert param.__str__() == param.__repr__() == "Beta(Deterministic(float 0.50000000), Deterministic(float 0.50000000))"
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0), density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert density_direct - tolerance < density < density_direct + tolerance
param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
expected_first = _mean(0.5, 0.5)
expected_second = _mean(2, 0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((10000,))
observed = np.mean(samples)
if expected_first - 0.05 < observed < expected_first + 0.05:
seen[0] += 1
elif expected_second - 0.05 < observed < expected_second + 0.05:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
param1 = iap.Beta(2, 2)
param2 = iap.Beta(0.5, 0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
assert np.var(samples1) < np.var(samples2)
expected_first = _var(2, 2)
expected_second = _var(0.5, 0.5)
assert expected_first - 0.1 * expected_first < np.var(samples1) < expected_first + 0.1 * expected_first
assert expected_second - 0.1 * expected_second < np.var(samples2) < expected_second + 0.1 * expected_second
param = iap.Beta(0.5, 0.5)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
def test_parameters_Deterministic():
reseed()
eps = np.finfo(np.float32).eps
values_int = [-100, -54, -1, 0, 1, 54, 100]
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for value in values_int:
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert sample1 == sample2
samples1 = param.draw_samples(10)
samples2 = param.draw_samples(10)
samples3 = param.draw_samples((5, 3))
samples4 = param.draw_samples((5, 3))
samples5 = param.draw_samples((4, 5, 3))
samples6 = param.draw_samples((4, 5, 3))
samples1_unique = np.unique(samples1)
samples2_unique = np.unique(samples2)
samples3_unique = np.unique(samples3)
samples4_unique = np.unique(samples4)
samples5_unique = np.unique(samples5)
samples6_unique = np.unique(samples6)
assert samples1.shape == (10,)
assert samples2.shape == (10,)
assert samples3.shape == (5, 3)
assert samples4.shape == (5, 3)
assert samples5.shape == (4, 5, 3)
assert samples6.shape == (4, 5, 3)
assert len(samples1_unique) == 1 and samples1_unique[0] == value
assert len(samples2_unique) == 1 and samples2_unique[0] == value
assert len(samples3_unique) == 1 and samples3_unique[0] == value
assert len(samples4_unique) == 1 and samples4_unique[0] == value
assert len(samples5_unique) == 1 and samples5_unique[0] == value
assert len(samples6_unique) == 1 and samples6_unique[0] == value
rs1 = np.random.RandomState(123456)
rs2 = np.random.RandomState(123456)
assert np.array_equal(
param.draw_samples(20, random_state=rs1),
param.draw_samples(20, random_state=rs2)
)
for value in values_float:
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert sample1 - eps < sample2 < sample1 + eps
samples1 = param.draw_samples(10)
samples2 = param.draw_samples(10)
samples3 = param.draw_samples((5, 3))
samples4 = param.draw_samples((5, 3))
samples5 = param.draw_samples((4, 5, 3))
samples6 = param.draw_samples((4, 5, 3))
samples1_sorted = np.sort(samples1)
samples2_sorted = np.sort(samples2)
samples3_sorted = np.sort(samples3.flatten())
samples4_sorted = np.sort(samples4.flatten())
samples5_sorted = np.sort(samples5.flatten())
samples6_sorted = np.sort(samples6.flatten())
assert samples1.shape == (10,)
assert samples2.shape == (10,)
assert samples3.shape == (5, 3)
assert samples4.shape == (5, 3)
assert samples5.shape == (4, 5, 3)
assert samples6.shape == (4, 5, 3)
assert samples1_sorted[0] - eps < samples1_sorted[-1] < samples1_sorted[0] + eps
assert samples2_sorted[0] - eps < samples2_sorted[-1] < samples2_sorted[0] + eps
assert samples3_sorted[0] - eps < samples3_sorted[-1] < samples3_sorted[0] + eps
assert samples4_sorted[0] - eps < samples4_sorted[-1] < samples4_sorted[0] + eps
assert samples5_sorted[0] - eps < samples5_sorted[-1] < samples5_sorted[0] + eps
assert samples6_sorted[0] - eps < samples6_sorted[-1] < samples6_sorted[0] + eps
rs1 = np.random.RandomState(123456)
rs2 = np.random.RandomState(123456)
assert np.allclose(
param.draw_samples(20, random_state=rs1),
param.draw_samples(20, random_state=rs2)
)
param = iap.Deterministic(0)
assert param.__str__() == param.__repr__() == "Deterministic(int 0)"
param = iap.Deterministic(1.0)
assert param.__str__() == param.__repr__() == "Deterministic(float 1.00000000)"
param = iap.Deterministic("test")
assert param.__str__() == param.__repr__() == "Deterministic(test)"
seen = [0, 0]
for _ in sm.xrange(200):
param = iap.Deterministic(iap.Choice([0, 1]))
seen[param.value] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
got_exception = False
try:
param = iap.Deterministic([1, 2, 3])
except Exception as exc:
assert "Expected StochasticParameter object or number or string" in str(exc)
got_exception = True
assert got_exception
def test_parameters_FromLowerResolution():
reseed()
eps = np.finfo(np.float32).eps
# (H, W, C)
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 1))
assert samples.shape == (8, 8, 1)
uq = np.unique(samples)
assert len(uq) == 2 and (0 in uq or 1 in uq)
# (N, H, W, C)
samples_nhwc = param.draw_samples((1, 8, 8, 1))
assert samples_nhwc.shape == (1, 8, 8, 1)
uq = np.unique(samples_nhwc)
assert len(uq) == 2 and (0 in uq or 1 in uq)
# (N, H, W, C, something) causing error
got_exception = False
try:
samples_nhwcx = param.draw_samples((1, 8, 8, 1, 1))
except Exception as exc:
assert "FromLowerResolution can only generate samples of shape" in str(exc)
got_exception = True
assert got_exception
# C=3
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 3))
assert samples.shape == (8, 8, 3)
uq = np.unique(samples)
assert len(uq) == 2 and (0 in uq or 1 in uq)
# different sizes in px
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1]
# different sizes in px, one given as tuple (a, b)
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(400):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1]
# different sizes in px, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=iap.Deterministic(1))
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=iap.Choice([8, 16]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1]
# bad datatype for size_px
got_exception = False
try:
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# min_size
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1, min_size=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1]
# different sizes in percent
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1]
# different sizes in percent, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=iap.Deterministic(0.01))
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=iap.Choice([0.4, 0.8]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1]
# bad datatype for size_percent
got_exception = False
try:
param = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# method given as StochasticParameter
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4, method=iap.Choice(["nearest", "linear"]))
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((16, 16, 1))
nb_in_between = np.sum(np.logical_and(samples < 0.95, samples > 0.05))
if nb_in_between == 0:
seen[0] += 1
else:
seen[1] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
# bad datatype for method
got_exception = False
try:
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4, method=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# multiple calls with same random_state
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
samples1 = param.draw_samples((10, 5, 1), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5, 1), random_state=np.random.RandomState(1234))
assert np.allclose(samples1, samples2)
# str / repr
param = iap.FromLowerResolution(other_param=iap.Deterministic(0), size_percent=1, method="nearest")
assert param.__str__() == param.__repr__() == "FromLowerResolution(size_percent=Deterministic(int 1), method=Deterministic(nearest), other_param=Deterministic(int 0))"
param = iap.FromLowerResolution(other_param=iap.Deterministic(0), size_px=1, method="nearest")
assert param.__str__() == param.__repr__() == "FromLowerResolution(size_px=Deterministic(int 1), method=Deterministic(nearest), other_param=Deterministic(int 0))"
def test_parameters_Clip():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Clip(iap.Deterministic(0), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), -1.000000, 1.000000)"
param = iap.Clip(iap.Deterministic(1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
param = iap.Clip(iap.Deterministic(-1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
param = iap.Clip(iap.Deterministic(0.5), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 0.5 - eps < sample < 0.5 + eps
assert np.all(np.logical_and(0.5 - eps < samples, samples < 0.5 + eps))
param = iap.Clip(iap.Deterministic(2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
param = iap.Clip(iap.Deterministic(-2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
param = iap.Clip(iap.Deterministic(0), None, 1)
sample = param.draw_sample()
assert sample == 0
assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), None, 1.000000)"
param = iap.Clip(iap.Deterministic(0), 0, None)
sample = param.draw_sample()
assert sample == 0
assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), 0.000000, None)"
param = iap.Clip(iap.Deterministic(0), None, None)
sample = param.draw_sample()
assert sample == 0
assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), None, None)"
def test_parameters_Discretize():
reseed()
eps = np.finfo(np.float32).eps
values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043, 0, 0.00043, 0.7, 1.0, 1, 54.3, 100.2]
for value in values:
value_expected = np.round(np.float64([value])).astype(np.int32)[0]
param = iap.Discretize(iap.Deterministic(value))
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == value_expected
assert np.all(samples == value_expected)
param_orig = iap.DiscreteUniform(0, 1)
param = iap.Discretize(param_orig)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param_orig.draw_samples((10000,))
samples2 = param.draw_samples((10000,))
assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3))
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234))
assert np.array_equal(samples1, samples2)
param = iap.Discretize(iap.Deterministic(0))
assert param.__str__() == param.__repr__() == "Discretize(Deterministic(int 0))"
def test_parameters_Multiply():
reseed()
eps = np.finfo(np.float32).eps
values_int = [-100, -54, -1, 0, 1, 54, 100]
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1 in values_int:
for v2 in values_int:
p = iap.Multiply(iap.Deterministic(v1), v2)
assert p.draw_sample() == v1 * v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.int64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 * v2)
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
assert p.draw_sample() == v1 * v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.int64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 * v2)
for v1 in values_float:
for v2 in values_float:
p = iap.Multiply(iap.Deterministic(v1), v2)
assert v1 * v2 - eps < p.draw_sample() < v1 * v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 * v2)
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
assert v1 * v2 - eps < p.draw_sample() < v1 * v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 * v2)
param = iap.Multiply(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - eps)
assert np.all(samples < 1.0 * 2.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps
param = iap.Multiply(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - eps)
assert np.all(samples < 1.0 * 2.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - eps)
assert np.all(samples < 2.0 * 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - eps)
assert np.all(samples < 2.0 * 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False)
assert param.__str__() == param.__repr__() == "Multiply(Deterministic(int 0), Deterministic(int 1), False)"
def test_parameters_Divide():
reseed()
eps = np.finfo(np.float32).eps
values_int = [-100, -54, -1, 0, 1, 54, 100]
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1 in values_int:
for v2 in values_int:
if v2 == 0:
v2 = 1
p = iap.Divide(iap.Deterministic(v1), v2)
assert p.draw_sample() == v1 / v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2)
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
assert p.draw_sample() == v1 / v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2)
for v1 in values_float:
for v2 in values_float:
if v2 == 0:
v2 = 1
p = iap.Divide(iap.Deterministic(v1), v2)
assert v1 / v2 - eps < p.draw_sample() < v1 / v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2)
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
assert v1 / v2 - eps < p.draw_sample() < v1 / v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2)
param = iap.Divide(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 / 2.0 - eps)
assert np.all(samples < 1.0 / 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps
param = iap.Divide(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 / 2.0 - eps)
assert np.all(samples < 1.0 / 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Divide(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 / 1.0 - eps)
assert np.all(samples < 2.0 / 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Divide(iap.Deterministic(1), 0, elementwise=False)
sample = param.draw_sample()
assert sample == 1
param = iap.Divide(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 / 1.0 - eps)
assert np.all(samples < 2.0 / 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
# test division by zero automatically being converted to division by 1
param = iap.Divide(2, iap.Choice([0, 2]), elementwise=True)
samples = param.draw_samples((10, 20))
samples_unique = np.sort(np.unique(samples.flatten()))
assert samples_unique[0] == 1 and samples_unique[1] == 2
param = iap.Divide(iap.Deterministic(0), 1, elementwise=False)
assert param.__str__() == param.__repr__() == "Divide(Deterministic(int 0), Deterministic(int 1), False)"
def test_parameters_Add():
reseed()
eps = np.finfo(np.float32).eps
values_int = [-100, -54, -1, 0, 1, 54, 100]
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1 in values_int:
for v2 in values_int:
p = iap.Add(iap.Deterministic(v1), v2)
assert p.draw_sample() == v1 + v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.int64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 + v2)
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
assert p.draw_sample() == v1 + v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.int64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 + v2)
for v1 in values_float:
for v2 in values_float:
p = iap.Add(iap.Deterministic(v1), v2)
assert v1 + v2 - eps < p.draw_sample() < v1 + v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 + v2)
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
assert v1 + v2 - eps < p.draw_sample() < v1 + v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 + v2)
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 + 1.0 - eps)
assert np.all(samples < 1.0 + 2.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 + 1.0 - eps)
assert np.all(samples < 1.0 + 2.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 + 1.0 - eps)
assert np.all(samples < 2.0 + 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 + 1.0 - eps)
assert np.all(samples < 2.0 + 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Add(iap.Deterministic(0), 1, elementwise=False)
assert param.__str__() == param.__repr__() == "Add(Deterministic(int 0), Deterministic(int 1), False)"
def test_parameters_Subtract():
reseed()
eps = np.finfo(np.float32).eps
values_int = [-100, -54, -1, 0, 1, 54, 100]
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1 in values_int:
for v2 in values_int:
p = iap.Subtract(iap.Deterministic(v1), v2)
assert p.draw_sample() == v1 - v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.int64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 - v2)
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
assert p.draw_sample() == v1 - v2
samples = p.draw_samples((2, 3))
assert samples.dtype == np.int64
assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 - v2)
for v1 in values_float:
for v2 in values_float:
p = iap.Subtract(iap.Deterministic(v1), v2)
assert v1 - v2 - eps < p.draw_sample() < v1 - v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 - v2)
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
assert v1 - v2 - eps < p.draw_sample() < v1 - v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 - v2)
param = iap.Subtract(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - eps)
assert np.all(samples < 1.0 - 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps
param = iap.Subtract(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - eps)
assert np.all(samples < 1.0 - 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - eps)
assert np.all(samples < 2.0 - 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - eps)
assert np.all(samples < 2.0 - 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False)
assert param.__str__() == param.__repr__() == "Subtract(Deterministic(int 0), Deterministic(int 1), False)"
def test_parameters_Power():
reseed()
eps = np.finfo(np.float32).eps
values = [-100, -54, -1, 0, 1, 54, 100]
values = values + [float(v) for v in values]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
for v1 in values:
for v2 in exponents:
if v1 < 0 and ia.is_single_float(v2):
continue
if v1 == 0 and v2 < 0:
continue
p = iap.Power(iap.Deterministic(v1), v2)
assert v1 ** v2 - eps < p.draw_sample() < v1 ** v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 ** v2)
p = iap.Power(iap.Deterministic(v1), iap.Deterministic(v2))
assert v1 ** v2 - eps < p.draw_sample() < v1 ** v2 + eps
samples = p.draw_samples((2, 3))
assert samples.dtype == np.float64
assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 ** v2)
param = iap.Power(iap.Deterministic(1.5), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - eps)
assert np.all(samples < 1.5 ** 2.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps
param = iap.Power(iap.Deterministic(1.5), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - eps)
assert np.all(samples < 1.5 ** 2.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - eps)
assert np.all(samples < 2.0 ** 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - eps)
assert np.all(samples < 2.0 ** 1.0 + eps)
samples_sorted = np.sort(samples.flatten())
assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps)
param = iap.Power(iap.Deterministic(0), 1, elementwise=False)
assert param.__str__() == param.__repr__() == "Power(Deterministic(int 0), Deterministic(int 1), False)"
def test_parameters_Absolute():
reseed()
eps = np.finfo(np.float32).eps
simple_values = [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5]
for value in simple_values:
param = iap.Absolute(iap.Deterministic(value))
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
if ia.is_single_float(value):
assert abs(value) - eps < sample < abs(value) + eps
assert np.all(abs(value) - eps < samples)
assert np.all(samples < abs(value) + eps)
else:
assert sample == abs(value)
assert np.all(samples == abs(value))
param = iap.Absolute(iap.Choice([-3, -1, 1, 3]))
sample = param.draw_sample()
samples = param.draw_samples((10, 10))
samples_uq = np.sort(np.unique(samples))
assert sample.shape == tuple()
assert sample in [3, 1]
assert samples.shape == (10, 10)
assert len(samples_uq) == 2
assert samples_uq[0] == 1 and samples_uq[1] == 3
param = iap.Absolute(iap.Deterministic(0))
assert param.__str__() == param.__repr__() == "Absolute(Deterministic(int 0))"
def test_parameters_RandomSign():
reseed()
param = iap.RandomSign(iap.Deterministic(1))
samples = param.draw_samples((1000,))
n_positive = np.sum(samples == 1)
n_negative = np.sum(samples == -1)
assert samples.shape == (1000,)
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
assert sample.shape == tuple()
if sample == 1:
seen[1] += 1
else:
seen[0] += 1
n_negative, n_positive = seen
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
param = iap.RandomSign(iap.Choice([1, 2]))
samples = param.draw_samples((4000,))
seen = [0, 0, 0, 0]
seen[0] = np.sum(samples == -2)
seen[1] = np.sum(samples == -1)
seen[2] = np.sum(samples == 1)
seen[3] = np.sum(samples == 2)
assert np.sum(seen) == 4000
assert all([700 < v < 1300 for v in seen])
param = iap.RandomSign(iap.Choice([1, 2]))
samples1 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
assert np.sum(samples == -2) > 50
assert np.sum(samples == -1) > 50
assert np.sum(samples == 1) > 50
assert np.sum(samples == 2) > 50
param = iap.RandomSign(iap.Deterministic(0), 0.5)
assert param.__str__() == param.__repr__() == "RandomSign(Deterministic(int 0), 0.50)"
def test_parameters_ForceSign():
reseed()
param = iap.ForceSign(iap.Deterministic(1), positive=True, mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == 1
param = iap.ForceSign(iap.Deterministic(1), positive=False, mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == -1
param = iap.ForceSign(iap.Deterministic(1), positive=True, mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
param = iap.ForceSign(iap.Deterministic(1), positive=False, mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
param = iap.ForceSign(iap.Deterministic(-1), positive=True, mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
param = iap.ForceSign(iap.Deterministic(-1), positive=False, mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="invert")
samples = param.draw_samples(1000)
assert samples.shape == (1000,)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert n_twos + n_ones == 1000
assert 200 < n_twos < 700
assert 200 < n_ones < 700
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="reroll")
samples = param.draw_samples(1000)
assert samples.shape == (1000,)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert n_twos + n_ones == 1000
assert n_twos > 0
assert n_ones > 0
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="reroll", reroll_count_max=100)
samples = param.draw_samples(100)
assert samples.shape == (100,)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert n_twos + n_ones == 100
assert n_twos < 5
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="invert")
samples1 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1)
assert param.__str__() == param.__repr__() == "ForceSign(Deterministic(int 0), True, invert, 1)"
def test_parameters_Positive():
reseed()
param = iap.Positive(iap.Deterministic(-1), mode="reroll", reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_parameters_Negative():
reseed()
param = iap.Negative(iap.Deterministic(1), mode="reroll", reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_parameters_IterativeNoiseAggregator():
reseed()
eps = np.finfo(np.float32).eps
param = iap.IterativeNoiseAggregator(iap.Deterministic(1), iterations=1, aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 1
assert np.all(samples == 1)
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=200, aggregation_method="avg")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert 25 - 10 < sample < 25 + 10
assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10))
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=100, aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 50
assert np.all(samples == 50)
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=100, aggregation_method="min")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 0
assert np.all(samples == 0)
seen = [0, 0, 0]
for _ in sm.xrange(100):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=100, aggregation_method=["avg", "max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_25 = abs(25 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_25 < 10.0:
seen[0] += 1
elif diff_50 < eps:
seen[1] += 1
elif diff_0 < eps:
seen[2] += 1
else:
assert False
assert seen[2] < 5
assert 50 - 20 < seen[0] < 50 + 20
assert 50 - 20 < seen[1] < 50 + 20
# iterations as tuple
param = iap.IterativeNoiseAggregator(iap.Uniform(-1.0, 1.0), iterations=(1, 100), aggregation_method="avg")
diffs = []
for _ in sm.xrange(100):
samples = param.draw_samples((1, 1))
diff = abs(samples[0, 0] - 0.0)
diffs.append(diff)
nb_bins = 3
nb_iterations = 100
hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0), density=False)
#density_expected = 1.0/nb_bins
#density_tolerance = 0.1
#for nb_samples in hist:
# density = nb_samples / nb_iterations
# print(hist, nb_samples, nb_iterations, density)
# assert density_expected - density_tolerance < density < density_expected + density_tolerance
assert hist[1] > hist[0]
assert hist[1] > hist[2]
# iterations as list
seen = [0, 0]
for _ in sm.xrange(400):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=[1, 100], aggregation_method=["max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_50 < eps:
seen[0] += 1
elif diff_0 < eps:
seen[1] += 1
else:
assert False
assert 300 - 50 < seen[0] < 300 + 50
assert 100 - 50 < seen[1] < 100 + 50
# test ia.ALL as aggregation_method
# note that each method individually and list of methods are already tested, so no in depth
# test is needed here
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL)
assert isinstance(param.aggregation_method, iap.Choice)
assert len(param.aggregation_method.a) == 3
assert [v in param.aggregation_method.a for v in ["min", "avg", "max"]]
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=2, aggregation_method="max")
samples = param.draw_samples((2, 1000))
nb_0 = np.sum(samples == 0)
nb_50 = np.sum(samples == 50)
assert nb_0 + nb_50 == 2 * 1000
assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=5, aggregation_method="avg")
samples1 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.allclose(samples1, samples2)
# StochasticParameter as aggregation_method
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=5, aggregation_method=iap.Deterministic("max"))
assert isinstance(param.aggregation_method, iap.Deterministic)
assert param.aggregation_method.value == "max"
# bad datatype as aggregation_method
got_exception = False
try:
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=5, aggregation_method=False)
except Exception as exc:
assert "Expected aggregation_method to be" in str(exc)
got_exception = True
assert got_exception
# bad datatype as for iterations
got_exception = False
try:
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=False, aggregation_method="max")
except Exception as exc:
assert "Expected iterations to be" in str(exc)
got_exception = True
assert got_exception
param = iap.IterativeNoiseAggregator(iap.Deterministic(0), iterations=(1, 3), aggregation_method="max")
assert param.__str__() == param.__repr__() == "IterativeNoiseAggregator(Deterministic(int 0), DiscreteUniform(Deterministic(int 1), Deterministic(int 3)), Deterministic(max))"
def test_parameters_Sigmoid():
reseed()
eps = np.finfo(np.float32).eps
param = iap.Sigmoid(iap.Deterministic(5), add=0, mul=1, threshold=0.5, activated=True)
expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - eps < sample < expected + eps
assert np.all(np.logical_and(expected - eps < samples, samples < expected + eps))
param = iap.Sigmoid(iap.Deterministic(5), add=0, mul=1, threshold=0.5, activated=False)
expected = 5
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - eps < sample < expected + eps
assert np.all(np.logical_and(expected - eps < samples, samples < expected + eps))
param = iap.Sigmoid(iap.Deterministic(5), add=0, mul=1, threshold=0.5, activated=0.5)
expected_first = 5
expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < eps:
seen[0] += 1
elif diff_second < eps:
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
param = iap.Sigmoid(iap.Choice([1, 10]), add=0, mul=1, threshold=0.5, activated=True)
expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5)))
expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < eps:
seen[0] += 1
elif diff_second < eps:
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
muls = [0.1, 1, 10.3]
adds = [-5.7, -1, -0.0734, 0, 0.0734, 1, 5.7]
vals = [-1, -0.7, 0, 0.7, 1]
threshs = [-5.7, -1, -0.0734, 0, 0.0734, 1, 5.7]
for mul in muls:
for add in adds:
for val in vals:
for thresh in threshs:
param = iap.Sigmoid(iap.Deterministic(val), add=add, mul=mul, threshold=thresh)
sample = param.draw_sample()
samples = param.draw_samples((2, 3))
assert sample.shape == tuple()
assert samples.shape == (2, 3)
expected = 1 / (1 + np.exp(-(val * mul + add - thresh)))
assert expected - eps < sample < expected + eps
assert np.all(np.logical_and(expected - eps < samples, samples < expected + eps))
param = iap.Sigmoid(iap.Choice([1, 10]), add=0, mul=1, threshold=0.5, activated=True)
samples1 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
samples2 = param.draw_samples((100, 10), random_state=np.random.RandomState(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
param = iap.Sigmoid(iap.Deterministic(0), threshold=(-10, 10), activated=True, mul=1, add=0)
assert param.__str__() == param.__repr__() == "Sigmoid(Deterministic(int 0), Uniform(Deterministic(int -10), Deterministic(int 10)), Deterministic(int 1), 1, 0)"
def test_parameters_operators():
reseed()
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
# Multiply
param3 = param1 * param2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert param3.val == param2
param3 = param1 * 2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
param3 = 2 * param1
assert isinstance(param3, iap.Multiply)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
got_exception = False
try:
param3 = "test" * param1
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param3 = param1 * "test"
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Divide (__truediv__)
param3 = param1 / param2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
param3 = param1 / 2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
param3 = 2 / param1
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
got_exception = False
try:
param3 = "test" / param1
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param3 = param1 / "test"
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Divide (__div__)
param3 = param1.__div__(param2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
param3 = param1.__div__(2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
got_exception = False
try:
param3 = param1.__div__("test")
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Divide (__rdiv__)
param3 = param1.__rdiv__(2)
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
got_exception = False
try:
param3 = param1.__rdiv__("test")
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Divide (__floordiv__)
param1_int = iap.DiscreteUniform(0, 10)
param2_int = iap.Choice([1, 2])
param3 = param1_int // param2_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert param3.other_param.val == param2_int
param3 = param1_int // 2
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert isinstance(param3.other_param.val, iap.Deterministic)
assert param3.other_param.val.value == 2
param3 = 2 // param1_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert isinstance(param3.other_param.other_param, iap.Deterministic)
assert param3.other_param.other_param.value == 2
assert param3.other_param.val == param1_int
got_exception = False
try:
param3 = "test" // param1_int
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param3 = param1_int // "test"
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Add
param3 = param1 + param2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert param3.val == param2
param3 = param1 + 2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
param3 = 2 + param1
assert isinstance(param3, iap.Add)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
got_exception = False
try:
param3 = "test" + param1
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param3 = param1 + "test"
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Subtract
param3 = param1 - param2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert param3.val == param2
param3 = param1 - 2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
param3 = 2 - param1
assert isinstance(param3, iap.Subtract)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
got_exception = False
try:
param3 = "test" - param1
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param3 = param1 - "test"
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
# Power
param3 = param1 ** param2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert param3.val == param2
param3 = param1 ** 2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
param3 = 2 ** param1
assert isinstance(param3, iap.Power)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
got_exception = False
try:
param3 = "test" ** param1
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
got_exception = False
try:
param3 = param1 ** "test"
except Exception as exc:
assert "Invalid datatypes" in str(exc)
got_exception = True
assert got_exception
def test_parameters_copy():
reseed()
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.copy()
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
param.other_param.a[0] += 1
assert param_copy.other_param.a[0] == param.other_param.a[0]
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.deepcopy()
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
param.other_param.a[0] += 1
assert param_copy.other_param.a[0] != param.other_param.a[0]
def create_random_images(size):
return np.random.uniform(0, 255, size).astype(np.uint8)
def create_random_keypoints(size_images, nb_keypoints_per_img):
result = []
for i in sm.xrange(size_images[0]):
kps = []
height, width = size_images[1], size_images[2]
for i in sm.xrange(nb_keypoints_per_img):
x = np.random.randint(0, width-1)
y = np.random.randint(0, height-1)
kps.append(ia.Keypoint(x=x, y=y))
result.append(ia.KeypointsOnImage(kps, shape=size_images[1:]))
return result
def array_equal_lists(list1, list2):
assert isinstance(list1, list)
assert isinstance(list2, list)
if len(list1) != len(list2):
return False
for a, b in zip(list1, list2):
if not np.array_equal(a, b):
return False
return True
def keypoints_equal(kps1, kps2, eps=0.001):
if len(kps1) != len(kps2):
return False
for i in sm.xrange(len(kps1)):
a = kps1[i].keypoints
b = kps2[i].keypoints
if len(a) != len(b):
return False
for j in sm.xrange(len(a)):
x_equal = float(b[j].x) - eps <= float(a[j].x) <= float(b[j].x) + eps
y_equal = float(b[j].y) - eps <= float(a[j].y) <= float(b[j].y) + eps
if not x_equal or not y_equal:
return False
return True
def reseed(seed=0):
ia.seed(seed)
np.random.seed(seed)
random.seed(seed)
if __name__ == "__main__":
main()
| 37.803366
| 189
| 0.621744
| 81,829
| 563,875
| 4.132985
| 0.015813
| 0.009557
| 0.008436
| 0.00757
| 0.882589
| 0.853352
| 0.825862
| 0.803083
| 0.781421
| 0.758094
| 0
| 0.067731
| 0.239599
| 563,875
| 14,915
| 190
| 37.8059
| 0.721031
| 0.04966
| 0
| 0.701744
| 0
| 0.001117
| 0.018699
| 0.003572
| 0
| 0
| 0
| 0.000335
| 0.306675
| 1
| 0.017181
| false
| 0.001031
| 0.00146
| 0.002577
| 0.025513
| 0.000258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbc7b94b2b22071f0b5fb6ebb229878cd409ec2e
| 118
|
py
|
Python
|
tests/test_case.py
|
dpoehls/textual
|
f47b3e089c681275c48c0debc7a320b66a772a50
|
[
"MIT"
] | 6,706
|
2021-06-08T17:14:36.000Z
|
2022-01-05T09:53:23.000Z
|
tests/test_case.py
|
dpoehls/textual
|
f47b3e089c681275c48c0debc7a320b66a772a50
|
[
"MIT"
] | 97
|
2022-01-05T11:35:14.000Z
|
2022-03-30T19:58:48.000Z
|
tests/test_case.py
|
dpoehls/textual
|
f47b3e089c681275c48c0debc7a320b66a772a50
|
[
"MIT"
] | 166
|
2021-06-12T11:11:19.000Z
|
2022-01-04T05:32:32.000Z
|
from textual.case import camel_to_snake
def test_camel_to_snake():
assert camel_to_snake("FooBar") == "foo_bar"
| 19.666667
| 48
| 0.762712
| 19
| 118
| 4.315789
| 0.684211
| 0.256098
| 0.439024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 118
| 5
| 49
| 23.6
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0.110169
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
dbdb4ab7cf41f0b474d7de7f1c06a177d6fbe314
| 640
|
py
|
Python
|
numeric-data.py
|
N-S-Young/aws_restart
|
83cdba5a1c475a3af2cef7e9c7d11309b2a042d6
|
[
"MIT"
] | null | null | null |
numeric-data.py
|
N-S-Young/aws_restart
|
83cdba5a1c475a3af2cef7e9c7d11309b2a042d6
|
[
"MIT"
] | null | null | null |
numeric-data.py
|
N-S-Young/aws_restart
|
83cdba5a1c475a3af2cef7e9c7d11309b2a042d6
|
[
"MIT"
] | null | null | null |
print("Python has three numeric types: int, float, and complex")
myValue=1
print(myValue)
print(type(myValue))
print(str(myValue) + " is of the data type " + str(type(myValue)))
myValue=3.14
print(myValue)
print(type(myValue))
print(str(myValue) + " is of the data type " + str(type(myValue)))
myValue=5j
print(myValue)
print(type(myValue))
print(str(myValue) + " is of the data type " + str(type(myValue)))
myValue=True
print(myValue)
print(type(myValue))
print(str(myValue) + " is of the data type " + str(type(myValue)))
myValue=False
print(myValue)
print(type(myValue))
print(str(myValue) + " is of the data type " + str(type(myValue)))
| 30.47619
| 66
| 0.717188
| 101
| 640
| 4.544554
| 0.227723
| 0.261438
| 0.185185
| 0.228758
| 0.845316
| 0.845316
| 0.845316
| 0.845316
| 0.845316
| 0.845316
| 0
| 0.008897
| 0.121875
| 640
| 21
| 67
| 30.47619
| 0.807829
| 0
| 0
| 0.714286
| 0
| 0
| 0.24961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.761905
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
917b95d5c9bc1dabf703b7021026ef61d2a7b037
| 138,196
|
py
|
Python
|
sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/models/_models_py3.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/models/_models_py3.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/models/_models_py3.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._time_series_insights_client_enums import *
class AccessPolicyCreateOrUpdateParameters(msrest.serialization.Model):
"""AccessPolicyCreateOrUpdateParameters.
:param principal_object_id: The objectId of the principal in Azure Active Directory.
:type principal_object_id: str
:param description: An description of the access policy.
:type description: str
:param roles: The list of roles the principal is assigned on the environment.
:type roles: list[str or ~azure.mgmt.timeseriesinsights.models.AccessPolicyRole]
"""
_attribute_map = {
'principal_object_id': {'key': 'properties.principalObjectId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': '[str]'},
}
def __init__(
self,
*,
principal_object_id: Optional[str] = None,
description: Optional[str] = None,
roles: Optional[List[Union[str, "AccessPolicyRole"]]] = None,
**kwargs
):
super(AccessPolicyCreateOrUpdateParameters, self).__init__(**kwargs)
self.principal_object_id = principal_object_id
self.description = description
self.roles = roles
class AccessPolicyListResponse(msrest.serialization.Model):
"""The response of the List access policies operation.
:param value: Result of the List access policies operation.
:type value: list[~azure.mgmt.timeseriesinsights.models.AccessPolicyResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyResource]'},
}
def __init__(
self,
*,
value: Optional[List["AccessPolicyResource"]] = None,
**kwargs
):
super(AccessPolicyListResponse, self).__init__(**kwargs)
self.value = value
class Resource(msrest.serialization.Model):
"""Time Series Insights resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AccessPolicyResource(Resource):
"""An access policy is used to grant users and applications access to the environment. Roles are assigned to service principals in Azure Active Directory. These roles define the actions the principal can perform through the Time Series Insights data plane APIs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param principal_object_id: The objectId of the principal in Azure Active Directory.
:type principal_object_id: str
:param description: An description of the access policy.
:type description: str
:param roles: The list of roles the principal is assigned on the environment.
:type roles: list[str or ~azure.mgmt.timeseriesinsights.models.AccessPolicyRole]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'principal_object_id': {'key': 'properties.principalObjectId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': '[str]'},
}
def __init__(
self,
*,
principal_object_id: Optional[str] = None,
description: Optional[str] = None,
roles: Optional[List[Union[str, "AccessPolicyRole"]]] = None,
**kwargs
):
super(AccessPolicyResource, self).__init__(**kwargs)
self.principal_object_id = principal_object_id
self.description = description
self.roles = roles
class AccessPolicyUpdateParameters(msrest.serialization.Model):
"""AccessPolicyUpdateParameters.
:param description: An description of the access policy.
:type description: str
:param roles: The list of roles the principal is assigned on the environment.
:type roles: list[str or ~azure.mgmt.timeseriesinsights.models.AccessPolicyRole]
"""
_attribute_map = {
'description': {'key': 'properties.description', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': '[str]'},
}
def __init__(
self,
*,
description: Optional[str] = None,
roles: Optional[List[Union[str, "AccessPolicyRole"]]] = None,
**kwargs
):
super(AccessPolicyUpdateParameters, self).__init__(**kwargs)
self.description = description
self.roles = roles
class ResourceProperties(msrest.serialization.Model):
"""Properties that are common to all tracked resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.creation_time = None
class EventSourceCommonProperties(ResourceProperties):
"""Properties of the event source.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
}
def __init__(
self,
*,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventSourceCommonProperties, self).__init__(**kwargs)
self.timestamp_property_name = timestamp_property_name
class AzureEventSourceProperties(EventSourceCommonProperties):
"""Properties of an event source that reads events from an event broker in Azure.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(AzureEventSourceProperties, self).__init__(timestamp_property_name=timestamp_property_name, **kwargs)
self.event_source_resource_id = event_source_resource_id
class CloudErrorBody(msrest.serialization.Model):
"""Describes a particular API error with an error code and a message.
:param code: An error code that describes the error condition more precisely than an HTTP
status code. Can be used to programmatically handle specific error cases.
:type code: str
:param message: A message that describes the error in detail and provides debugging
information.
:type message: str
:param target: The target of the particular error (for example, the name of the property in
error).
:type target: str
:param details: Contains nested errors that are related to this error.
:type details: list[~azure.mgmt.timeseriesinsights.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class CreateOrUpdateTrackedResourceProperties(msrest.serialization.Model):
"""Properties required to create any resource tracked by Azure Resource Manager.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
"""
_validation = {
'location': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(CreateOrUpdateTrackedResourceProperties, self).__init__(**kwargs)
self.location = location
self.tags = tags
class EnvironmentCreateOrUpdateParameters(CreateOrUpdateTrackedResourceProperties):
"""Parameters supplied to the CreateOrUpdate Environment operation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Gen1EnvironmentCreateOrUpdateParameters, Gen2EnvironmentCreateOrUpdateParameters.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentKind
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
_subtype_map = {
'kind': {'Gen1': 'Gen1EnvironmentCreateOrUpdateParameters', 'Gen2': 'Gen2EnvironmentCreateOrUpdateParameters'}
}
def __init__(
self,
*,
location: str,
sku: "Sku",
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(EnvironmentCreateOrUpdateParameters, self).__init__(location=location, tags=tags, **kwargs)
self.kind = 'EnvironmentCreateOrUpdateParameters' # type: str
self.sku = sku
class EnvironmentListResponse(msrest.serialization.Model):
"""The response of the List Environments operation.
:param value: Result of the List Environments operation.
:type value: list[~azure.mgmt.timeseriesinsights.models.EnvironmentResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[EnvironmentResource]'},
}
def __init__(
self,
*,
value: Optional[List["EnvironmentResource"]] = None,
**kwargs
):
super(EnvironmentListResponse, self).__init__(**kwargs)
self.value = value
class TrackedResource(Resource):
"""Time Series Insights resource that is tracked by Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
class EnvironmentResource(TrackedResource):
"""An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Gen1EnvironmentResource, Gen2EnvironmentResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentResourceKind
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'Gen1': 'Gen1EnvironmentResource', 'Gen2': 'Gen2EnvironmentResource'}
}
def __init__(
self,
*,
location: str,
sku: "Sku",
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(EnvironmentResource, self).__init__(location=location, tags=tags, **kwargs)
self.sku = sku
self.kind = 'EnvironmentResource' # type: str
class EnvironmentResourceProperties(ResourceProperties):
"""Properties of the environment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:ivar data_access_id: An id used to access the environment data, e.g. to query the
environment's events or upload reference data for the environment.
:vartype data_access_id: str
:ivar data_access_fqdn: The fully qualified domain name used to access the environment data,
e.g. to query the environment's events or upload reference data for the environment.
:vartype data_access_fqdn: str
:ivar status: An object that represents the status of the environment, and its internal state
in the Time Series Insights service.
:vartype status: ~azure.mgmt.timeseriesinsights.models.EnvironmentStatus
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'data_access_id': {'readonly': True},
'data_access_fqdn': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'data_access_id': {'key': 'dataAccessId', 'type': 'str'},
'data_access_fqdn': {'key': 'dataAccessFqdn', 'type': 'str'},
'status': {'key': 'status', 'type': 'EnvironmentStatus'},
}
def __init__(
self,
**kwargs
):
super(EnvironmentResourceProperties, self).__init__(**kwargs)
self.data_access_id = None
self.data_access_fqdn = None
self.status = None
class EnvironmentStateDetails(msrest.serialization.Model):
"""An object that contains the details about an environment's state.
:param code: Contains the code that represents the reason of an environment being in a
particular state. Can be used to programmatically handle specific cases.
:type code: str
:param message: A message that describes the state in detail.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(EnvironmentStateDetails, self).__init__(**kwargs)
self.code = code
self.message = message
class EnvironmentStatus(msrest.serialization.Model):
"""An object that represents the status of the environment, and its internal state in the Time Series Insights service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ingress: An object that represents the status of ingress on an environment.
:vartype ingress: ~azure.mgmt.timeseriesinsights.models.IngressEnvironmentStatus
:ivar warm_storage: An object that represents the status of warm storage on an environment.
:vartype warm_storage: ~azure.mgmt.timeseriesinsights.models.WarmStorageEnvironmentStatus
"""
_validation = {
'ingress': {'readonly': True},
'warm_storage': {'readonly': True},
}
_attribute_map = {
'ingress': {'key': 'ingress', 'type': 'IngressEnvironmentStatus'},
'warm_storage': {'key': 'warmStorage', 'type': 'WarmStorageEnvironmentStatus'},
}
def __init__(
self,
**kwargs
):
super(EnvironmentStatus, self).__init__(**kwargs)
self.ingress = None
self.warm_storage = None
class EnvironmentUpdateParameters(msrest.serialization.Model):
"""Parameters supplied to the Update Environment operation.
:param tags: A set of tags. Key-value pairs of additional properties for the environment.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(EnvironmentUpdateParameters, self).__init__(**kwargs)
self.tags = tags
class EventHubEventSourceCommonProperties(AzureEventSourceProperties):
"""Properties of the EventHub event source.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param service_bus_namespace: Required. The name of the service bus that contains the event
hub.
:type service_bus_namespace: str
:param event_hub_name: Required. The name of the event hub.
:type event_hub_name: str
:param consumer_group_name: Required. The name of the event hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the SAS key that grants the Time Series Insights service
access to the event hub. The shared access policies for this key must grant 'Listen'
permissions to the event hub.
:type key_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'service_bus_namespace': {'required': True},
'event_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
'service_bus_namespace': {'key': 'serviceBusNamespace', 'type': 'str'},
'event_hub_name': {'key': 'eventHubName', 'type': 'str'},
'consumer_group_name': {'key': 'consumerGroupName', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
service_bus_namespace: str,
event_hub_name: str,
consumer_group_name: str,
key_name: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceCommonProperties, self).__init__(timestamp_property_name=timestamp_property_name, event_source_resource_id=event_source_resource_id, **kwargs)
self.service_bus_namespace = service_bus_namespace
self.event_hub_name = event_hub_name
self.consumer_group_name = consumer_group_name
self.key_name = key_name
class EventSourceCreateOrUpdateParameters(CreateOrUpdateTrackedResourceProperties):
"""Parameters supplied to the Create or Update Event Source operation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EventHubEventSourceCreateOrUpdateParameters, IoTHubEventSourceCreateOrUpdateParameters.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the event source.Constant filled by server. Possible values
include: "Microsoft.EventHub", "Microsoft.IoTHub".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EventSourceKind
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'local_timestamp': {'key': 'localTimestamp', 'type': 'LocalTimestamp'},
}
_subtype_map = {
'kind': {'Microsoft.EventHub': 'EventHubEventSourceCreateOrUpdateParameters', 'Microsoft.IoTHub': 'IoTHubEventSourceCreateOrUpdateParameters'}
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
**kwargs
):
super(EventSourceCreateOrUpdateParameters, self).__init__(location=location, tags=tags, **kwargs)
self.kind = 'EventSourceCreateOrUpdateParameters' # type: str
self.local_timestamp = local_timestamp
class EventHubEventSourceCreateOrUpdateParameters(EventSourceCreateOrUpdateParameters):
"""Parameters supplied to the Create or Update Event Source operation for an EventHub event source.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the event source.Constant filled by server. Possible values
include: "Microsoft.EventHub", "Microsoft.IoTHub".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EventSourceKind
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param service_bus_namespace: Required. The name of the service bus that contains the event
hub.
:type service_bus_namespace: str
:param event_hub_name: Required. The name of the event hub.
:type event_hub_name: str
:param consumer_group_name: Required. The name of the event hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the SAS key that grants the Time Series Insights service
access to the event hub. The shared access policies for this key must grant 'Listen'
permissions to the event hub.
:type key_name: str
:param shared_access_key: Required. The value of the shared access key that grants the Time
Series Insights service read access to the event hub. This property is not shown in event
source responses.
:type shared_access_key: str
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'service_bus_namespace': {'required': True},
'event_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
'shared_access_key': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'local_timestamp': {'key': 'localTimestamp', 'type': 'LocalTimestamp'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'properties.timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'properties.eventSourceResourceId', 'type': 'str'},
'service_bus_namespace': {'key': 'properties.serviceBusNamespace', 'type': 'str'},
'event_hub_name': {'key': 'properties.eventHubName', 'type': 'str'},
'consumer_group_name': {'key': 'properties.consumerGroupName', 'type': 'str'},
'key_name': {'key': 'properties.keyName', 'type': 'str'},
'shared_access_key': {'key': 'properties.sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
event_source_resource_id: str,
service_bus_namespace: str,
event_hub_name: str,
consumer_group_name: str,
key_name: str,
shared_access_key: str,
tags: Optional[Dict[str, str]] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceCreateOrUpdateParameters, self).__init__(location=location, tags=tags, local_timestamp=local_timestamp, **kwargs)
self.kind = 'Microsoft.EventHub' # type: str
self.provisioning_state = None
self.creation_time = None
self.timestamp_property_name = timestamp_property_name
self.event_source_resource_id = event_source_resource_id
self.service_bus_namespace = service_bus_namespace
self.event_hub_name = event_hub_name
self.consumer_group_name = consumer_group_name
self.key_name = key_name
self.shared_access_key = shared_access_key
class EventHubEventSourceCreationProperties(EventHubEventSourceCommonProperties):
"""Properties of the EventHub event source that are required on create or update requests.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param service_bus_namespace: Required. The name of the service bus that contains the event
hub.
:type service_bus_namespace: str
:param event_hub_name: Required. The name of the event hub.
:type event_hub_name: str
:param consumer_group_name: Required. The name of the event hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the SAS key that grants the Time Series Insights service
access to the event hub. The shared access policies for this key must grant 'Listen'
permissions to the event hub.
:type key_name: str
:param shared_access_key: Required. The value of the shared access key that grants the Time
Series Insights service read access to the event hub. This property is not shown in event
source responses.
:type shared_access_key: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'service_bus_namespace': {'required': True},
'event_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
'shared_access_key': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
'service_bus_namespace': {'key': 'serviceBusNamespace', 'type': 'str'},
'event_hub_name': {'key': 'eventHubName', 'type': 'str'},
'consumer_group_name': {'key': 'consumerGroupName', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
'shared_access_key': {'key': 'sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
service_bus_namespace: str,
event_hub_name: str,
consumer_group_name: str,
key_name: str,
shared_access_key: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceCreationProperties, self).__init__(timestamp_property_name=timestamp_property_name, event_source_resource_id=event_source_resource_id, service_bus_namespace=service_bus_namespace, event_hub_name=event_hub_name, consumer_group_name=consumer_group_name, key_name=key_name, **kwargs)
self.shared_access_key = shared_access_key
class EventSourceMutableProperties(msrest.serialization.Model):
"""An object that represents a set of mutable event source resource properties.
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
"""
_attribute_map = {
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'local_timestamp': {'key': 'localTimestamp', 'type': 'LocalTimestamp'},
}
def __init__(
self,
*,
timestamp_property_name: Optional[str] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
**kwargs
):
super(EventSourceMutableProperties, self).__init__(**kwargs)
self.timestamp_property_name = timestamp_property_name
self.local_timestamp = local_timestamp
class EventHubEventSourceMutableProperties(EventSourceMutableProperties):
"""An object that represents a set of mutable EventHub event source resource properties.
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
:param shared_access_key: The value of the shared access key that grants the Time Series
Insights service read access to the event hub. This property is not shown in event source
responses.
:type shared_access_key: str
"""
_attribute_map = {
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'local_timestamp': {'key': 'localTimestamp', 'type': 'LocalTimestamp'},
'shared_access_key': {'key': 'sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
timestamp_property_name: Optional[str] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
shared_access_key: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceMutableProperties, self).__init__(timestamp_property_name=timestamp_property_name, local_timestamp=local_timestamp, **kwargs)
self.shared_access_key = shared_access_key
class EventSourceResource(TrackedResource):
"""An environment receives data from one or more event sources. Each event source has associated connection info that allows the Time Series Insights ingress pipeline to connect to and pull data from the event source.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EventHubEventSourceResource, IoTHubEventSourceResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. The kind of the event source.Constant filled by server. Possible values
include: "Microsoft.EventHub", "Microsoft.IoTHub".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EventSourceResourceKind
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'Microsoft.EventHub': 'EventHubEventSourceResource', 'Microsoft.IoTHub': 'IoTHubEventSourceResource'}
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(EventSourceResource, self).__init__(location=location, tags=tags, **kwargs)
self.kind = 'EventSourceResource' # type: str
class EventHubEventSourceResource(EventSourceResource):
"""An event source that receives its data from an Azure EventHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. The kind of the event source.Constant filled by server. Possible values
include: "Microsoft.EventHub", "Microsoft.IoTHub".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EventSourceResourceKind
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param service_bus_namespace: Required. The name of the service bus that contains the event
hub.
:type service_bus_namespace: str
:param event_hub_name: Required. The name of the event hub.
:type event_hub_name: str
:param consumer_group_name: Required. The name of the event hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the SAS key that grants the Time Series Insights service
access to the event hub. The shared access policies for this key must grant 'Listen'
permissions to the event hub.
:type key_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'service_bus_namespace': {'required': True},
'event_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'properties.timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'properties.eventSourceResourceId', 'type': 'str'},
'service_bus_namespace': {'key': 'properties.serviceBusNamespace', 'type': 'str'},
'event_hub_name': {'key': 'properties.eventHubName', 'type': 'str'},
'consumer_group_name': {'key': 'properties.consumerGroupName', 'type': 'str'},
'key_name': {'key': 'properties.keyName', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
event_source_resource_id: str,
service_bus_namespace: str,
event_hub_name: str,
consumer_group_name: str,
key_name: str,
tags: Optional[Dict[str, str]] = None,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceResource, self).__init__(location=location, tags=tags, **kwargs)
self.kind = 'Microsoft.EventHub' # type: str
self.provisioning_state = None
self.creation_time = None
self.timestamp_property_name = timestamp_property_name
self.event_source_resource_id = event_source_resource_id
self.service_bus_namespace = service_bus_namespace
self.event_hub_name = event_hub_name
self.consumer_group_name = consumer_group_name
self.key_name = key_name
class EventHubEventSourceResourceProperties(EventHubEventSourceCommonProperties):
"""Properties of the EventHub event source resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param service_bus_namespace: Required. The name of the service bus that contains the event
hub.
:type service_bus_namespace: str
:param event_hub_name: Required. The name of the event hub.
:type event_hub_name: str
:param consumer_group_name: Required. The name of the event hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the SAS key that grants the Time Series Insights service
access to the event hub. The shared access policies for this key must grant 'Listen'
permissions to the event hub.
:type key_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'service_bus_namespace': {'required': True},
'event_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
'service_bus_namespace': {'key': 'serviceBusNamespace', 'type': 'str'},
'event_hub_name': {'key': 'eventHubName', 'type': 'str'},
'consumer_group_name': {'key': 'consumerGroupName', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
service_bus_namespace: str,
event_hub_name: str,
consumer_group_name: str,
key_name: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceResourceProperties, self).__init__(timestamp_property_name=timestamp_property_name, event_source_resource_id=event_source_resource_id, service_bus_namespace=service_bus_namespace, event_hub_name=event_hub_name, consumer_group_name=consumer_group_name, key_name=key_name, **kwargs)
class EventSourceUpdateParameters(msrest.serialization.Model):
"""Parameters supplied to the Update Event Source operation.
:param tags: A set of tags. Key-value pairs of additional properties for the event source.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(EventSourceUpdateParameters, self).__init__(**kwargs)
self.tags = tags
class EventHubEventSourceUpdateParameters(EventSourceUpdateParameters):
"""Parameters supplied to the Update Event Source operation to update an EventHub event source.
:param tags: A set of tags. Key-value pairs of additional properties for the event source.
:type tags: dict[str, str]
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
:param shared_access_key: The value of the shared access key that grants the Time Series
Insights service read access to the event hub. This property is not shown in event source
responses.
:type shared_access_key: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'timestamp_property_name': {'key': 'properties.timestampPropertyName', 'type': 'str'},
'local_timestamp': {'key': 'properties.localTimestamp', 'type': 'LocalTimestamp'},
'shared_access_key': {'key': 'properties.sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
timestamp_property_name: Optional[str] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
shared_access_key: Optional[str] = None,
**kwargs
):
super(EventHubEventSourceUpdateParameters, self).__init__(tags=tags, **kwargs)
self.timestamp_property_name = timestamp_property_name
self.local_timestamp = local_timestamp
self.shared_access_key = shared_access_key
class EventSourceListResponse(msrest.serialization.Model):
"""The response of the List EventSources operation.
:param value: Result of the List EventSources operation.
:type value: list[~azure.mgmt.timeseriesinsights.models.EventSourceResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[EventSourceResource]'},
}
def __init__(
self,
*,
value: Optional[List["EventSourceResource"]] = None,
**kwargs
):
super(EventSourceListResponse, self).__init__(**kwargs)
self.value = value
class Gen1EnvironmentCreateOrUpdateParameters(EnvironmentCreateOrUpdateParameters):
"""Parameters supplied to the Create or Update Environment operation for a Gen1 environment.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentKind
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
:param data_retention_time: Required. ISO8601 timespan specifying the minimum number of days
the environment's events will be available for query.
:type data_retention_time: ~datetime.timedelta
:param storage_limit_exceeded_behavior: The behavior the Time Series Insights service should
take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new
events will not be read from the event source. If "PurgeOldData" is specified, new events will
continue to be read and old events will be deleted from the environment. The default behavior
is PurgeOldData. Possible values include: "PurgeOldData", "PauseIngress".
:type storage_limit_exceeded_behavior: str or
~azure.mgmt.timeseriesinsights.models.StorageLimitExceededBehavior
:param partition_key_properties: The list of event properties which will be used to partition
data in the environment. Currently, only a single partition key property is supported.
:type partition_key_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
'sku': {'required': True},
'data_retention_time': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'data_retention_time': {'key': 'properties.dataRetentionTime', 'type': 'duration'},
'storage_limit_exceeded_behavior': {'key': 'properties.storageLimitExceededBehavior', 'type': 'str'},
'partition_key_properties': {'key': 'properties.partitionKeyProperties', 'type': '[TimeSeriesIdProperty]'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
data_retention_time: datetime.timedelta,
tags: Optional[Dict[str, str]] = None,
storage_limit_exceeded_behavior: Optional[Union[str, "StorageLimitExceededBehavior"]] = None,
partition_key_properties: Optional[List["TimeSeriesIdProperty"]] = None,
**kwargs
):
super(Gen1EnvironmentCreateOrUpdateParameters, self).__init__(location=location, tags=tags, sku=sku, **kwargs)
self.kind = 'Gen1' # type: str
self.data_retention_time = data_retention_time
self.storage_limit_exceeded_behavior = storage_limit_exceeded_behavior
self.partition_key_properties = partition_key_properties
class Gen1EnvironmentCreationProperties(msrest.serialization.Model):
"""Properties used to create a Gen1 environment.
All required parameters must be populated in order to send to Azure.
:param data_retention_time: Required. ISO8601 timespan specifying the minimum number of days
the environment's events will be available for query.
:type data_retention_time: ~datetime.timedelta
:param storage_limit_exceeded_behavior: The behavior the Time Series Insights service should
take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new
events will not be read from the event source. If "PurgeOldData" is specified, new events will
continue to be read and old events will be deleted from the environment. The default behavior
is PurgeOldData. Possible values include: "PurgeOldData", "PauseIngress".
:type storage_limit_exceeded_behavior: str or
~azure.mgmt.timeseriesinsights.models.StorageLimitExceededBehavior
:param partition_key_properties: The list of event properties which will be used to partition
data in the environment. Currently, only a single partition key property is supported.
:type partition_key_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
"""
_validation = {
'data_retention_time': {'required': True},
}
_attribute_map = {
'data_retention_time': {'key': 'dataRetentionTime', 'type': 'duration'},
'storage_limit_exceeded_behavior': {'key': 'storageLimitExceededBehavior', 'type': 'str'},
'partition_key_properties': {'key': 'partitionKeyProperties', 'type': '[TimeSeriesIdProperty]'},
}
def __init__(
self,
*,
data_retention_time: datetime.timedelta,
storage_limit_exceeded_behavior: Optional[Union[str, "StorageLimitExceededBehavior"]] = None,
partition_key_properties: Optional[List["TimeSeriesIdProperty"]] = None,
**kwargs
):
super(Gen1EnvironmentCreationProperties, self).__init__(**kwargs)
self.data_retention_time = data_retention_time
self.storage_limit_exceeded_behavior = storage_limit_exceeded_behavior
self.partition_key_properties = partition_key_properties
class Gen1EnvironmentResource(EnvironmentResource):
"""An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource. Gen1 environments have data retention limits.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentResourceKind
:param data_retention_time: Required. ISO8601 timespan specifying the minimum number of days
the environment's events will be available for query.
:type data_retention_time: ~datetime.timedelta
:param storage_limit_exceeded_behavior: The behavior the Time Series Insights service should
take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new
events will not be read from the event source. If "PurgeOldData" is specified, new events will
continue to be read and old events will be deleted from the environment. The default behavior
is PurgeOldData. Possible values include: "PurgeOldData", "PauseIngress".
:type storage_limit_exceeded_behavior: str or
~azure.mgmt.timeseriesinsights.models.StorageLimitExceededBehavior
:param partition_key_properties: The list of event properties which will be used to partition
data in the environment. Currently, only a single partition key property is supported.
:type partition_key_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:ivar data_access_id: An id used to access the environment data, e.g. to query the
environment's events or upload reference data for the environment.
:vartype data_access_id: str
:ivar data_access_fqdn: The fully qualified domain name used to access the environment data,
e.g. to query the environment's events or upload reference data for the environment.
:vartype data_access_fqdn: str
:ivar status: An object that represents the status of the environment, and its internal state
in the Time Series Insights service.
:vartype status: ~azure.mgmt.timeseriesinsights.models.EnvironmentStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
'kind': {'required': True},
'data_retention_time': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'data_access_id': {'readonly': True},
'data_access_fqdn': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'kind': {'key': 'kind', 'type': 'str'},
'data_retention_time': {'key': 'properties.dataRetentionTime', 'type': 'duration'},
'storage_limit_exceeded_behavior': {'key': 'properties.storageLimitExceededBehavior', 'type': 'str'},
'partition_key_properties': {'key': 'properties.partitionKeyProperties', 'type': '[TimeSeriesIdProperty]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'data_access_id': {'key': 'properties.dataAccessId', 'type': 'str'},
'data_access_fqdn': {'key': 'properties.dataAccessFqdn', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'EnvironmentStatus'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
data_retention_time: datetime.timedelta,
tags: Optional[Dict[str, str]] = None,
storage_limit_exceeded_behavior: Optional[Union[str, "StorageLimitExceededBehavior"]] = None,
partition_key_properties: Optional[List["TimeSeriesIdProperty"]] = None,
**kwargs
):
super(Gen1EnvironmentResource, self).__init__(location=location, tags=tags, sku=sku, **kwargs)
self.kind = 'Gen1' # type: str
self.data_retention_time = data_retention_time
self.storage_limit_exceeded_behavior = storage_limit_exceeded_behavior
self.partition_key_properties = partition_key_properties
self.provisioning_state = None
self.creation_time = None
self.data_access_id = None
self.data_access_fqdn = None
self.status = None
class Gen1EnvironmentResourceProperties(Gen1EnvironmentCreationProperties, EnvironmentResourceProperties):
"""Properties of the Gen1 environment.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:ivar data_access_id: An id used to access the environment data, e.g. to query the
environment's events or upload reference data for the environment.
:vartype data_access_id: str
:ivar data_access_fqdn: The fully qualified domain name used to access the environment data,
e.g. to query the environment's events or upload reference data for the environment.
:vartype data_access_fqdn: str
:ivar status: An object that represents the status of the environment, and its internal state
in the Time Series Insights service.
:vartype status: ~azure.mgmt.timeseriesinsights.models.EnvironmentStatus
:param data_retention_time: Required. ISO8601 timespan specifying the minimum number of days
the environment's events will be available for query.
:type data_retention_time: ~datetime.timedelta
:param storage_limit_exceeded_behavior: The behavior the Time Series Insights service should
take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new
events will not be read from the event source. If "PurgeOldData" is specified, new events will
continue to be read and old events will be deleted from the environment. The default behavior
is PurgeOldData. Possible values include: "PurgeOldData", "PauseIngress".
:type storage_limit_exceeded_behavior: str or
~azure.mgmt.timeseriesinsights.models.StorageLimitExceededBehavior
:param partition_key_properties: The list of event properties which will be used to partition
data in the environment. Currently, only a single partition key property is supported.
:type partition_key_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'data_access_id': {'readonly': True},
'data_access_fqdn': {'readonly': True},
'status': {'readonly': True},
'data_retention_time': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'data_access_id': {'key': 'dataAccessId', 'type': 'str'},
'data_access_fqdn': {'key': 'dataAccessFqdn', 'type': 'str'},
'status': {'key': 'status', 'type': 'EnvironmentStatus'},
'data_retention_time': {'key': 'dataRetentionTime', 'type': 'duration'},
'storage_limit_exceeded_behavior': {'key': 'storageLimitExceededBehavior', 'type': 'str'},
'partition_key_properties': {'key': 'partitionKeyProperties', 'type': '[TimeSeriesIdProperty]'},
}
def __init__(
self,
*,
data_retention_time: datetime.timedelta,
storage_limit_exceeded_behavior: Optional[Union[str, "StorageLimitExceededBehavior"]] = None,
partition_key_properties: Optional[List["TimeSeriesIdProperty"]] = None,
**kwargs
):
super(Gen1EnvironmentResourceProperties, self).__init__(data_retention_time=data_retention_time, storage_limit_exceeded_behavior=storage_limit_exceeded_behavior, partition_key_properties=partition_key_properties, **kwargs)
self.provisioning_state = None
self.creation_time = None
self.data_access_id = None
self.data_access_fqdn = None
self.status = None
self.data_retention_time = data_retention_time
self.storage_limit_exceeded_behavior = storage_limit_exceeded_behavior
self.partition_key_properties = partition_key_properties
class Gen1EnvironmentUpdateParameters(EnvironmentUpdateParameters):
"""Parameters supplied to the Update Environment operation to update a Gen1 environment.
:param tags: A set of tags. Key-value pairs of additional properties for the environment.
:type tags: dict[str, str]
:param sku: The sku of the environment.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
:param data_retention_time: ISO8601 timespan specifying the minimum number of days the
environment's events will be available for query.
:type data_retention_time: ~datetime.timedelta
:param storage_limit_exceeded_behavior: The behavior the Time Series Insights service should
take when the environment's capacity has been exceeded. If "PauseIngress" is specified, new
events will not be read from the event source. If "PurgeOldData" is specified, new events will
continue to be read and old events will be deleted from the environment. The default behavior
is PurgeOldData. Possible values include: "PurgeOldData", "PauseIngress".
:type storage_limit_exceeded_behavior: str or
~azure.mgmt.timeseriesinsights.models.StorageLimitExceededBehavior
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'data_retention_time': {'key': 'properties.dataRetentionTime', 'type': 'duration'},
'storage_limit_exceeded_behavior': {'key': 'properties.storageLimitExceededBehavior', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
data_retention_time: Optional[datetime.timedelta] = None,
storage_limit_exceeded_behavior: Optional[Union[str, "StorageLimitExceededBehavior"]] = None,
**kwargs
):
super(Gen1EnvironmentUpdateParameters, self).__init__(tags=tags, **kwargs)
self.sku = sku
self.data_retention_time = data_retention_time
self.storage_limit_exceeded_behavior = storage_limit_exceeded_behavior
class Gen2EnvironmentCreateOrUpdateParameters(EnvironmentCreateOrUpdateParameters):
"""Parameters supplied to the Create or Update Environment operation for a Gen2 environment.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentKind
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
:param time_series_id_properties: Required. The list of event properties which will be used to
define the environment's time series id.
:type time_series_id_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
:param storage_configuration: Required. The storage configuration provides the connection
details that allows the Time Series Insights service to connect to the customer storage account
that is used to store the environment's data.
:type storage_configuration:
~azure.mgmt.timeseriesinsights.models.Gen2StorageConfigurationInput
:param warm_store_configuration: The warm store configuration provides the details to create a
warm store cache that will retain a copy of the environment's data available for faster query.
:type warm_store_configuration:
~azure.mgmt.timeseriesinsights.models.WarmStoreConfigurationProperties
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
'sku': {'required': True},
'time_series_id_properties': {'required': True},
'storage_configuration': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'time_series_id_properties': {'key': 'properties.timeSeriesIdProperties', 'type': '[TimeSeriesIdProperty]'},
'storage_configuration': {'key': 'properties.storageConfiguration', 'type': 'Gen2StorageConfigurationInput'},
'warm_store_configuration': {'key': 'properties.warmStoreConfiguration', 'type': 'WarmStoreConfigurationProperties'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
time_series_id_properties: List["TimeSeriesIdProperty"],
storage_configuration: "Gen2StorageConfigurationInput",
tags: Optional[Dict[str, str]] = None,
warm_store_configuration: Optional["WarmStoreConfigurationProperties"] = None,
**kwargs
):
super(Gen2EnvironmentCreateOrUpdateParameters, self).__init__(location=location, tags=tags, sku=sku, **kwargs)
self.kind = 'Gen2' # type: str
self.time_series_id_properties = time_series_id_properties
self.storage_configuration = storage_configuration
self.warm_store_configuration = warm_store_configuration
class Gen2EnvironmentResource(EnvironmentResource):
"""An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource. Gen2 environments do not have set data retention limits.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentResourceKind
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:ivar data_access_id: An id used to access the environment data, e.g. to query the
environment's events or upload reference data for the environment.
:vartype data_access_id: str
:ivar data_access_fqdn: The fully qualified domain name used to access the environment data,
e.g. to query the environment's events or upload reference data for the environment.
:vartype data_access_fqdn: str
:ivar status: An object that represents the status of the environment, and its internal state
in the Time Series Insights service.
:vartype status: ~azure.mgmt.timeseriesinsights.models.EnvironmentStatus
:param time_series_id_properties: Required. The list of event properties which will be used to
define the environment's time series id.
:type time_series_id_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
:param storage_configuration: Required. The storage configuration provides the connection
details that allows the Time Series Insights service to connect to the customer storage account
that is used to store the environment's data.
:type storage_configuration:
~azure.mgmt.timeseriesinsights.models.Gen2StorageConfigurationOutput
:param warm_store_configuration: The warm store configuration provides the details to create a
warm store cache that will retain a copy of the environment's data available for faster query.
:type warm_store_configuration:
~azure.mgmt.timeseriesinsights.models.WarmStoreConfigurationProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
'kind': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'data_access_id': {'readonly': True},
'data_access_fqdn': {'readonly': True},
'status': {'readonly': True},
'time_series_id_properties': {'required': True},
'storage_configuration': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'kind': {'key': 'kind', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'data_access_id': {'key': 'properties.dataAccessId', 'type': 'str'},
'data_access_fqdn': {'key': 'properties.dataAccessFqdn', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'EnvironmentStatus'},
'time_series_id_properties': {'key': 'properties.timeSeriesIdProperties', 'type': '[TimeSeriesIdProperty]'},
'storage_configuration': {'key': 'properties.storageConfiguration', 'type': 'Gen2StorageConfigurationOutput'},
'warm_store_configuration': {'key': 'properties.warmStoreConfiguration', 'type': 'WarmStoreConfigurationProperties'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
time_series_id_properties: List["TimeSeriesIdProperty"],
storage_configuration: "Gen2StorageConfigurationOutput",
tags: Optional[Dict[str, str]] = None,
warm_store_configuration: Optional["WarmStoreConfigurationProperties"] = None,
**kwargs
):
super(Gen2EnvironmentResource, self).__init__(location=location, tags=tags, sku=sku, **kwargs)
self.kind = 'Gen2' # type: str
self.provisioning_state = None
self.creation_time = None
self.data_access_id = None
self.data_access_fqdn = None
self.status = None
self.time_series_id_properties = time_series_id_properties
self.storage_configuration = storage_configuration
self.warm_store_configuration = warm_store_configuration
class Gen2EnvironmentResourceProperties(EnvironmentResourceProperties):
"""Properties of the Gen2 environment.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:ivar data_access_id: An id used to access the environment data, e.g. to query the
environment's events or upload reference data for the environment.
:vartype data_access_id: str
:ivar data_access_fqdn: The fully qualified domain name used to access the environment data,
e.g. to query the environment's events or upload reference data for the environment.
:vartype data_access_fqdn: str
:ivar status: An object that represents the status of the environment, and its internal state
in the Time Series Insights service.
:vartype status: ~azure.mgmt.timeseriesinsights.models.EnvironmentStatus
:param time_series_id_properties: Required. The list of event properties which will be used to
define the environment's time series id.
:type time_series_id_properties:
list[~azure.mgmt.timeseriesinsights.models.TimeSeriesIdProperty]
:param storage_configuration: Required. The storage configuration provides the connection
details that allows the Time Series Insights service to connect to the customer storage account
that is used to store the environment's data.
:type storage_configuration:
~azure.mgmt.timeseriesinsights.models.Gen2StorageConfigurationOutput
:param warm_store_configuration: The warm store configuration provides the details to create a
warm store cache that will retain a copy of the environment's data available for faster query.
:type warm_store_configuration:
~azure.mgmt.timeseriesinsights.models.WarmStoreConfigurationProperties
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'data_access_id': {'readonly': True},
'data_access_fqdn': {'readonly': True},
'status': {'readonly': True},
'time_series_id_properties': {'required': True},
'storage_configuration': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'data_access_id': {'key': 'dataAccessId', 'type': 'str'},
'data_access_fqdn': {'key': 'dataAccessFqdn', 'type': 'str'},
'status': {'key': 'status', 'type': 'EnvironmentStatus'},
'time_series_id_properties': {'key': 'timeSeriesIdProperties', 'type': '[TimeSeriesIdProperty]'},
'storage_configuration': {'key': 'storageConfiguration', 'type': 'Gen2StorageConfigurationOutput'},
'warm_store_configuration': {'key': 'warmStoreConfiguration', 'type': 'WarmStoreConfigurationProperties'},
}
def __init__(
self,
*,
time_series_id_properties: List["TimeSeriesIdProperty"],
storage_configuration: "Gen2StorageConfigurationOutput",
warm_store_configuration: Optional["WarmStoreConfigurationProperties"] = None,
**kwargs
):
super(Gen2EnvironmentResourceProperties, self).__init__(**kwargs)
self.time_series_id_properties = time_series_id_properties
self.storage_configuration = storage_configuration
self.warm_store_configuration = warm_store_configuration
class Gen2EnvironmentUpdateParameters(EnvironmentUpdateParameters):
"""Parameters supplied to the Update Environment operation to update a Gen2 environment.
:param tags: A set of tags. Key-value pairs of additional properties for the environment.
:type tags: dict[str, str]
:param storage_configuration: The storage configuration provides the connection details that
allows the Time Series Insights service to connect to the customer storage account that is used
to store the environment's data.
:type storage_configuration:
~azure.mgmt.timeseriesinsights.models.Gen2StorageConfigurationMutableProperties
:param warm_store_configuration: The warm store configuration provides the details to create a
warm store cache that will retain a copy of the environment's data available for faster query.
:type warm_store_configuration:
~azure.mgmt.timeseriesinsights.models.WarmStoreConfigurationProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'storage_configuration': {'key': 'properties.storageConfiguration', 'type': 'Gen2StorageConfigurationMutableProperties'},
'warm_store_configuration': {'key': 'properties.warmStoreConfiguration', 'type': 'WarmStoreConfigurationProperties'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
storage_configuration: Optional["Gen2StorageConfigurationMutableProperties"] = None,
warm_store_configuration: Optional["WarmStoreConfigurationProperties"] = None,
**kwargs
):
super(Gen2EnvironmentUpdateParameters, self).__init__(tags=tags, **kwargs)
self.storage_configuration = storage_configuration
self.warm_store_configuration = warm_store_configuration
class Gen2StorageConfigurationInput(msrest.serialization.Model):
"""The storage configuration provides the connection details that allows the Time Series Insights service to connect to the customer storage account that is used to store the environment's data.
All required parameters must be populated in order to send to Azure.
:param account_name: Required. The name of the storage account that will hold the environment's
Gen2 data.
:type account_name: str
:param management_key: Required. The value of the management key that grants the Time Series
Insights service write access to the storage account. This property is not shown in environment
responses.
:type management_key: str
"""
_validation = {
'account_name': {'required': True},
'management_key': {'required': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'management_key': {'key': 'managementKey', 'type': 'str'},
}
def __init__(
self,
*,
account_name: str,
management_key: str,
**kwargs
):
super(Gen2StorageConfigurationInput, self).__init__(**kwargs)
self.account_name = account_name
self.management_key = management_key
class Gen2StorageConfigurationMutableProperties(msrest.serialization.Model):
"""The storage configuration provides the connection details that allows the Time Series Insights service to connect to the customer storage account that is used to store the environment's data.
All required parameters must be populated in order to send to Azure.
:param management_key: Required. The value of the management key that grants the Time Series
Insights service write access to the storage account. This property is not shown in environment
responses.
:type management_key: str
"""
_validation = {
'management_key': {'required': True},
}
_attribute_map = {
'management_key': {'key': 'managementKey', 'type': 'str'},
}
def __init__(
self,
*,
management_key: str,
**kwargs
):
super(Gen2StorageConfigurationMutableProperties, self).__init__(**kwargs)
self.management_key = management_key
class Gen2StorageConfigurationOutput(msrest.serialization.Model):
"""The storage configuration provides the non-secret connection details about the customer storage account that is used to store the environment's data.
All required parameters must be populated in order to send to Azure.
:param account_name: Required. The name of the storage account that will hold the environment's
Gen2 data.
:type account_name: str
"""
_validation = {
'account_name': {'required': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
}
def __init__(
self,
*,
account_name: str,
**kwargs
):
super(Gen2StorageConfigurationOutput, self).__init__(**kwargs)
self.account_name = account_name
class IngressEnvironmentStatus(msrest.serialization.Model):
"""An object that represents the status of ingress on an environment.
Variables are only populated by the server, and will be ignored when sending a request.
:param state: This string represents the state of ingress operations on an environment. It can
be "Disabled", "Ready", "Running", "Paused" or "Unknown". Possible values include: "Disabled",
"Ready", "Running", "Paused", "Unknown".
:type state: str or ~azure.mgmt.timeseriesinsights.models.IngressState
:ivar state_details: An object that contains the details about an environment's state.
:vartype state_details: ~azure.mgmt.timeseriesinsights.models.EnvironmentStateDetails
"""
_validation = {
'state_details': {'readonly': True},
}
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
'state_details': {'key': 'stateDetails', 'type': 'EnvironmentStateDetails'},
}
def __init__(
self,
*,
state: Optional[Union[str, "IngressState"]] = None,
**kwargs
):
super(IngressEnvironmentStatus, self).__init__(**kwargs)
self.state = state
self.state_details = None
class IoTHubEventSourceCommonProperties(AzureEventSourceProperties):
"""Properties of the IoTHub event source.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param iot_hub_name: Required. The name of the iot hub.
:type iot_hub_name: str
:param consumer_group_name: Required. The name of the iot hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the Shared Access Policy key that grants the Time Series
Insights service access to the iot hub. This shared access policy key must grant 'service
connect' permissions to the iot hub.
:type key_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'iot_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'consumer_group_name': {'key': 'consumerGroupName', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
iot_hub_name: str,
consumer_group_name: str,
key_name: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceCommonProperties, self).__init__(timestamp_property_name=timestamp_property_name, event_source_resource_id=event_source_resource_id, **kwargs)
self.iot_hub_name = iot_hub_name
self.consumer_group_name = consumer_group_name
self.key_name = key_name
class IoTHubEventSourceCreateOrUpdateParameters(EventSourceCreateOrUpdateParameters):
"""Parameters supplied to the Create or Update Event Source operation for an IoTHub event source.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the event source.Constant filled by server. Possible values
include: "Microsoft.EventHub", "Microsoft.IoTHub".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EventSourceKind
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param iot_hub_name: Required. The name of the iot hub.
:type iot_hub_name: str
:param consumer_group_name: Required. The name of the iot hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the Shared Access Policy key that grants the Time Series
Insights service access to the iot hub. This shared access policy key must grant 'service
connect' permissions to the iot hub.
:type key_name: str
:param shared_access_key: Required. The value of the Shared Access Policy key that grants the
Time Series Insights service read access to the iot hub. This property is not shown in event
source responses.
:type shared_access_key: str
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'iot_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
'shared_access_key': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'local_timestamp': {'key': 'localTimestamp', 'type': 'LocalTimestamp'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'properties.timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'properties.eventSourceResourceId', 'type': 'str'},
'iot_hub_name': {'key': 'properties.iotHubName', 'type': 'str'},
'consumer_group_name': {'key': 'properties.consumerGroupName', 'type': 'str'},
'key_name': {'key': 'properties.keyName', 'type': 'str'},
'shared_access_key': {'key': 'properties.sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
event_source_resource_id: str,
iot_hub_name: str,
consumer_group_name: str,
key_name: str,
shared_access_key: str,
tags: Optional[Dict[str, str]] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceCreateOrUpdateParameters, self).__init__(location=location, tags=tags, local_timestamp=local_timestamp, **kwargs)
self.kind = 'Microsoft.IoTHub' # type: str
self.provisioning_state = None
self.creation_time = None
self.timestamp_property_name = timestamp_property_name
self.event_source_resource_id = event_source_resource_id
self.iot_hub_name = iot_hub_name
self.consumer_group_name = consumer_group_name
self.key_name = key_name
self.shared_access_key = shared_access_key
class IoTHubEventSourceCreationProperties(IoTHubEventSourceCommonProperties):
"""Properties of the IoTHub event source that are required on create or update requests.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param iot_hub_name: Required. The name of the iot hub.
:type iot_hub_name: str
:param consumer_group_name: Required. The name of the iot hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the Shared Access Policy key that grants the Time Series
Insights service access to the iot hub. This shared access policy key must grant 'service
connect' permissions to the iot hub.
:type key_name: str
:param shared_access_key: Required. The value of the Shared Access Policy key that grants the
Time Series Insights service read access to the iot hub. This property is not shown in event
source responses.
:type shared_access_key: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'iot_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
'shared_access_key': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'consumer_group_name': {'key': 'consumerGroupName', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
'shared_access_key': {'key': 'sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
iot_hub_name: str,
consumer_group_name: str,
key_name: str,
shared_access_key: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceCreationProperties, self).__init__(timestamp_property_name=timestamp_property_name, event_source_resource_id=event_source_resource_id, iot_hub_name=iot_hub_name, consumer_group_name=consumer_group_name, key_name=key_name, **kwargs)
self.shared_access_key = shared_access_key
class IoTHubEventSourceMutableProperties(EventSourceMutableProperties):
"""An object that represents a set of mutable IoTHub event source resource properties.
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
:param shared_access_key: The value of the shared access key that grants the Time Series
Insights service read access to the iot hub. This property is not shown in event source
responses.
:type shared_access_key: str
"""
_attribute_map = {
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'local_timestamp': {'key': 'localTimestamp', 'type': 'LocalTimestamp'},
'shared_access_key': {'key': 'sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
timestamp_property_name: Optional[str] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
shared_access_key: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceMutableProperties, self).__init__(timestamp_property_name=timestamp_property_name, local_timestamp=local_timestamp, **kwargs)
self.shared_access_key = shared_access_key
class IoTHubEventSourceResource(EventSourceResource):
"""An event source that receives its data from an Azure IoTHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. The kind of the event source.Constant filled by server. Possible values
include: "Microsoft.EventHub", "Microsoft.IoTHub".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EventSourceResourceKind
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param iot_hub_name: Required. The name of the iot hub.
:type iot_hub_name: str
:param consumer_group_name: Required. The name of the iot hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the Shared Access Policy key that grants the Time Series
Insights service access to the iot hub. This shared access policy key must grant 'service
connect' permissions to the iot hub.
:type key_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'iot_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'properties.timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'properties.eventSourceResourceId', 'type': 'str'},
'iot_hub_name': {'key': 'properties.iotHubName', 'type': 'str'},
'consumer_group_name': {'key': 'properties.consumerGroupName', 'type': 'str'},
'key_name': {'key': 'properties.keyName', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
event_source_resource_id: str,
iot_hub_name: str,
consumer_group_name: str,
key_name: str,
tags: Optional[Dict[str, str]] = None,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceResource, self).__init__(location=location, tags=tags, **kwargs)
self.kind = 'Microsoft.IoTHub' # type: str
self.provisioning_state = None
self.creation_time = None
self.timestamp_property_name = timestamp_property_name
self.event_source_resource_id = event_source_resource_id
self.iot_hub_name = iot_hub_name
self.consumer_group_name = consumer_group_name
self.key_name = key_name
class IoTHubEventSourceResourceProperties(IoTHubEventSourceCommonProperties):
"""Properties of the IoTHub event source resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param iot_hub_name: Required. The name of the iot hub.
:type iot_hub_name: str
:param consumer_group_name: Required. The name of the iot hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the Shared Access Policy key that grants the Time Series
Insights service access to the iot hub. This shared access policy key must grant 'service
connect' permissions to the iot hub.
:type key_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'iot_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'timestamp_property_name': {'key': 'timestampPropertyName', 'type': 'str'},
'event_source_resource_id': {'key': 'eventSourceResourceId', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'consumer_group_name': {'key': 'consumerGroupName', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
*,
event_source_resource_id: str,
iot_hub_name: str,
consumer_group_name: str,
key_name: str,
timestamp_property_name: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceResourceProperties, self).__init__(timestamp_property_name=timestamp_property_name, event_source_resource_id=event_source_resource_id, iot_hub_name=iot_hub_name, consumer_group_name=consumer_group_name, key_name=key_name, **kwargs)
class IoTHubEventSourceUpdateParameters(EventSourceUpdateParameters):
"""Parameters supplied to the Update Event Source operation to update an IoTHub event source.
:param tags: A set of tags. Key-value pairs of additional properties for the event source.
:type tags: dict[str, str]
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param local_timestamp: An object that represents the local timestamp property. It contains the
format of local timestamp that needs to be used and the corresponding timezone offset
information. If a value isn't specified for localTimestamp, or if null, then the local
timestamp will not be ingressed with the events.
:type local_timestamp: ~azure.mgmt.timeseriesinsights.models.LocalTimestamp
:param shared_access_key: The value of the shared access key that grants the Time Series
Insights service read access to the iot hub. This property is not shown in event source
responses.
:type shared_access_key: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'timestamp_property_name': {'key': 'properties.timestampPropertyName', 'type': 'str'},
'local_timestamp': {'key': 'properties.localTimestamp', 'type': 'LocalTimestamp'},
'shared_access_key': {'key': 'properties.sharedAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
timestamp_property_name: Optional[str] = None,
local_timestamp: Optional["LocalTimestamp"] = None,
shared_access_key: Optional[str] = None,
**kwargs
):
super(IoTHubEventSourceUpdateParameters, self).__init__(tags=tags, **kwargs)
self.timestamp_property_name = timestamp_property_name
self.local_timestamp = local_timestamp
self.shared_access_key = shared_access_key
class LocalTimestamp(msrest.serialization.Model):
"""An object that represents the local timestamp property. It contains the format of local timestamp that needs to be used and the corresponding timezone offset information. If a value isn't specified for localTimestamp, or if null, then the local timestamp will not be ingressed with the events.
:param format: An enum that represents the format of the local timestamp property that needs to
be set. Possible values include: "Embedded".
:type format: str or ~azure.mgmt.timeseriesinsights.models.LocalTimestampFormat
:param time_zone_offset: An object that represents the offset information for the local
timestamp format specified. Should not be specified for LocalTimestampFormat - Embedded.
:type time_zone_offset: ~azure.mgmt.timeseriesinsights.models.LocalTimestampTimeZoneOffset
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'time_zone_offset': {'key': 'timeZoneOffset', 'type': 'LocalTimestampTimeZoneOffset'},
}
def __init__(
self,
*,
format: Optional[Union[str, "LocalTimestampFormat"]] = None,
time_zone_offset: Optional["LocalTimestampTimeZoneOffset"] = None,
**kwargs
):
super(LocalTimestamp, self).__init__(**kwargs)
self.format = format
self.time_zone_offset = time_zone_offset
class LocalTimestampTimeZoneOffset(msrest.serialization.Model):
"""An object that represents the offset information for the local timestamp format specified. Should not be specified for LocalTimestampFormat - Embedded.
:param property_name: The event property that will be contain the offset information to
calculate the local timestamp. When the LocalTimestampFormat is Iana, the property name will
contain the name of the column which contains IANA Timezone Name (eg: Americas/Los Angeles).
When LocalTimestampFormat is Timespan, it contains the name of property which contains values
representing the offset (eg: P1D or 1.00:00:00).
:type property_name: str
"""
_attribute_map = {
'property_name': {'key': 'propertyName', 'type': 'str'},
}
def __init__(
self,
*,
property_name: Optional[str] = None,
**kwargs
):
super(LocalTimestampTimeZoneOffset, self).__init__(**kwargs)
self.property_name = property_name
class Operation(msrest.serialization.Model):
"""A Time Series Insights REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation being performed on this particular object.
:vartype name: str
:ivar display: Contains the localized display information for this particular operation /
action.
:vartype display: ~azure.mgmt.timeseriesinsights.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
'display': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = None
class OperationDisplay(msrest.serialization.Model):
"""Contains the localized display information for this particular operation / action.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: The localized friendly form of the resource provider name.
:vartype provider: str
:ivar resource: The localized friendly form of the resource type related to this
action/operation.
:vartype resource: str
:ivar operation: The localized friendly name for the operation.
:vartype operation: str
:ivar description: The localized friendly description for the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list Time Series Insights operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Time Series Insights operations supported by the
Microsoft.TimeSeriesInsights resource provider.
:vartype value: list[~azure.mgmt.timeseriesinsights.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ReferenceDataSetCreateOrUpdateParameters(CreateOrUpdateTrackedResourceProperties):
"""ReferenceDataSetCreateOrUpdateParameters.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource.
:type location: str
:param tags: A set of tags. Key-value pairs of additional properties for the resource.
:type tags: dict[str, str]
:param key_properties: Required. The list of key properties for the reference data set.
:type key_properties: list[~azure.mgmt.timeseriesinsights.models.ReferenceDataSetKeyProperty]
:param data_string_comparison_behavior: The reference data set key comparison behavior can be
set using this property. By default, the value is 'Ordinal' - which means case sensitive key
comparison will be performed while joining reference data with events or while adding new
reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
Possible values include: "Ordinal", "OrdinalIgnoreCase".
:type data_string_comparison_behavior: str or
~azure.mgmt.timeseriesinsights.models.DataStringComparisonBehavior
"""
_validation = {
'location': {'required': True},
'key_properties': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_properties': {'key': 'properties.keyProperties', 'type': '[ReferenceDataSetKeyProperty]'},
'data_string_comparison_behavior': {'key': 'properties.dataStringComparisonBehavior', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
key_properties: List["ReferenceDataSetKeyProperty"],
tags: Optional[Dict[str, str]] = None,
data_string_comparison_behavior: Optional[Union[str, "DataStringComparisonBehavior"]] = None,
**kwargs
):
super(ReferenceDataSetCreateOrUpdateParameters, self).__init__(location=location, tags=tags, **kwargs)
self.key_properties = key_properties
self.data_string_comparison_behavior = data_string_comparison_behavior
class ReferenceDataSetCreationProperties(msrest.serialization.Model):
"""Properties used to create a reference data set.
All required parameters must be populated in order to send to Azure.
:param key_properties: Required. The list of key properties for the reference data set.
:type key_properties: list[~azure.mgmt.timeseriesinsights.models.ReferenceDataSetKeyProperty]
:param data_string_comparison_behavior: The reference data set key comparison behavior can be
set using this property. By default, the value is 'Ordinal' - which means case sensitive key
comparison will be performed while joining reference data with events or while adding new
reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
Possible values include: "Ordinal", "OrdinalIgnoreCase".
:type data_string_comparison_behavior: str or
~azure.mgmt.timeseriesinsights.models.DataStringComparisonBehavior
"""
_validation = {
'key_properties': {'required': True},
}
_attribute_map = {
'key_properties': {'key': 'keyProperties', 'type': '[ReferenceDataSetKeyProperty]'},
'data_string_comparison_behavior': {'key': 'dataStringComparisonBehavior', 'type': 'str'},
}
def __init__(
self,
*,
key_properties: List["ReferenceDataSetKeyProperty"],
data_string_comparison_behavior: Optional[Union[str, "DataStringComparisonBehavior"]] = None,
**kwargs
):
super(ReferenceDataSetCreationProperties, self).__init__(**kwargs)
self.key_properties = key_properties
self.data_string_comparison_behavior = data_string_comparison_behavior
class ReferenceDataSetKeyProperty(msrest.serialization.Model):
"""A key property for the reference data set. A reference data set can have multiple key properties.
:param name: The name of the key property.
:type name: str
:param type: The type of the key property. Possible values include: "String", "Double", "Bool",
"DateTime".
:type type: str or ~azure.mgmt.timeseriesinsights.models.ReferenceDataKeyPropertyType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[Union[str, "ReferenceDataKeyPropertyType"]] = None,
**kwargs
):
super(ReferenceDataSetKeyProperty, self).__init__(**kwargs)
self.name = name
self.type = type
class ReferenceDataSetListResponse(msrest.serialization.Model):
"""The response of the List Reference Data Sets operation.
:param value: Result of the List Reference Data Sets operation.
:type value: list[~azure.mgmt.timeseriesinsights.models.ReferenceDataSetResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ReferenceDataSetResource]'},
}
def __init__(
self,
*,
value: Optional[List["ReferenceDataSetResource"]] = None,
**kwargs
):
super(ReferenceDataSetListResponse, self).__init__(**kwargs)
self.value = value
class ReferenceDataSetResource(TrackedResource):
"""A reference data set provides metadata about the events in an environment. Metadata in the reference data set will be joined with events as they are read from event sources. The metadata that makes up the reference data set is uploaded or modified through the Time Series Insights data plane APIs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param key_properties: The list of key properties for the reference data set.
:type key_properties: list[~azure.mgmt.timeseriesinsights.models.ReferenceDataSetKeyProperty]
:param data_string_comparison_behavior: The reference data set key comparison behavior can be
set using this property. By default, the value is 'Ordinal' - which means case sensitive key
comparison will be performed while joining reference data with events or while adding new
reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
Possible values include: "Ordinal", "OrdinalIgnoreCase".
:type data_string_comparison_behavior: str or
~azure.mgmt.timeseriesinsights.models.DataStringComparisonBehavior
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_properties': {'key': 'properties.keyProperties', 'type': '[ReferenceDataSetKeyProperty]'},
'data_string_comparison_behavior': {'key': 'properties.dataStringComparisonBehavior', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
key_properties: Optional[List["ReferenceDataSetKeyProperty"]] = None,
data_string_comparison_behavior: Optional[Union[str, "DataStringComparisonBehavior"]] = None,
**kwargs
):
super(ReferenceDataSetResource, self).__init__(location=location, tags=tags, **kwargs)
self.key_properties = key_properties
self.data_string_comparison_behavior = data_string_comparison_behavior
self.provisioning_state = None
self.creation_time = None
class ReferenceDataSetResourceProperties(ReferenceDataSetCreationProperties, ResourceProperties):
"""Properties of the reference data set.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param key_properties: Required. The list of key properties for the reference data set.
:type key_properties: list[~azure.mgmt.timeseriesinsights.models.ReferenceDataSetKeyProperty]
:param data_string_comparison_behavior: The reference data set key comparison behavior can be
set using this property. By default, the value is 'Ordinal' - which means case sensitive key
comparison will be performed while joining reference data with events or while adding new
reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used.
Possible values include: "Ordinal", "OrdinalIgnoreCase".
:type data_string_comparison_behavior: str or
~azure.mgmt.timeseriesinsights.models.DataStringComparisonBehavior
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'key_properties': {'required': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'key_properties': {'key': 'keyProperties', 'type': '[ReferenceDataSetKeyProperty]'},
'data_string_comparison_behavior': {'key': 'dataStringComparisonBehavior', 'type': 'str'},
}
def __init__(
self,
*,
key_properties: List["ReferenceDataSetKeyProperty"],
data_string_comparison_behavior: Optional[Union[str, "DataStringComparisonBehavior"]] = None,
**kwargs
):
super(ReferenceDataSetResourceProperties, self).__init__(key_properties=key_properties, data_string_comparison_behavior=data_string_comparison_behavior, **kwargs)
self.provisioning_state = None
self.creation_time = None
self.key_properties = key_properties
self.data_string_comparison_behavior = data_string_comparison_behavior
class ReferenceDataSetUpdateParameters(msrest.serialization.Model):
"""Parameters supplied to the Update Reference Data Set operation.
:param tags: A set of tags. Key-value pairs of additional properties for the reference data
set.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(ReferenceDataSetUpdateParameters, self).__init__(**kwargs)
self.tags = tags
class Sku(msrest.serialization.Model):
"""The sku determines the type of environment, either Gen1 (S1 or S2) or Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the ingress rate, and the billing rate.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of this SKU. Possible values include: "S1", "S2", "P1", "L1".
:type name: str or ~azure.mgmt.timeseriesinsights.models.SkuName
:param capacity: Required. The capacity of the sku. For Gen1 environments, this value can be
changed to support scale out of environments after they have been created.
:type capacity: int
"""
_validation = {
'name': {'required': True},
'capacity': {'required': True, 'maximum': 10, 'minimum': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
*,
name: Union[str, "SkuName"],
capacity: int,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = name
self.capacity = capacity
class TimeSeriesIdProperty(msrest.serialization.Model):
"""The structure of the property that a time series id can have. An environment can have multiple such properties.
:param name: The name of the property.
:type name: str
:param type: The type of the property. Possible values include: "String".
:type type: str or ~azure.mgmt.timeseriesinsights.models.PropertyType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[Union[str, "PropertyType"]] = None,
**kwargs
):
super(TimeSeriesIdProperty, self).__init__(**kwargs)
self.name = name
self.type = type
class WarmStorageEnvironmentStatus(msrest.serialization.Model):
"""An object that represents the status of warm storage on an environment.
:param state: This string represents the state of warm storage properties usage. It can be
"Ok", "Error", "Unknown". Possible values include: "Ok", "Error", "Unknown".
:type state: str or ~azure.mgmt.timeseriesinsights.models.WarmStoragePropertiesState
:param current_count: A value that represents the number of properties used by the environment
for S1/S2 SKU and number of properties used by Warm Store for PAYG SKU.
:type current_count: int
:param max_count: A value that represents the maximum number of properties used allowed by the
environment for S1/S2 SKU and maximum number of properties allowed by Warm Store for PAYG SKU.
:type max_count: int
"""
_validation = {
'current_count': {'maximum': 10, 'minimum': 1},
'max_count': {'maximum': 10, 'minimum': 1},
}
_attribute_map = {
'state': {'key': 'propertiesUsage.state', 'type': 'str'},
'current_count': {'key': 'propertiesUsage.stateDetails.currentCount', 'type': 'int'},
'max_count': {'key': 'propertiesUsage.stateDetails.maxCount', 'type': 'int'},
}
def __init__(
self,
*,
state: Optional[Union[str, "WarmStoragePropertiesState"]] = None,
current_count: Optional[int] = None,
max_count: Optional[int] = None,
**kwargs
):
super(WarmStorageEnvironmentStatus, self).__init__(**kwargs)
self.state = state
self.current_count = current_count
self.max_count = max_count
class WarmStoreConfigurationProperties(msrest.serialization.Model):
"""The warm store configuration provides the details to create a warm store cache that will retain a copy of the environment's data available for faster query.
All required parameters must be populated in order to send to Azure.
:param data_retention: Required. ISO8601 timespan specifying the number of days the
environment's events will be available for query from the warm store.
:type data_retention: ~datetime.timedelta
"""
_validation = {
'data_retention': {'required': True},
}
_attribute_map = {
'data_retention': {'key': 'dataRetention', 'type': 'duration'},
}
def __init__(
self,
*,
data_retention: datetime.timedelta,
**kwargs
):
super(WarmStoreConfigurationProperties, self).__init__(**kwargs)
self.data_retention = data_retention
| 44.208573
| 313
| 0.684325
| 15,908
| 138,196
| 5.783254
| 0.032499
| 0.018793
| 0.023283
| 0.03587
| 0.865989
| 0.857152
| 0.843076
| 0.825315
| 0.807891
| 0.796359
| 0
| 0.002281
| 0.210158
| 138,196
| 3,125
| 314
| 44.22272
| 0.840572
| 0.481931
| 0
| 0.778626
| 0
| 0
| 0.268031
| 0.094091
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041985
| false
| 0
| 0.002545
| 0
| 0.159033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5341750d15eaffc0f2887cfec034383a4e40f9c1
| 14,192
|
py
|
Python
|
code/architecture/architectures_utils.py
|
EMBEDDIA/NER_BERT_Multitask
|
9f8dc7530875bbba5c6ac43063d3998aff5b0773
|
[
"MIT"
] | 1
|
2021-11-17T02:58:09.000Z
|
2021-11-17T02:58:09.000Z
|
code/architecture/architectures_utils.py
|
EMBEDDIA/NER_BERT_Multitask
|
9f8dc7530875bbba5c6ac43063d3998aff5b0773
|
[
"MIT"
] | null | null | null |
code/architecture/architectures_utils.py
|
EMBEDDIA/NER_BERT_Multitask
|
9f8dc7530875bbba5c6ac43063d3998aff5b0773
|
[
"MIT"
] | null | null | null |
import torch
from tqdm import tqdm
from torch.utils import data
import os
from utils.EarlyStopper import EarlyStopper
def train_bert_model_multitask(model, experiment_name, epochs, optimizer, scheduler, train_batcher,
train_batching_params, dev_dataloader,
evaluation_function, saving_path, early_stop=0, use_gpu=True, gpu_device="cuda:0",
masking=False, update_masking=False, dev_aligner=None, multi_gpu=False, bert_hidden_size=768, uppercase_percentage=0.0):
using_cuda = False
if torch.cuda.is_available() and use_gpu:
device = torch.device(gpu_device)
model.to(device)
using_cuda = True
else:
device = torch.device("cpu")
gradient_accumulation_steps = 1
step = 0
eval_score = 0.0
report = ""
train_dataloader = data.DataLoader(train_batcher, **train_batching_params, collate_fn=train_batcher.collate_fn)
early_stopper = EarlyStopper(patience=early_stop)
for epoch in range(epochs):
if epoch > 0 and (masking is True or uppercase_percentage > 0.0):
if update_masking and eval_score >= 0.90:
print("Updating masking")
train_batcher.updateMasking(None, False)
update_masking = False
early_stop = 5
print("Batching training dataset")
train_batcher.createBatches()
train_dataloader = data.DataLoader(train_batcher, **train_batching_params,
collate_fn=train_batcher.collate_fn)
print(f"\nTraining Model: {epoch + 1}")
running_loss = 0.0
model.train()
for step, batch in enumerate(tqdm(train_dataloader, total=len(train_dataloader))):
tokens, attention_masks, token_type_ids, tags, tokens_mask, labelling_mask, lm_mask, lm_labels, labels_boundaries = batch
batch_size, sequence_size = tokens.shape
valid_output_tonkens = torch.zeros(batch_size, sequence_size, bert_hidden_size, dtype=torch.float32, device=device)
valid_output_predict = None
if lm_mask is not None:
valid_output_predict = torch.zeros(batch_size, sequence_size, bert_hidden_size, dtype=torch.float32, device=device)
if using_cuda:
tokens = tokens.to(device)
tags = tags.to(device)
attention_masks = attention_masks.to(device)
labelling_mask = labelling_mask.to(device)
tokens_mask = tokens_mask.to(device)
token_type_ids = token_type_ids.to(device)
labels_boundaries = labels_boundaries.to(device)
if lm_mask is not None:
lm_mask = lm_mask.to(device)
lm_labels = lm_labels.to(device)
loss, _ = model(tokens, token_type_ids=token_type_ids, attention_mask=attention_masks, labels=tags,
tokens_mask=tokens_mask, labelling_mask=labelling_mask, lm_mask=lm_mask,
lm_labels=lm_labels,
labels_boundaries=labels_boundaries, valid_output_tokens=valid_output_tonkens,
valid_output_predict=valid_output_predict)
if multi_gpu:
loss.sum().backward()
running_loss += loss.sum().item()
else:
loss.backward()
running_loss += loss.item()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
if (step + 1) % gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
if step == 0:
step = 1
print(f"Loss:\t{running_loss / step}")
if dev_dataloader is not None:
print("Evaluating Model with Dev")
model.eval()
eval_score, report, loss = predict(model, dev_dataloader, tagged=True, evaluation_function=evaluation_function,
calculate_loss=True, multi_gpu=multi_gpu, bert_hidden_size=bert_hidden_size,
use_gpu=use_gpu, gpu_device=gpu_device, test_aligner=dev_aligner)
print(f"F-score: {eval_score}\tLoss: {loss}")
if saving_path is not None and early_stop > 0:
if early_stopper.checkImprovement(loss, eval_score):
print(early_stopper.getCounter())
if not os.path.exists(f"{saving_path}/{experiment_name}/"):
os.makedirs(f"{saving_path}/{experiment_name}/")
if multi_gpu:
model.module.save_pretrained(f"{saving_path}/{experiment_name}/")
else:
model.save_pretrained(f"{saving_path}/{experiment_name}/")
with open(f"{saving_path}/{experiment_name}/dev-{experiment_name}-results.txt", "w") as output_file:
output_file.write(report)
output_file.write("\n")
with open(f"{saving_path}/{experiment_name}/best-{experiment_name}-epoch.txt", "w") as output_file:
output_file.write(f"best: {epoch + 1}\n")
else:
print(early_stopper.getCounter())
if early_stopper.stopTraining():
with open(f"{saving_path}/{experiment_name}/best-{experiment_name}-epoch.txt", "a") as output_file:
output_file.write(f"last: {epoch + 1}\n")
output_file.write(f"reason: {early_stopper.getCounter()}\n")
print(f"Early stop as there have been {early_stop} epochs withouth change")
break
if early_stop == 0:
if not os.path.exists(f"{saving_path}/{experiment_name}/"):
os.makedirs(f"{saving_path}/{experiment_name}/")
if multi_gpu:
model.module.save_pretrained(f"{saving_path}/{experiment_name}/")
else:
model.save_pretrained(f"{saving_path}/{experiment_name}/")
if dev_dataloader is not None:
with open(f"{saving_path}/{experiment_name}/dev-{experiment_name}-results.txt", "w") as output_file:
output_file.write(report)
output_file.write("\n")
def train_bert_model(model, experiment_name, epochs, optimizer, scheduler, train_batcher, train_batching_params,
dev_dataloader,
evaluation_function, saving_path, early_stop=0, use_gpu=True, gpu_device="cuda:0", masking=False,
update_masking=False, dev_aligner=None, multi_gpu=False, bert_hidden_size=768, uppercase_percentage=0.0):
using_cuda = False
if torch.cuda.is_available() and use_gpu:
device = torch.device(gpu_device)
model.to(device)
using_cuda = True
else:
device = torch.device("cpu")
gradient_accumulation_steps = 1
step = 0
eval_score = 0.0
report = ""
train_dataloader = data.DataLoader(train_batcher, **train_batching_params, collate_fn=train_batcher.collate_fn)
early_stopper = EarlyStopper(patience=early_stop)
for epoch in range(epochs):
if epoch > 0 and (masking is True or uppercase_percentage > 0.0):
if update_masking and eval_score >= 0.90:
print("Updating masking")
train_batcher.updateMasking(None, False)
update_masking = False
early_stop = 5
print("Batching training dataset")
train_batcher.createBatches()
train_dataloader = data.DataLoader(train_batcher, **train_batching_params,
collate_fn=train_batcher.collate_fn)
print(f"\nTraining Model: {epoch + 1}")
running_loss = 0.0
model.train()
for step, batch in enumerate(tqdm(train_dataloader, total=len(train_dataloader))):
tokens, attention_masks, token_type_ids, tags, tokens_mask, labelling_mask, lm_mask, lm_labels = batch
batch_size, sequence_size = tokens.shape
valid_output_tonkens = torch.zeros(batch_size, sequence_size, bert_hidden_size, dtype=torch.float32, device=device)
valid_output_predict = None
if lm_mask is not None:
valid_output_predict = torch.zeros(batch_size, sequence_size, bert_hidden_size, dtype=torch.float32, device=device)
if using_cuda:
tokens = tokens.to(device)
tags = tags.to(device)
attention_masks = attention_masks.to(device)
labelling_mask = labelling_mask.to(device)
tokens_mask = tokens_mask.to(device)
token_type_ids = token_type_ids.to(device)
if lm_mask is not None:
lm_mask = lm_mask.to(device)
lm_labels = lm_labels.to(device)
loss, _ = model(tokens, token_type_ids=token_type_ids, attention_mask=attention_masks, labels=tags,
tokens_mask=tokens_mask, labelling_mask=labelling_mask, lm_mask=lm_mask,
lm_labels=lm_labels, valid_output_tokens=valid_output_tonkens,
valid_output_predict=valid_output_predict)
if multi_gpu:
loss.sum().backward()
running_loss += loss.sum().item()
else:
loss.backward()
running_loss += loss.item()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
if (step + 1) % gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
step += 1
print(f"Loss:\t{running_loss / step}")
if dev_dataloader is not None:
print("Evaluating Model with Dev")
model.eval()
eval_score, report, loss = predict(model, dev_dataloader, tagged=True, evaluation_function=evaluation_function,
calculate_loss=True, multi_gpu=multi_gpu, bert_hidden_size=bert_hidden_size,
use_gpu=use_gpu, gpu_device=gpu_device, test_aligner=dev_aligner)
print(f"F-score: {eval_score}\tLoss: {loss}")
if saving_path is not None and early_stop > 0:
if early_stopper.checkImprovement(loss, eval_score):
print(early_stopper.getCounter())
if not os.path.exists(f"{saving_path}/{experiment_name}/"):
os.makedirs(f"{saving_path}/{experiment_name}/")
if multi_gpu:
model.module.save_pretrained(f"{saving_path}/{experiment_name}/")
else:
model.save_pretrained(f"{saving_path}/{experiment_name}/")
with open(f"{saving_path}/{experiment_name}/dev-{experiment_name}-results.txt", "w") as output_file:
output_file.write(report)
output_file.write("\n")
with open(f"{saving_path}/{experiment_name}/best-{experiment_name}-epoch.txt", "w") as output_file:
output_file.write(f"best: {epoch + 1}\n")
else:
print(early_stopper.getCounter())
if early_stopper.stopTraining():
with open(f"{saving_path}/{experiment_name}/best-{experiment_name}-epoch.txt", "a") as output_file:
output_file.write(f"last: {epoch + 1}\n")
output_file.write(f"reason: {early_stopper.getCounter()}\n")
print(f"Early stop as there have been {early_stop} epochs withouth change")
break
if early_stop == 0:
if not os.path.exists(f"{saving_path}/{experiment_name}/"):
os.makedirs(f"{saving_path}/{experiment_name}/")
if multi_gpu:
model.module.save_pretrained(f"{saving_path}/{experiment_name}/")
else:
model.save_pretrained(f"{saving_path}/{experiment_name}/")
if dev_dataloader is not None:
with open(f"{saving_path}/{experiment_name}/dev-{experiment_name}-results.txt", "w") as output_file:
output_file.write(report)
output_file.write("\n")
def predict(model, test_dataloader, tagged=False, evaluation_function=None, use_gpu=False, calculate_loss=False,
gpu_device="cuda:0", test_aligner=None, multi_gpu=False, bert_hidden_size=768):
predictions = []
gold_standard = []
using_cuda = False
if torch.cuda.is_available() and use_gpu:
device = torch.device(gpu_device)
model.to(device)
using_cuda = True
else:
device = torch.device("cpu")
model.eval()
if multi_gpu:
hasCRF = model.module.hasCRF()
else:
hasCRF = model.hasCRF()
for step, batch in enumerate(tqdm(test_dataloader, total=len(test_dataloader))):
tokens, attention_masks, token_type_ids, tags, tokens_mask, labelling_mask = batch
batch_size, sequence_size = tokens.shape
valid_output_tonkens = torch.zeros(batch_size, sequence_size, bert_hidden_size, dtype=torch.float32, device=device)
if using_cuda:
tokens = tokens.to(device)
attention_masks = attention_masks.to(device)
if calculate_loss:
labelling_mask = labelling_mask.to(device)
tags = tags.to(device)
tokens_mask = tokens_mask.to(device)
token_type_ids = token_type_ids.to(device)
with torch.no_grad():
if calculate_loss:
loss, logits = model(tokens, token_type_ids=token_type_ids, attention_mask=attention_masks,
labels=tags, tokens_mask=tokens_mask, labelling_mask=labelling_mask,
valid_output_tokens=valid_output_tonkens)
if multi_gpu:
loss = loss.sum()
else:
logits = model(tokens, token_type_ids=token_type_ids, attention_mask=attention_masks,
labels=None, tokens_mask=tokens_mask, labelling_mask=None,
valid_output_tokens=valid_output_tonkens)
if hasCRF:
if multi_gpu:
logits = model.module.getCRFtags(logits, labelling_mask.to(device))
else:
logits = model.getCRFtags(logits, labelling_mask.to(device))
predictions.extend(logits)
else:
logits = torch.argmax(torch.log_softmax(logits, dim=2), dim=2)
logits = logits.detach().cpu().numpy()
if tagged:
if using_cuda and calculate_loss:
labelling_mask = labelling_mask.detach().cpu()
tags = tags.detach().cpu()
for i in range(len(tags)):
active_tokens = labelling_mask[i] == 1
active_tags = ((tags[i])[active_tokens])
gold_standard.append(active_tags.tolist())
if not hasCRF:
active_logits = ((logits[i])[active_tokens])
predictions.append(active_logits.tolist())
elif not hasCRF:
for i in range(len(logits)):
active_tokens = labelling_mask[i] == 1
active_logits = ((logits[i])[active_tokens])
predictions.append(active_logits.tolist())
if test_aligner is not None:
predictions, gold_standard = sentenceAligner(test_aligner, predictions, gold_standard)
if tagged and evaluation_function is not None:
eval_score, report, david_metrics = evaluation_function(predictions, gold_standard)
if calculate_loss:
return eval_score, report, loss / (step + 1)
else:
return predictions, eval_score, report, david_metrics
return predictions
def sentenceAligner(aligner, predictions, gold_standard):
sentence_offset = 0
final_predictions = []
final_gold_standard = []
print("Aligning predictions")
for sentence_id, sentence in enumerate(tqdm(aligner, total=len(aligner))):
sentence_prediction = []
sentence_gold = []
while len(sentence_prediction) < aligner[sentence_id]:
sentence_prediction.extend(predictions[sentence_id + sentence_offset])
if len(gold_standard) > 0:
sentence_gold.extend(gold_standard[sentence_id + sentence_offset])
if len(sentence_prediction) < aligner[sentence_id]:
sentence_offset += 1
assert (len(sentence_prediction) == aligner[sentence_id])
if len(gold_standard) > 0:
assert (len(sentence_gold) == aligner[sentence_id])
final_gold_standard.append(sentence_gold)
final_predictions.append(sentence_prediction)
predictions = final_predictions
if len(gold_standard) > 0:
gold_standard = final_gold_standard
return predictions, gold_standard
| 40.664756
| 130
| 0.734428
| 1,978
| 14,192
| 4.9909
| 0.094034
| 0.048217
| 0.026742
| 0.051053
| 0.844712
| 0.831442
| 0.798926
| 0.776033
| 0.772387
| 0.768233
| 0
| 0.007068
| 0.152621
| 14,192
| 348
| 131
| 40.781609
| 0.81382
| 0.004157
| 0
| 0.748408
| 0
| 0
| 0.119533
| 0.076999
| 0
| 0
| 0
| 0
| 0.006369
| 1
| 0.012739
| false
| 0
| 0.015924
| 0
| 0.041401
| 0.06051
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5365d6e6f52dcd4bf82e9cb162d3762ee140ce75
| 3,602
|
py
|
Python
|
biskivy/behaviors.py
|
manahter/biskivy
|
8fe87c94b42d9563a8d8939a517401c8221542aa
|
[
"MIT"
] | null | null | null |
biskivy/behaviors.py
|
manahter/biskivy
|
8fe87c94b42d9563a8d8939a517401c8221542aa
|
[
"MIT"
] | null | null | null |
biskivy/behaviors.py
|
manahter/biskivy
|
8fe87c94b42d9563a8d8939a517401c8221542aa
|
[
"MIT"
] | null | null | null |
"""
.. versionchanged:: 18.05.2020
Verisyon takip tarihi eklendi
"""
from kivy.properties import BooleanProperty, ObjectProperty
from kivy.core.window import Window
class HoverBehaviorS1(object):
"""
on_enter -> Fare, ata'ın üstüne geldiğinde 1 kere..
on_leave -> Fare, ata'ın üstünden ayrıldığında 1 kere ... çalışır
.. versionchanged:: 18.05.2020
Verisyon takip tarihi eklendi
"""
disabled = BooleanProperty(False)
hovered = BooleanProperty(False)
border_point = ObjectProperty(None)
def __init__(self, **kwargs):
super(HoverBehaviorS1, self).__init__(**kwargs)
self.register_event_type('on_enter')
self.register_event_type('on_leave')
self.on_disabled()
def on_disabled(self, *args):
"""Eğer pasifse, boşuna çalışıpta kaynak tüketmesin"""
if not self.disabled:
Window.bind(mouse_pos=self.on_mouse_pos)
else:
Window.unbind(mouse_pos=self.on_mouse_pos)
def on_mouse_pos(self, *args):
if not self.get_root_window():
return # do proceed if I'm not displayed <=> If have no parent
pos = args[1]
# Next line to_widget allow to compensate for relative layout
inside = self.collide_point(*self.to_widget(*pos))
if self.hovered == inside:
# We have already done what was needed
return
self.border_point = pos
self.hovered = inside
if inside:
self.dispatch('on_enter')
else:
self.dispatch('on_leave')
def on_enter(self):
pass
def on_leave(self):
pass
class HoverBehaviorS2(object):
"""
on_enter -> Fare, ata'ın üstüne geldiğinde 1 kere..
on_leave -> Fare, ata'ın üstünden ayrıldığında 1 kere ... çalışır
.. versionchanged:: 18.05.2020
Verisyon takip tarihi eklendi
"""
# disabled = BooleanProperty(False)
hovered = BooleanProperty(False)
border_point = ObjectProperty(None)
hover_mesafe_x = 0
hover_mesafe_y = 0
def __init__(self, **kwargs):
super(HoverBehaviorS2, self).__init__(**kwargs)
self.register_event_type('on_enter')
self.register_event_type('on_leave')
self.on_disabled()
def collide_point_yeni(self, x, y):
return self.x - self.hover_mesafe_x <= x <= self.right + self.hover_mesafe_x and \
self.y - self.hover_mesafe_y <= y <= self.top + self.hover_mesafe_y
def on_disabled(self, *args):
"""Eğer pasifse, boşuna çalışıpta kaynak tüketmesin"""
if not self.disabled:
Window.bind(mouse_pos=self.on_mouse_pos)
else:
Window.unbind(mouse_pos=self.on_mouse_pos)
def on_mouse_pos(self, *args):
if not self.get_root_window():
return # do proceed if I'm not displayed <=> If have no parent
pos = args[1]
# Next line to_widget allow to compensate for relative layout
inside = self.collide_point_yeni(*self.to_widget(*pos))
if self.hovered == inside:
# We have already done what was needed
return
self.border_point = pos
self.hovered = inside
if inside:
self.dispatch('on_enter')
else:
self.dispatch('on_leave')
def on_enter(self):
pass
def on_leave(self):
pass
from kivy.graphics import Color, Line
from kivy.factory import Factory
Factory.register('HoverBehaviorS1', HoverBehaviorS1)
Factory.register('HoverBehaviorS2', HoverBehaviorS2)
| 29.768595
| 90
| 0.630483
| 445
| 3,602
| 4.903371
| 0.231461
| 0.036664
| 0.032997
| 0.038497
| 0.791476
| 0.771311
| 0.771311
| 0.771311
| 0.749313
| 0.749313
| 0
| 0.015285
| 0.273459
| 3,602
| 120
| 91
| 30.016667
| 0.818494
| 0.248473
| 0
| 0.75
| 0
| 0
| 0.035755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152778
| false
| 0.055556
| 0.055556
| 0.013889
| 0.402778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
727fac96e7f8afad729fc8c8d294b2db64b25490
| 1,191
|
py
|
Python
|
typing-server/api/utilities/filter.py
|
aditya02acharya/TypingUI-lite
|
9246d45779d87820d056fdfe4d58782ef6737b24
|
[
"MIT"
] | null | null | null |
typing-server/api/utilities/filter.py
|
aditya02acharya/TypingUI-lite
|
9246d45779d87820d056fdfe4d58782ef6737b24
|
[
"MIT"
] | null | null | null |
typing-server/api/utilities/filter.py
|
aditya02acharya/TypingUI-lite
|
9246d45779d87820d056fdfe4d58782ef6737b24
|
[
"MIT"
] | null | null | null |
import logging
class InfoFilter(logging.Filter):
"""
Simple Filter class
"""
def __init__(self):
"""
Constructor
"""
super().__init__(name='filter_info_logs')
def filter(self, record):
"""
Return Log Record Object based on condition - Return only info logs
Args:
-----
record: Log Record Object
Return:
-------
record: Log Record Object
"""
assert isinstance(record, logging.LogRecord)
if record.levelno == logging.INFO:
return record
class DebugFilter(logging.Filter):
"""
Simple Filter class
"""
def __init__(self):
"""
Constructor
"""
super().__init__(name='filter_debug_logs')
def filter(self, record):
"""
Return Log Record Object based on condition - Return only debug logs
Args:
-----
record: Log Record Object
Return:
-------
record: Log Record Object
"""
assert isinstance(record, logging.LogRecord)
if record.levelno == logging.DEBUG:
return record
| 20.534483
| 76
| 0.528128
| 110
| 1,191
| 5.536364
| 0.272727
| 0.08867
| 0.147783
| 0.137931
| 0.844007
| 0.844007
| 0.844007
| 0.844007
| 0.844007
| 0.844007
| 0
| 0
| 0.366079
| 1,191
| 57
| 77
| 20.894737
| 0.806623
| 0.31906
| 0
| 0.533333
| 0
| 0
| 0.055743
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.266667
| false
| 0
| 0.066667
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
f44d8e5b9871dd2bc0ba1c7388bd0db1847b0c7a
| 4,325
|
py
|
Python
|
tests/dashboard/test_shipping.py
|
jslegend/python3-django-saleor
|
4b93add64e6f612ee9ce4ea3108effab65c2ad31
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T13:28:29.000Z
|
2021-01-29T13:28:29.000Z
|
tests/dashboard/test_shipping.py
|
jslegend/python3-django-saleor
|
4b93add64e6f612ee9ce4ea3108effab65c2ad31
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dashboard/test_shipping.py
|
jslegend/python3-django-saleor
|
4b93add64e6f612ee9ce4ea3108effab65c2ad31
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from django.urls import reverse
from saleor.shipping.models import ShippingMethod, ShippingMethodCountry
def test_shipping_method_list(admin_client, shipping_method):
url = reverse('dashboard:shipping-methods')
response = admin_client.get(url)
assert response.status_code == 200
def test_shipping_method_add(admin_client):
assert len(ShippingMethod.objects.all()) == 0
url = reverse('dashboard:shipping-method-add')
data = {'name': 'Zium', 'description': 'Fastest zium'}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert len(ShippingMethod.objects.all()) == 1
def test_shipping_method_add_not_valid(admin_client):
assert len(ShippingMethod.objects.all()) == 0
url = reverse('dashboard:shipping-method-add')
data = {}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert len(ShippingMethod.objects.all()) == 0
def test_shipping_method_edit(admin_client, shipping_method):
assert len(ShippingMethod.objects.all()) == 1
url = reverse('dashboard:shipping-method-update',
kwargs={'pk': shipping_method.pk})
data = {'name': 'Flash', 'description': 'In a flash!'}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert len(ShippingMethod.objects.all()) == 1
assert ShippingMethod.objects.all()[0].name == 'Flash'
def test_shipping_method_detail(admin_client, shipping_method):
assert len(ShippingMethod.objects.all()) == 1
url = reverse('dashboard:shipping-method-detail',
kwargs={'pk': shipping_method.pk})
response = admin_client.post(url, follow=True)
assert response.status_code == 200
def test_shipping_method_delete(admin_client, shipping_method):
assert len(ShippingMethod.objects.all()) == 1
url = reverse('dashboard:shipping-method-delete',
kwargs={'pk': shipping_method.pk})
response = admin_client.post(url, follow=True)
assert response.status_code == 200
assert len(ShippingMethod.objects.all()) == 0
def test_shipping_method_country_add(admin_client, shipping_method):
assert len(ShippingMethodCountry.objects.all()) == 1
url = reverse('dashboard:shipping-method-country-add',
kwargs={'shipping_method_pk': shipping_method.pk})
data = {'country_code': 'FR', 'price': '50',
'shipping_method': shipping_method.pk}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert len(ShippingMethodCountry.objects.all()) == 2
def test_shipping_method_country_add_not_valid(admin_client, shipping_method):
assert len(ShippingMethodCountry.objects.all()) == 1
url = reverse('dashboard:shipping-method-country-add',
kwargs={'shipping_method_pk': shipping_method.pk})
data = {}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert len(ShippingMethodCountry.objects.all()) == 1
def test_shipping_method_country_edit(admin_client, shipping_method):
assert len(ShippingMethodCountry.objects.all()) == 1
country = shipping_method.price_per_country.all()[0]
assert country.price.gross == 10
url = reverse('dashboard:shipping-method-country-edit',
kwargs={'shipping_method_pk': shipping_method.pk,
'country_pk': country.pk})
data = {'country_code': '', 'price': '50',
'shipping_method': shipping_method.pk}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert len(ShippingMethodCountry.objects.all()) == 1
assert shipping_method.price_per_country.all()[0].price.gross == 50
def test_shipping_method_country_delete(admin_client, shipping_method):
assert len(ShippingMethodCountry.objects.all()) == 1
country = shipping_method.price_per_country.all()[0]
url = reverse('dashboard:shipping-method-country-delete',
kwargs={'shipping_method_pk': shipping_method.pk,
'country_pk': country.pk})
response = admin_client.post(url, follow=True)
assert response.status_code == 200
assert len(ShippingMethodCountry.objects.all()) == 0
| 41.586538
| 78
| 0.703353
| 523
| 4,325
| 5.609943
| 0.112811
| 0.214724
| 0.070893
| 0.071575
| 0.850375
| 0.821404
| 0.791411
| 0.747785
| 0.747785
| 0.724267
| 0
| 0.016504
| 0.17341
| 4,325
| 103
| 79
| 41.990291
| 0.804196
| 0
| 0
| 0.646341
| 0
| 0
| 0.131098
| 0.076763
| 0
| 0
| 0
| 0
| 0.365854
| 1
| 0.121951
| false
| 0
| 0.036585
| 0
| 0.158537
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be4cb70b4b4eb2082f452c79c38b8355fcad5357
| 269
|
py
|
Python
|
config-example.py
|
rciam/rciam-sync-client-names
|
ab4a11618704380ea79d9e02e7b824d6c4f93274
|
[
"Apache-2.0"
] | null | null | null |
config-example.py
|
rciam/rciam-sync-client-names
|
ab4a11618704380ea79d9e02e7b824d6c4f93274
|
[
"Apache-2.0"
] | null | null | null |
config-example.py
|
rciam/rciam-sync-client-names
|
ab4a11618704380ea79d9e02e7b824d6c4f93274
|
[
"Apache-2.0"
] | 1
|
2021-07-12T12:36:25.000Z
|
2021-07-12T12:36:25.000Z
|
mitreid_config = {
"dbname": "example_db",
"user": "example_user",
"host": "example_address",
"password": "secret"
}
proxystats_config = {
"dbname": "example_db",
"user": "example_user",
"host": "example_address",
"password": "secret"
}
| 20.692308
| 30
| 0.598513
| 26
| 269
| 5.884615
| 0.423077
| 0.156863
| 0.248366
| 0.27451
| 0.888889
| 0.888889
| 0.888889
| 0.888889
| 0.888889
| 0.888889
| 0
| 0
| 0.208178
| 269
| 13
| 31
| 20.692308
| 0.71831
| 0
| 0
| 0.666667
| 0
| 0
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
be781ca96cdef4e0c19ab246b401ea64df43364f
| 17,401
|
py
|
Python
|
fhirclient/models/organization_tests.py
|
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
|
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
|
[
"Apache-2.0"
] | 2
|
2019-05-16T16:41:22.000Z
|
2021-04-22T22:06:49.000Z
|
fhirclient/models/organization_tests.py
|
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
|
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/organization_tests.py
|
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
|
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
|
[
"Apache-2.0"
] | 3
|
2019-03-26T01:39:18.000Z
|
2020-02-02T19:06:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import os
import io
import unittest
import json
from . import organization
from .fhirdate import FHIRDate
class OrganizationTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Organization", js["resourceType"])
return organization.Organization(js)
def testOrganization1(self):
inst = self.instantiate_from("organization-example-f001-burgers.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization1(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization1(inst2)
def implOrganization1(self, inst):
self.assertEqual(inst.address[0].city, "Den Burg")
self.assertEqual(inst.address[0].country, "NLD")
self.assertEqual(inst.address[0].line[0], "Galapagosweg 91")
self.assertEqual(inst.address[0].postalCode, "9105 PZ")
self.assertEqual(inst.address[0].use, "work")
self.assertEqual(inst.address[1].city, "Den Burg")
self.assertEqual(inst.address[1].country, "NLD")
self.assertEqual(inst.address[1].line[0], "PO Box 2311")
self.assertEqual(inst.address[1].postalCode, "9100 AA")
self.assertEqual(inst.address[1].use, "work")
self.assertEqual(inst.contact[0].purpose.coding[0].code, "PRESS")
self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].value, "022-655 2334")
self.assertEqual(inst.contact[1].purpose.coding[0].code, "PATINF")
self.assertEqual(inst.contact[1].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
self.assertEqual(inst.contact[1].telecom[0].system, "phone")
self.assertEqual(inst.contact[1].telecom[0].value, "022-655 2335")
self.assertEqual(inst.id, "f001")
self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.528.1")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "91654")
self.assertEqual(inst.identifier[1].system, "urn:oid:2.16.840.1.113883.2.4.6.1")
self.assertEqual(inst.identifier[1].use, "usual")
self.assertEqual(inst.identifier[1].value, "17-0112278")
self.assertEqual(inst.name, "Burgers University Medical Center")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "022-655 2300")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "V6")
self.assertEqual(inst.type.coding[0].display, "University Medical Hospital")
self.assertEqual(inst.type.coding[0].system, "urn:oid:2.16.840.1.113883.2.4.15.1060")
self.assertEqual(inst.type.coding[1].code, "prov")
self.assertEqual(inst.type.coding[1].display, "Healthcare Provider")
self.assertEqual(inst.type.coding[1].system, "http://hl7.org/fhir/organization-type")
def testOrganization2(self):
inst = self.instantiate_from("organization-example-f002-burgers-card.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization2(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization2(inst2)
def implOrganization2(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].line[0], "South Wing, floor 2")
self.assertEqual(inst.contact[0].address.line[0], "South Wing, floor 2")
self.assertEqual(inst.contact[0].name.text, "mevr. D. de Haan")
self.assertEqual(inst.contact[0].purpose.coding[0].code, "ADMIN")
self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].value, "022-655 2321")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].value, "cardio@burgersumc.nl")
self.assertEqual(inst.contact[0].telecom[2].system, "fax")
self.assertEqual(inst.contact[0].telecom[2].value, "022-655 2322")
self.assertEqual(inst.id, "f002")
self.assertEqual(inst.name, "Burgers UMC Cardiology unit")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "022-655 2320")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "dept")
self.assertEqual(inst.type.coding[0].display, "Hospital Department")
self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/organization-type")
def testOrganization3(self):
inst = self.instantiate_from("organization-example-f003-burgers-ENT.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization3(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization3(inst2)
def implOrganization3(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].line[0], "West Wing, floor 5")
self.assertEqual(inst.contact[0].address.line[0], "West Wing, floor 5")
self.assertEqual(inst.contact[0].name.text, "mr. F. de Hond")
self.assertEqual(inst.contact[0].purpose.coding[0].code, "ADMIN")
self.assertEqual(inst.contact[0].purpose.coding[0].system, "http://hl7.org/fhir/contactentity-type")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].value, "022-655 7654")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].value, "KNO@burgersumc.nl")
self.assertEqual(inst.contact[0].telecom[2].system, "fax")
self.assertEqual(inst.contact[0].telecom[2].value, "022-655 0998")
self.assertEqual(inst.id, "f003")
self.assertEqual(inst.name, "Burgers UMC Ear,Nose,Throat unit")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "022-655 6780")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "dept")
self.assertEqual(inst.type.coding[0].display, "Hospital Department")
self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/organization-type")
def testOrganization4(self):
inst = self.instantiate_from("organization-example-f201-aumc.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization4(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization4(inst2)
def implOrganization4(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].city, "Den Helder")
self.assertEqual(inst.address[0].country, "NLD")
self.assertEqual(inst.address[0].line[0], "Walvisbaai 3")
self.assertEqual(inst.address[0].postalCode, "2333ZA")
self.assertEqual(inst.address[0].use, "work")
self.assertEqual(inst.contact[0].address.city, "Den helder")
self.assertEqual(inst.contact[0].address.country, "NLD")
self.assertEqual(inst.contact[0].address.line[0], "Walvisbaai 3")
self.assertEqual(inst.contact[0].address.line[1], "Gebouw 2")
self.assertEqual(inst.contact[0].address.postalCode, "2333ZA")
self.assertEqual(inst.contact[0].name.family[0], "Brand")
self.assertEqual(inst.contact[0].name.given[0], "Ronald")
self.assertEqual(inst.contact[0].name.prefix[0], "Prof.Dr.")
self.assertEqual(inst.contact[0].name.text, "Professor Brand")
self.assertEqual(inst.contact[0].name.use, "official")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].use, "work")
self.assertEqual(inst.contact[0].telecom[0].value, "+31715269702")
self.assertEqual(inst.id, "f201")
self.assertEqual(inst.identifier[0].system, "http://www.zorgkaartnederland.nl/")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "Artis University Medical Center")
self.assertEqual(inst.name, "Artis University Medical Center (AUMC)")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "+31715269111")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "405608006")
self.assertEqual(inst.type.coding[0].display, "Academic Medical Center")
self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.type.coding[1].code, "V6")
self.assertEqual(inst.type.coding[1].display, "University Medical Hospital")
self.assertEqual(inst.type.coding[1].system, "urn:oid:2.16.840.1.113883.2.4.15.1060")
self.assertEqual(inst.type.coding[2].code, "prov")
self.assertEqual(inst.type.coding[2].display, "Healthcare Provider")
self.assertEqual(inst.type.coding[2].system, "http://hl7.org/fhir/organization-type")
def testOrganization5(self):
inst = self.instantiate_from("organization-example-f203-bumc.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization5(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization5(inst2)
def implOrganization5(self, inst):
self.assertTrue(inst.active)
self.assertEqual(inst.address[0].city, "Blijdorp")
self.assertEqual(inst.address[0].country, "NLD")
self.assertEqual(inst.address[0].line[0], "apenrots 230")
self.assertEqual(inst.address[0].postalCode, "3056BE")
self.assertEqual(inst.address[0].use, "work")
self.assertEqual(inst.id, "f203")
self.assertEqual(inst.identifier[0].system, "http://www.zorgkaartnederland.nl/")
self.assertEqual(inst.identifier[0].type.text, "Zorginstelling naam")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "Blijdorp MC")
self.assertEqual(inst.name, "Blijdorp Medisch Centrum (BUMC)")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "+31107040704")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "405608006")
self.assertEqual(inst.type.coding[0].display, "Academic Medical Center")
self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.type.coding[1].code, "prov")
self.assertEqual(inst.type.coding[1].system, "http://hl7.org/fhir/organization-type")
def testOrganization6(self):
inst = self.instantiate_from("organization-example-gastro.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization6(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization6(inst2)
def implOrganization6(self, inst):
self.assertEqual(inst.id, "1")
self.assertEqual(inst.identifier[0].system, "http://www.acme.org.au/units")
self.assertEqual(inst.identifier[0].value, "Gastro")
self.assertEqual(inst.name, "Gastroenterology")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "mobile")
self.assertEqual(inst.telecom[0].value, "+1 555 234 3523")
self.assertEqual(inst.telecom[1].system, "email")
self.assertEqual(inst.telecom[1].use, "work")
self.assertEqual(inst.telecom[1].value, "gastro@acme.org")
self.assertEqual(inst.text.status, "generated")
def testOrganization7(self):
inst = self.instantiate_from("organization-example-good-health-care.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization7(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization7(inst2)
def implOrganization7(self, inst):
self.assertEqual(inst.id, "2.16.840.1.113883.19.5")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "2.16.840.1.113883.19.5")
self.assertEqual(inst.name, "Good Health Clinic")
self.assertEqual(inst.text.status, "generated")
def testOrganization8(self):
inst = self.instantiate_from("organization-example-insurer.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization8(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization8(inst2)
def implOrganization8(self, inst):
self.assertEqual(inst.id, "2")
self.assertEqual(inst.identifier[0].system, "urn:oid:2.16.840.1.113883.3.19.2.3")
self.assertEqual(inst.identifier[0].value, "666666")
self.assertEqual(inst.name, "XYZ Insurance")
self.assertEqual(inst.text.status, "generated")
def testOrganization9(self):
inst = self.instantiate_from("organization-example-lab.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization9(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization9(inst2)
def implOrganization9(self, inst):
self.assertEqual(inst.id, "1832473e-2fe0-452d-abe9-3cdb9879522f")
self.assertEqual(inst.identifier[0].system, "http://www.acme.org.au/units")
self.assertEqual(inst.identifier[0].value, "ClinLab")
self.assertEqual(inst.name, "Clinical Lab")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].use, "work")
self.assertEqual(inst.telecom[0].value, "+1 555 234 1234")
self.assertEqual(inst.telecom[1].system, "email")
self.assertEqual(inst.telecom[1].use, "work")
self.assertEqual(inst.telecom[1].value, "contact@labs.acme.org")
self.assertEqual(inst.text.status, "generated")
def testOrganization10(self):
inst = self.instantiate_from("organization-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Organization instance")
self.implOrganization10(inst)
js = inst.as_json()
self.assertEqual("Organization", js["resourceType"])
inst2 = organization.Organization(js)
self.implOrganization10(inst2)
def implOrganization10(self, inst):
self.assertEqual(inst.address[0].city, "Ann Arbor")
self.assertEqual(inst.address[0].country, "USA")
self.assertEqual(inst.address[0].line[0], "3300 Washtenaw Avenue, Suite 227")
self.assertEqual(inst.address[0].postalCode, "48104")
self.assertEqual(inst.address[0].state, "MI")
self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/organization-alias")
self.assertEqual(inst.extension[0].valueString, "HL7 International")
self.assertEqual(inst.id, "hl7")
self.assertEqual(inst.name, "Health Level Seven International")
self.assertEqual(inst.telecom[0].system, "phone")
self.assertEqual(inst.telecom[0].value, "(+1) 734-677-7777")
self.assertEqual(inst.telecom[1].system, "fax")
self.assertEqual(inst.telecom[1].value, "(+1) 734-677-6622")
self.assertEqual(inst.telecom[2].system, "email")
self.assertEqual(inst.telecom[2].value, "hq@HL7.org")
self.assertEqual(inst.text.status, "generated")
| 53.377301
| 109
| 0.669329
| 2,108
| 17,401
| 5.514232
| 0.132827
| 0.243892
| 0.29095
| 0.091707
| 0.809532
| 0.775637
| 0.716449
| 0.622075
| 0.585685
| 0.558156
| 0
| 0.050031
| 0.178725
| 17,401
| 325
| 110
| 53.541538
| 0.763348
| 0.006494
| 0
| 0.396491
| 1
| 0.014035
| 0.200613
| 0.035702
| 0
| 0
| 0
| 0
| 0.712281
| 1
| 0.073684
| false
| 0
| 0.021053
| 0
| 0.101754
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe422ca8de52b95188cf6912d231c14d0e5b24aa
| 4,567
|
py
|
Python
|
tests/test_decorators.py
|
markkorput/pyevento
|
909955dc9219a5d47c0fddc3ddc7d29ddecc6482
|
[
"MIT"
] | 5
|
2016-11-08T05:27:22.000Z
|
2021-01-19T10:02:39.000Z
|
tests/test_decorators.py
|
markkorput/pyevento
|
909955dc9219a5d47c0fddc3ddc7d29ddecc6482
|
[
"MIT"
] | 3
|
2016-11-08T05:28:44.000Z
|
2018-08-21T08:58:22.000Z
|
tests/test_decorators.py
|
markkorput/pyevento
|
909955dc9219a5d47c0fddc3ddc7d29ddecc6482
|
[
"MIT"
] | 1
|
2021-01-20T15:44:12.000Z
|
2021-01-20T15:44:12.000Z
|
#!/usr/bin/env python
import unittest
from evento import triggers_before_event, triggers_after_event, triggers_beforeafter_events
class TestDecorators(unittest.TestCase):
def test_triggers_before_event(self):
# add before events to a method with this decorator
@triggers_before_event
def some_action():
self.value += 'a'
def observer(param):
self.observed_param = param
self.value += 'b'
some_action.beforeEvent += observer
self.value = ''
some_action()
self.assertEqual(self.value, 'ba')
self.assertEqual(self.observed_param, some_action.beforeEvent)
def test_triggers_after_event(self):
# add before events to a method with this decorator
@triggers_after_event
def some_action():
self.value += 'a'
def observer(param):
self.observed_param = param
self.value += 'b'
some_action.afterEvent += observer
self.value = ''
some_action()
self.assertEqual(self.value, 'ab')
self.assertEqual(self.observed_param, some_action.afterEvent)
def test_triggers_beforeafter_events(self):
# add before events to a method with this decorator
@triggers_beforeafter_events
def some_action():
self.value += 'a'
def observer(param):
if param == some_action.beforeEvent:
self.value += 'before-'
if param == some_action.afterEvent:
self.value += '-after'
some_action.beforeEvent += observer
some_action.afterEvent += observer
self.value = ''
some_action()
self.assertEqual(self.value, 'before-a-after')
def test_before_subscribtion(self):
@triggers_before_event
def some_action():
self.value += 'a'
def before(event):
pass
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), False)
# some_action.subscribe(before)
some_action += before
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), True)
some_action -= before
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), False)
# magic methods explained
# this lets you do some_action += before
self.assertEqual(some_action.__iadd__, some_action.subscribe)
# this lets you do some_action -= before
self.assertEqual(some_action.__isub__, some_action.unsubscribe)
def test_after_subscribtion(self):
@triggers_after_event
def some_action():
self.value += 'a'
def after(event):
pass
self.assertEqual(some_action.afterEvent.hasSubscriber(after), False)
# some_action.subscribe(before)
some_action += after
self.assertEqual(some_action.afterEvent.hasSubscriber(after), True)
some_action -= after
self.assertEqual(some_action.afterEvent.hasSubscriber(after), False)
# magic methods explained
# this lets you do some_action += before
self.assertEqual(some_action.__iadd__, some_action.subscribe)
# this lets you do some_action -= before
self.assertEqual(some_action.__isub__, some_action.unsubscribe)
def test_beforeafter_subscribe(self):
@triggers_beforeafter_events
def some_action():
self.value += 'a'
def before(event):
pass
def after(event):
pass
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), False)
self.assertEqual(some_action.afterEvent.hasSubscriber(after), False)
some_action.subscribe(before, after)
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), True)
self.assertEqual(some_action.afterEvent.hasSubscriber(after), True)
def test_beforeafter_subscribe(self):
@triggers_beforeafter_events
def some_action():
self.value += 'a'
def before(event):
pass
def after(event):
pass
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), False)
self.assertEqual(some_action.afterEvent.hasSubscriber(after), False)
some_action.subscribe(before, after)
self.assertEqual(some_action.beforeEvent.hasSubscriber(before), True)
self.assertEqual(some_action.afterEvent.hasSubscriber(after), True)
# run just the tests in this file
if __name__ == '__main__':
unittest.main()
| 33.094203
| 91
| 0.649004
| 494
| 4,567
| 5.763158
| 0.12753
| 0.182648
| 0.120126
| 0.158061
| 0.83386
| 0.83386
| 0.83386
| 0.795574
| 0.787847
| 0.765367
| 0
| 0
| 0.261879
| 4,567
| 137
| 92
| 33.335766
| 0.844557
| 0.102036
| 0
| 0.757895
| 0
| 0
| 0.011742
| 0
| 0
| 0
| 0
| 0
| 0.242105
| 1
| 0.242105
| false
| 0.063158
| 0.021053
| 0
| 0.273684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
fe8c65e579c4b6eea466fd9954f2b1ffd2d68fce
| 8,729
|
py
|
Python
|
tests/components/directv/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/directv/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/directv/test_config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test the DirecTV config flow."""
from unittest.mock import patch
from aiohttp import ClientError as HTTPClientError
from openpeerpower.components.directv.const import CONF_RECEIVER_ID, DOMAIN
from openpeerpower.components.ssdp import ATTR_UPNP_SERIAL
from openpeerpower.config_entries import SOURCE_SSDP, SOURCE_USER
from openpeerpower.const import CONF_HOST, CONF_NAME, CONF_SOURCE
from openpeerpower.core import OpenPeerPower
from openpeerpower.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.components.directv import (
HOST,
MOCK_SSDP_DISCOVERY_INFO,
MOCK_USER_INPUT,
RECEIVER_ID,
UPNP_SERIAL,
mock_connection,
setup_integration,
)
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_show_user_form(opp: OpenPeerPower) -> None:
"""Test that the user set up form is served."""
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["step_id"] == "user"
assert result["type"] == RESULT_TYPE_FORM
async def test_show_ssdp_form(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test that the ssdp confirmation form is served."""
mock_connection(aioclient_mock)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
assert result["description_placeholders"] == {CONF_NAME: HOST}
async def test_cannot_connect(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on connection error."""
aioclient_mock.get("http://127.0.0.1:8080/info/getVersion", exc=HTTPClientError)
user_input = MOCK_USER_INPUT.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_ssdp_cannot_connect(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on connection error."""
aioclient_mock.get("http://127.0.0.1:8080/info/getVersion", exc=HTTPClientError)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_ssdp_confirm_cannot_connect(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on connection error."""
aioclient_mock.get("http://127.0.0.1:8080/info/getVersion", exc=HTTPClientError)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP, CONF_HOST: HOST, CONF_NAME: HOST},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_user_device_exists_abort(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort user flow if DirecTV receiver already configured."""
await setup_integration(opp, aioclient_mock, skip_entry_setup=True)
user_input = MOCK_USER_INPUT.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_device_exists_abort(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow if DirecTV receiver already configured."""
await setup_integration(opp, aioclient_mock, skip_entry_setup=True)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_with_receiver_id_device_exists_abort(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow if DirecTV receiver already configured."""
await setup_integration(opp, aioclient_mock, skip_entry_setup=True)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
discovery_info[ATTR_UPNP_SERIAL] = UPNP_SERIAL
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_unknown_error(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on unknown error."""
user_input = MOCK_USER_INPUT.copy()
with patch(
"openpeerpower.components.directv.config_flow.DIRECTV.update",
side_effect=Exception,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_ssdp_unknown_error(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on unknown error."""
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
with patch(
"openpeerpower.components.directv.config_flow.DIRECTV.update",
side_effect=Exception,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_ssdp_confirm_unknown_error(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we abort SSDP flow on unknown error."""
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
with patch(
"openpeerpower.components.directv.config_flow.DIRECTV.update",
side_effect=Exception,
):
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_SSDP, CONF_HOST: HOST, CONF_NAME: HOST},
data=discovery_info,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_full_user_flow_implementation(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the full manual user flow from start to finish."""
mock_connection(aioclient_mock)
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
user_input = MOCK_USER_INPUT.copy()
with patch("openpeerpower.components.directv.async_setup_entry", return_value=True):
result = await opp.config_entries.flow.async_configure(
result["flow_id"],
user_input=user_input,
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"]
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_RECEIVER_ID] == RECEIVER_ID
async def test_full_ssdp_flow_implementation(
opp: OpenPeerPower, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the full SSDP flow from start to finish."""
mock_connection(aioclient_mock)
discovery_info = MOCK_SSDP_DISCOVERY_INFO.copy()
result = await opp.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
assert result["description_placeholders"] == {CONF_NAME: HOST}
result = await opp.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"]
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_RECEIVER_ID] == RECEIVER_ID
| 32.210332
| 88
| 0.706381
| 1,065
| 8,729
| 5.50892
| 0.100469
| 0.079768
| 0.035793
| 0.051133
| 0.843702
| 0.841316
| 0.835691
| 0.817283
| 0.817283
| 0.805522
| 0
| 0.004252
| 0.191775
| 8,729
| 270
| 89
| 32.32963
| 0.827356
| 0.003322
| 0
| 0.718593
| 0
| 0
| 0.092627
| 0.034422
| 0
| 0
| 0
| 0
| 0.19598
| 1
| 0
| false
| 0
| 0.050251
| 0
| 0.050251
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe96f4ba148696c7027e73e5f035a565d387cddf
| 2,882
|
py
|
Python
|
dependencies/PyMesh/python/pymesh/tests/test_minkowski_sum.py
|
aprieels/3D-watermarking-spectral-decomposition
|
dcab78857d0bb201563014e58900917545ed4673
|
[
"MIT"
] | 5
|
2018-06-04T19:52:02.000Z
|
2022-01-22T09:04:00.000Z
|
dependencies/PyMesh/python/pymesh/tests/test_minkowski_sum.py
|
aprieels/3D-watermarking-spectral-decomposition
|
dcab78857d0bb201563014e58900917545ed4673
|
[
"MIT"
] | null | null | null |
dependencies/PyMesh/python/pymesh/tests/test_minkowski_sum.py
|
aprieels/3D-watermarking-spectral-decomposition
|
dcab78857d0bb201563014e58900917545ed4673
|
[
"MIT"
] | null | null | null |
from pymesh.TestCase import TestCase
from pymesh.meshutils import generate_box_mesh
from pymesh import minkowski_sum, detect_self_intersection
import numpy as np
class MinkowskiSumTest(TestCase):
def test_simple(self):
input_mesh = generate_box_mesh(
np.array([0, 0, 0]), np.array([1, 1, 1]));
path = np.array([ [0, 0, 0], [1, 1, 1] ]);
output_mesh = minkowski_sum(input_mesh, path);
self.assertTrue(output_mesh.is_closed());
self.assertTrue(output_mesh.is_oriented());
self.assertTrue(output_mesh.num_boundary_edges == 0);
input_bbox_min, input_bbox_max = input_mesh.bbox;
output_bbox_min, output_bbox_max = output_mesh.bbox;
self.assert_array_equal(input_bbox_min, output_bbox_min);
self.assert_array_equal([1, 1, 1], output_bbox_max - input_bbox_max);
def test_coplanar(self):
input_mesh = generate_box_mesh(
np.array([0, 0, 0]), np.array([1, 1, 1]));
path = np.array([ [0, 0, 0], [1e-12, 0, 0] ]);
output_mesh = minkowski_sum(input_mesh, path);
self.assertTrue(output_mesh.is_closed());
self.assertTrue(output_mesh.is_oriented());
self.assertTrue(output_mesh.num_boundary_edges == 0);
input_bbox_min, input_bbox_max = input_mesh.bbox;
output_bbox_min, output_bbox_max = output_mesh.bbox;
self.assert_array_equal(input_bbox_min, output_bbox_min);
self.assert_array_almost_equal([1e-12, 0, 0],
output_bbox_max - input_bbox_max);
def test_near_coplanar(self):
input_mesh = generate_box_mesh(
np.array([0, 0, 0]), np.array([1, 1, 1]));
path = np.array([ [0, 0, 0], [100, 1e-3, 0] ]);
output_mesh = minkowski_sum(input_mesh, path);
self.assertTrue(output_mesh.is_closed());
self.assertTrue(output_mesh.is_oriented());
self.assertTrue(output_mesh.num_boundary_edges == 0);
input_bbox_min, input_bbox_max = input_mesh.bbox;
output_bbox_min, output_bbox_max = output_mesh.bbox;
self.assert_array_equal(input_bbox_min, output_bbox_min);
self.assert_array_almost_equal([100, 1e-3, 0],
output_bbox_max - input_bbox_max);
def test_chain(self):
input_mesh = generate_box_mesh(
np.array([0, 0, 0]), np.array([1, 1, 1]));
path = np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
]);
output_mesh = minkowski_sum(input_mesh, path);
self.assertTrue(output_mesh.is_closed());
self.assertTrue(output_mesh.is_oriented());
self.assertEqual(1, output_mesh.num_components);
self_intersections = detect_self_intersection(output_mesh);
self.assertEqual(0, len(self_intersections));
| 37.921053
| 77
| 0.632894
| 407
| 2,882
| 4.152334
| 0.125307
| 0.036686
| 0.031953
| 0.156213
| 0.811243
| 0.798817
| 0.798817
| 0.798817
| 0.779882
| 0.729586
| 0
| 0.041058
| 0.239417
| 2,882
| 75
| 78
| 38.426667
| 0.729927
| 0
| 0
| 0.586207
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.327586
| 1
| 0.068966
| false
| 0
| 0.068966
| 0
| 0.155172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe9745d22e8022cff18cbab7c0aa8cbfd69ac4f0
| 32,841
|
py
|
Python
|
html_parts/Narrator/Content/content_list_quotes.py
|
Tibblue/Darkest-Dungeon-Wiki-Scrapper
|
0901f375c26d332b38c181c2988470dd3a4815eb
|
[
"MIT"
] | 2
|
2020-07-21T20:43:22.000Z
|
2020-08-02T14:44:00.000Z
|
html_parts/Narrator/Content/content_list_quotes.py
|
Tibblue/Darkest-Dungeon-Wiki-Scrapper
|
0901f375c26d332b38c181c2988470dd3a4815eb
|
[
"MIT"
] | null | null | null |
html_parts/Narrator/Content/content_list_quotes.py
|
Tibblue/Darkest-Dungeon-Wiki-Scrapper
|
0901f375c26d332b38c181c2988470dd3a4815eb
|
[
"MIT"
] | null | null | null |
['There is a place, beneath those ancient ruins, in the moor, that calls out to the boldest among them... "We are the Flame!" they cry, "And Darkness fears us!" They descend, spurred on by fantasies of riches and redemption, to lay bare whatever blasphemous abnormality may slumber restlessly in that unholy abyss... But Darkness is insidious. Terror and Madness can find cracks in the sturdiest of armors, the most resolute of minds... And below, in that limitless chasm of Chaos, they will realize the truth of it. "We are not the Flame!" they will cry out, "We are but moths and we are DOOMED!" And their screams will echo amidst the pitiless cyclopean stones... Of the Darkest Dungeon.', 'Ruin has come to our family. You remember our venerable house, opulent and imperial. Gazing proudly from its stoic perch above the moor. I lived all my years in that ancient, rumor shadowed manor. Fattened by decadence and luxury. And yet, I began to tire of conventional extravagance. Singular, unsettling tales suggested the mansion itself was a gateway to some fabulous and unnamable power. With relic and ritual, I bent every effort towards the excavation and recovery of those long buried secrets, exhausting what remained of our family fortune on swarthy workmen and sturdy shovels. At last, in the salt-soaked crags beneath the lowest foundations we unearthed that damnable portal of antediluvian evil. Our every step unsettled the ancient earth but we were in a realm of death and madness! In the end, I alone fled laughing and wailing through those blackened arcades of antiquity. Until consciousness failed me. You remember our venerable house, opulent and imperial. It is a festering abomination! I beg you, return home, claim your birthright, and deliver our family from the ravenous clutching shadows of the ', 'You will arrive along the old road. It winds with a troubling, serpent-like suggestion through the corrupted countryside. Leading only, I fear, to ever more tenebrous places. There is a sickness in the ancient pitted cobbles of the old road and on its writhing path you will face viciousness, violence, and perhaps other damnably transcendent terrors. So steel yourself and remember: there can be no bravery without madness. The old road will take you to hell, but in that gaping abyss we will find our redemption.', 'Brigands have run of these lanes. Keep to the side path; the Hamlet is just ahead.', 'Dispatch this thug in brutal fashion, that all may hear of your arrival!', 'Leave nothing unchecked, there is much to be found in forgotten places.', 'An ambush! Send ', 'Welcome home, such as it is. This squalid hamlet, these corrupted lands, they are yours now, and you are bound to them.', 'This sprawling estate, a Mecca of madness and morbidity. Your work begins...', 'The cost of preparedness - measured now in gold, later in blood.', 'Women and men; soldiers and outlaws; fools and corpses. All will find their way to us now that the road is clear.', 'Fresh kegs, cards, and curtained rooms promise solace to the weary and the broken alike.', 'The cobwebs have been dusted, the pews set straight. The Abbey calls to the faithful...', 'The bellows blast once again! The forge stands ready to make weapons of war.', 'Make no mistake, we will face ever greater threats. Our soldiers must be ready.', 'At home in wild places, she is a stalwart survivor, and a strict instructor.', 'Trinkets and charms, gathered from all the forgotten corners of the earth...', 'Most will end up here, covered in the poisoned earth, awaiting merciful oblivion.', 'In time, you will know the tragic extent of my failings...', 'I remember days when the sun shone, and laughter could be heard from the tavern.', 'I was lord of this place, before the crows and rats made it their domain.', 'In truth I cannot tell how much time has passed since I sent that letter.', 'Once, our estate was the envy of this land...', 'Our family name, once so well regarded, is now barely whispered aloud by decent folk.', 'I see something long-absent in the sunken faces of passersby - a glimmer of hope.', 'The poor Caretaker, I fear his long-standing duties here have ...affected him.', 'The degeneracy of the Hamlet is nothing, I fear, when compared to the condition of surrounding acres.', 'My obsession caused this great foulness, and it is shameful that I must rely upon you to set it right.', 'I can still see their angry faces as they stormed the manor, but I was dead before they found me, and the letter was on its way.', 'There is a great horror beneath the manor: a ', 'Curiosity, interest, and obsession — mile markers on my road to damnation.', 'Trouble yourself not with the cost of this crusade - its noble end affords you broad tolerance in your choice of means.', 'Let me share with you the terrible wonders I have come to know...', 'You answered the letter — now like me, you are part of this place.', 'We dug for months, years — an eternity. And we were rewarded with madness.', 'The plume and the pistol — a fitting end to my folly, and a curse upon us all.', 'Can you feel it? The walls between the sane world and that unplumbed dimension of delirium are tenuously thin here...', 'All my life, I could feel an insistent gnawing in the back of my mind. It was a yearning, a thirst for discovery that could be neither numbed, nor sated.', 'An eternity of futile struggle — a penance for my unspeakable transgressions.', 'All the decadent horrors I have seen pale in comparison with that final, crowning thing. I could not look, nor could I look away!', 'Great heroes can be found even here, in the mud and rain.', 'More arrive, foolishly seeking fortune and glory in this domain of the damned.', 'Word is travelling. Ambition is stirring in distant cities. We can use this.', 'With enough ale, maybe they can be inured against the horrors below.', 'All manner of diversion and dalliance await those who cross the threshold with coin in hand.', 'Strong drink, a game of chance, and companionship. The rush of life.', 'A little hope, however desperate, is never without worth.', 'Gilded icons and dogmatic rituals... for some, a tonic against the bloodshed.', 'A man in a robe, claiming communion with the divine. Madness.', 'In the end, every plan relies upon a strong arm, and tempered steel.', "A sharper sword, a stronger shield. Anything to prolong a soldier's life.", 'Fan the flames! Mold the metal! We are raising an army!', 'Some may fall, but their knowledge lives on.', 'Every creature has a weakness. The wise hero trains for what she will face.', 'A strict regimen is paramount, if one is to master the brutal arithmetic of combat.', 'Alone in the woods or tunnels, survival is the same. Prepare, persist, and overcome.', 'Success depends on survival.', 'They must learn more than brutal bloodletting — they must learn to survive!', 'Rarities and curios, sold at a profit, of course.', 'Idol, amulet, or lucky charm — the simplest object can be a talisman against evil.', 'An increasing stockpile of curious ', 'The front line of this war is not in the dungeon, but rather, inside the mind.', 'Experimental techniques and tonics can overcome things a sharpened sword cannot.', 'Curious methodologies and apparatus can calm even the most tormented soul.', 'Tortured and reclusive... this man is more dangerous than he seems...', 'She searches where others will not go... and sees what others will not see.', 'Shoot, bandage and pillage: the dancing steps of war.', 'The thrill of the hunt... The promise of payment...', 'A mighty sword-arm anchored by a holy purpose. A zealous warrior.', "To those with a keen eye, gold gleams like a dagger's point.", 'Barbaric rage and unrelenting savagery make for a powerful ally.', 'Elusive, evasive, persistent. Righteous traits for a rogue.', 'A lawman and his faithful beast. A bond forged by battle and bloodshed.', 'He will be laughing still... at the end.', 'This man understands that adversity and existence are one and the same.', 'The raw strength of youth may be spent, but his eyes hold the secrets of a hundred campaigns.', 'To fight the abyss, one must know it...', 'What better laboratory than the blood-soaked battlefield?', 'A sister of battle. Pious and unrelenting.', 'A champion markswoman keen for a new kind of challenge.', 'This one has become vestigial, useless.', 'Suffer not the lame horse... nor the broken man.', 'Another soul battered and broken, cast aside like a spent torch.', 'Those without the stomach for this place must move on.', 'It is done. Turn yourself now to the conditions of those poor devils who remain.', 'Send this one to journey elsewhere, for we have need of sterner stock.', 'Slumped shoulders, wild eyes, and a stumbling gait - this one is no more good to us.', 'The task ahead is terrible, and weakness cannot be tolerated.', 'Excavations beneath the manor were well underway, when a particular ragged indigent arrived in the hamlet. This filthy, toothless miscreant boasted an uncanny knowledge of my ambitions and prognosticated publicly that left unchecked, I would soon unleash doom upon the world.', 'This raving creature had to be silenced, but doing so proved maddeningly impossible. How had he survived the stockades, the icy waters, and the knives I delivered so enthusiastically into his back? How had he returned time and time again to rouse the townsfolk with his wild speculations and prophecies?', 'Finally, resigned to his uncommon corporeal resilience, I lured him to the dig. There, I showed him the Thing, and detailed the full extent of my plans. Triumphantly, I watched as he tore his eyes from their sockets, and ran shrieking into the shadows - wailing maniacally that the end was upon us all.', 'Mastery over life and death was chief among my early pursuits. I began in humility, but my ambition was limitless. Who could have divined the prophetic import of something as unremarkable as a twitch in the leg of a dead rat?', 'I entertained a delegation of experts from overseas, eager to plumb the depths of their knowledge and share with them certain techniques and alchemical processes I had found to yield wondrous and terrifying results. Having learned all I could from my visiting guests, I murdered them as they slept.', 'I brought my colleagues back with much of their intellect intact, a remarkable triumph for even the most experienced necromancer. Freed from the trappings of their humanity, they plied their terrible trade anew - the dead reviving the dead, on and on down the years... forever.', 'I had collected many rare and elusive volumes on ancient herbal properties, and was set to enjoy several weeks immersed in comfortable study. My work was interrupted, however, by a singularly striking young woman who insisted on repeated calls to the house.', 'Her knowledge of horticulturalism, and its role in various arcane practices impressed me greatly. My licentious impulse gave way to a genuine, professional respect, and together, we began to plant, harvest, and brew.', 'As time wore on, her wild policy of self-experimentation grew intolerable. She quaffed all manner of strange fungii, herbs and concoctions, intent on gaining some insight into the horror we both knew to be growing beneath us. The change in her was appalling, and, no longer able to stomach it, I sent her to live in the Weald, where her wildness would be welcomed.', 'Simple folk are by their nature loquacious, and the denizens of the Hamlet were no exception. It was not long before rumors of my morbid genius and secretive excavations began to fuel local legend. In the face of my increasingly egregious flaunting of public taboos, awe turned to ire, and demonstrations were held in the town square.', 'The wild whispers of heresy roused the rabble to violent action. Such was the general air of rebellion that even my generous offer of gold to the local constabulary was rebuffed! To reassert my rule, I sought out unscrupulous men skilled in the application of force. Tight-lipped and terrifying, these mercenaries brought with them a war machine of terrible implication.', 'Eager to end the tiresome domestic distraction, I instructed my newly formed militia of hardened bandits, brigands and killers to go forth and do their work. Compliance and order were restored, and the noisome population of the Hamlet was culled to more... manageable numbers.', 'The ways and rituals of blood sacrifice are difficult to master. Those from beyond require a physical vessel if they are to make the crossing into our reality. The timing of the chants is imperative - without the proper utterances at precise intervals, the process can fail spectacularly.', "My first attempts at summoning were crude and the results, disappointing. I soon found however, that the type and condition of the host's meat was a critical factor. The best results came from pigs, whose flesh is most like that of man.", 'The Great Thing I had managed to bring through was brutish and stupid. Moreover, it required prodigious amounts of meat to sustain itself, but this was only a trifling concern – after all, I had a village full of it.', 'My zeal for blood rituals and summoning rites had begun to ebb as each attempt invariably brought only failure, and disappointment. Progress was halting, and the rapidly accumulating surplus of wasted flesh had become... burdensome.', 'I could not store such a prodigious amount of offal, nor could I rid myself of it easily, possessed as it was by unnameable things from outer spheres. When excavations beneath the Manor broke through into an ancient network of aqueducts and tunnels, I knew I had found a solution to the problem of disposal.', 'The spasmodically squirming, braying, and snorting half-corpses were heaped each upon the other until at last I was rid of them. The Warrens became a landfill of snout and hoof, gristle and bone – a mountainous, twitching mass of misshapen flesh fusing itself together in the darkness.', "My lofty position wasn't always accompanied by the fear of office, and there was a time I could walk the streets or raise a glass in the tavern without concern for molestation. Faithful as the tide, one precocious village waif made it her hobby to shadow my every errand. It was charming then, troublesome later.", 'In financial desperation, I struck a bargain with the ancient things that surfaced in search of sacrifice when the moon was right. Their price was the delivery of an obscure idol and one other item of more troubling portent. The pact struck, my newfound accomplices slipped silently beneath the brackish water. A fearful stirring at the edge of the torchlight betrayed a familiar witness, and gifted me with malign inspiration.', 'Under the blood moon, I lured my wide-eyed prey to the pier’s edge. Before she could properly appreciate her position, I clamped on a manacle, chaining her to the leering idol. A small push was sufficient to send both into the icy waters. And when at length the tide receded, jewels of the most magnificent grandeur lay scattered upon the shore.', 'Prying eyes had become a nuisance along the Old Road, and so I undertook to receive my most curious deliveries by way of marine shipments. A sheltered jetty was accessible by a narrow stone stair off the back of the manor, and a discreet system of pulleys could hoist even the heaviest prizes up the rock face from a securely tied dinghy below.', 'I employed a crew of particularly unsavory mariners, who, for a time, sailed the four corners at my behest - retrieving many valuable artifacts, relics and rare texts. Predictably, they increased their tariffs to counter my intense stipulations of secrecy. Such resources had long been exhausted, of course, and so I prepared an... alternative payment.', 'While the greedy dogs slept off their revelry, I hexed their anchor with every twisted incantation I could muster - imbuing it with the weight of my ambition, and my contempt for their crude extortion. At the witching hour, the anchor pulled with preternatural force, dragging craft and crew down into the depths. They must have cried out, but no sound escaped the swirling black waters...', 'Pace out the halls of your lineage, once familiar, now foreign.', 'The fiends must be driven back, and what better place to begin than the seat of our noble line?', 'Can the defiled be consecrated? Can the fallen find rest?', 'There is power in symbols. Collect the scattered scraps of faith and give comfort to the masses.', 'A devil walks these halls... only the mad or the desperate go in search of him.', 'The echoes of his mindless tittering reverberate maddeningly...', 'I knew all these paths once; now they are as twisted as my own ambitions.', 'Corruption has soaked the soil, sapping all good life from these groves - let us burn out this evil.', 'Excise the fungal tumors and the land may yet live.', 'Our land is remote and unneighbored. Every lost resource must be recovered.', 'There is method in the wild corruption here. It bears a form both wretched and malevolent.', 'The smell of sulfur and gunpowder hangs in the air, the war machine is close.', 'To prosecute our war against the Swine, we must first scout their squalid homes.', 'They breed quickly down there in the dark, but perhaps we can slay them even faster.', 'The Swine draw power from their horrid markings and crude idols - tear them down!', 'Even the fiercest beast will lay down when it has not eaten. Steal their food.', 'A nameless abomination, a testament to my failures - it must be destroyed!', 'The thing is more terrible than I can describe - an incoherent jumble of organ, sinew and bone.', 'The smell of rotting fish is almost unbearable...', 'These salt-soaked caverns are teeming with pelagic nightmares - they must be flushed out!', 'The flopping, fish-like things abhore the warding sigils. Let us claim this place anew!', 'Recover these lost shipments of rarities, that we may prevent them from falling into even less scrupulous hands...', 'I always wondered what became of the unfortunate little waif...', 'The poor devils, chained and drowning for eternity...', 'The shifted corridors and sloped vaults of our ancestry are beginning to feel familiar.', 'The great Ruins belong to us, and we will find whatever secrets they hold.', 'More bones returned to rest. Devils remanded to their abyss.', 'Room by room, hall by hall, we reclaim what is ours.', 'This day belongs to the Light!', 'Beacons in the darkness, stars in the emptiness of the void.', 'Tokens of hope, recovered from the encroaching dark.', 'The Abbot will be grateful - the trappings of his faith have been restored.', 'Did he foresee his own demise? I care not, so long as he remains dead.', 'In life, his claims to precognition were dubious at best, in death, they are ridiculous.', 'Even reanimated bones can fall; even the dead can die again.', 'With no living sinew to actuate them, will these walking bones finally fail?', 'Every cleared path and charted route reduces the isolation of our troubled estate.', 'Paths and roads bring soldiers and supplies, let them arrive unharried!', 'Driving out corruption is an endless battle, but one that must be fought.', 'The agents of pestilence will yet be driven from our woods!', 'Good fortune and hard work may yet arrest this plague.', 'Disinfection, at last.', 'These medicines will prevent the outbreak of epidemic at our struggling Hamlet.', 'These tonics and herbs will stave off infection and neutralize contagion.', 'The wood is still poisoned. The way is still blocked. But less people will be eaten.', 'Leave her corpse to rot, consumed by the spores she spawned.', 'A corpse of twisted metal and splintered wood - at home amongst the headstones.', 'The Brigands are undone - our family crest is once again a symbol of strength!', "The swinefolk's labyrinth may yet prove to be navigable.", 'The twisting tunnels seem a little less ...impossible.', 'Some experiments should have never happened. You are doing just work, ending them.', 'Their squeals fade, their confidence is shaken!', 'Ha ha ha! Let those dirty beasts worship the mud now!', 'Robbed of their writings, the Swine will grow ever more ignorant - if such a thing were possible.', 'These foodstuffs yield double benefit: the town may eat, and the Swine will not.', 'Our supplies are replenished, the soldiers will feast tonight.', 'It is as grotesque in death as it was in life...', 'Its destruction is a small consolation, given the implications of its terrible existence.', 'How many rats will it take to gnaw through a tonne of putrid flesh?', 'The thing is even more horrible in death. Liquefaction cannot come soon enough.', 'Despite its morbid aspect, this twisted, cavernous maze seems almost traversable.', 'We will find all manner of great and terrible things in this watery tomb...', 'The pungent odour abates! The things are driven back, for a time.', 'At last, wholesome marine life can flourish - if indeed there is such a thing.', 'Hideous matriarch, vile queen of the aphotic depths - she has no place in the sane world!', 'Seafaring trade, the lifeblood of any port, can resume again now that the routes are safe.', "Finally, a sailor's death for captain and crew. Fitting.", "They are cursed to float forever, deep in the swirling blackness, far beyond the light's reach.", 'A setback, but not the end of things!', 'Wounds to be tended; lessons to be learned.', 'Regroup. Reassemble. Evil is timeless, after all.', 'Failure tests the mettle of heart, brain, and body.', 'You will endure this loss, and learn from it.', 'You cannot learn a thing you think you know...', 'We fall so that we may learn to pick ourselves up once again.', 'Do not ruminate on this fleeting failure - the campaign is long, and victory will come.', 'Where there is no peril in the task, there can be no glory in its accomplishment.', 'Ignorance of your enemy and of yourself will invariably lead to defeat.', 'Great adversity has a beauty - it is the fire that tempers.', 'Towering, fierce, terrible. Nightmare made material!', 'The madman hides there, behind the pews, spouting his mindless drivel!', 'Twisted and maniacal - a slathering testament to the powers of corruption!', 'A marvel of technology - an engine of destruction!', 'It is a travesty - a blundering mountain of hatred and rage.', 'Squirming, contorting and ever-expanding...this horror must be unmade!', 'The aquatic devils have remade the poor girl in their image! She is their queen, and their slave!', 'Even in death, the captain shouts his orders, and the crew obeys...', 'A lifetime of pious toil, an eternity of suffering.', 'Ha! The poor fool still stands, battered and broken as his precious mill.', 'Fitting, that he find his rest upon the dirt he harrowed to fruitlessly.', 'Witness the woundrous fury of the Stars!', 'A star-spawned horror rattles its crystalline cage!', 'Shattered and unmade! Or, perhaps, reborn?', 'It will live again in another time, another place.', 'Who could fathom the hateful scorn of the swirling stars!', 'The twisted faces of the damned, piled high and cloaked in malice!', 'The sparkling eyes of youth - twisted and made merciless!', 'As the ghoulish collection scatters, the rats prepare to feast.', 'A predator is often blind to its own peril.', 'Behold the infinite malignity of the stars!', 'A star-spawned horror!', 'The space between worlds is no place for mortal men.', 'It could be dismissed as a fever dream, if not for the corpses.', 'A denizen of unconscionable alienage.', 'A shard of alien malignity!', 'A lurching composition of otherworldly death!', 'A shuddering crystalline bulk!', 'Born of the void, it dies in the Earth!', 'Banished in the void!', 'It came from the stars, let it return to them.', 'The match is struck. A blazing star is born!', 'The way is lit. The path is clear. We require only the strength to follow it.', 'In radiance may we find victory.', 'As the light gains purchase, spirits are lifted and purpose is made clear.', 'The light, the promise of safety!', 'The darkness holds much worse than mere trickery and bogeymen.', 'Darkness closes in, haunting the hearts of men.', 'Terrors may indeed stalk these shadows, but yonder – a glint of gold."', 'Secrets and wonders can be found in the most tenebrous corners of this place.', 'And now... the darkness holds dominion – black as death.', 'Glittering gold, trinkets and baubles - paid for in blood.', 'If only treasure could staunch the flow of otherworldly corruption...', 'Finding the stuff is only the first test - now it must be carried home.', 'Packs laden with loot are often low on supplies.', 'Wealth beyond measure, awarded to the brave and the foolhardy alike.', 'A fortune - waiting to be spent.', 'A handsome reward for a task well performed.', 'Circled in the dark, the battle may yet be won.', 'A spark without kindling is a goal without hope.', 'Gathered close in tenuous firelight, and uneasy companionship.', 'A moment of respite. A chance to steel oneself against the coming horrors.', 'Huddled together, furtive and vulnerable. Rats in a maze.', 'Cruel machinations spring to life with a singular purpose!', "Curious is the trap-maker's art... his efficacy unwitnessed by his own eyes.", 'Mechanical hazards, possessed by evil intent.', 'Ambushed by foul invention!', 'Ancient traps lie in wait, unsprung and thirsting for blood.', 'Carelessness will find no clemency in this place!', 'Watch your step.', 'Mind that such missteps are the exception, and not the rule.', 'Even the cold stone seems bent on preventing passage.', 'Such blockages are unsurprising – these tunnels predate even the earliest settlers.', 'Nature herself - a victim to this spreading corruption: malformed with misintent.', 'Another mariner... Another misfortune.', 'Without tools of iron, you must rely on flesh and indefatigable purpose.', 'Gnawing hunger sets in, turning the body against itself, weakening the mind…', 'To fall for such a little thing... a bite of bread...', 'Packs full of steel and war, but nary a thought given to the plow.', 'No force of will can overcome a failing body.', 'The requirements of survival cannot be met on an empty stomach.', 'Soothed, sedated.', 'A momentary abatement...', 'The wounds of war can be healed, but never hidden.', 'Compassion is a rarity in the fevered pitch of battle.', 'Surgical precision!', 'Vigor is restored!', 'The blood pumps, the limbs obey!', 'The flesh is knit!', 'Patched up, if only to bleed again.', 'Death cannot be escaped! But it can be postponed.', 'A death denied for now.', 'Death is patient, it will wait.', 'As the fiend falls, a faint hope blossoms.', 'Confidence surges as the enemy crumbles!', 'Press this advantage, give them no quarter!', 'Their formation is broken - maintain the offensive.', 'Continue the onslaught! Destroy. Them. All.', 'Executed with impunity!', 'Another abomination cleansed from our lands.', 'Begone, fiend!', 'Back to the pit!', 'Another one falls!', 'Decimated!', 'Obliterated!', 'Destroyed!', 'Eradicated!', 'Annihilated!', 'Prodigious size alone does not dissuade the sharpened blade.', 'Their cursed champion falls!', 'Monstrous size has no intrinsic merit, unless inordinate exsanguination be considered a virtue.', 'The bigger the beast, the greater the glory.', 'A victory - perhaps a turning point.', 'A death by inches...', 'Great is the weapon that cuts on its own!', 'Slowly, gently, this is how a life is taken...', 'The slow death - unforeseen, unforgiving.', 'A decisive pummelling!', 'A powerful blow!', 'A devastating blow!', 'Impressive!', 'The ground quakes!', 'A singular strike!', 'Well struck!', 'Precision and power!', 'Masterfully executed!', 'How quickly the tide turns!', 'Mortality clarified in a single strike!', 'Grievous injury, palpable fear...', 'Such a terrible assault cannot be left unanswered!', 'Death waits for the slightest lapse in concentration.', 'Exposed to a killing blow!', 'Ringing ears, blurred vision - the end approaches...', 'Dazed, reeling, about to break...', 'Unnerved, unbalanced...', 'A dizzying blow to body and brain!', 'Weakened!', 'Diminished!', 'The will to fight falters!', 'Confusion, nerves, and panic!', 'Gnawing uncertainty - the birthplace of dread.', 'Festering fear consumes the mind!', 'The horror...', 'The abyss returns even the boldest gaze.', 'The blood quickens!', 'A brilliant confluence of skill and purpose!', "A time to perform beyond one's limits!", 'Inspiration and improvement!', 'Perched at the very precipice of oblivion...', 'A hand-breadth from becoming unwound...', 'Teetering on the brink, facing the abyss...', 'And now the true test... hold fast, or expire?', 'As life ebbs, terrible vistas of emptiness reveal themselves.', 'Survival is a tenuous proposition in this sprawling tomb.', 'More blood soaks the soil, feeding the evil therein.', 'Another life wasted in the pursuit of glory and gold.', 'This is no place for the weak, or the foolhardy.', 'This is no place for the weak, or foolhardy.', 'More dust, more ashes, more disappointment.', 'These nightmarish creatures can be felled! They can be beaten!', "Seize this momentum! Push on to the task's end!", 'This expedition, at least, promises success.', 'As victories mount, so too will resistance.', 'Success so clearly in view... or is it merely a trick of the light?', 'Remind yourself that overconfidence is a slow and insidious killer.', 'A trifling victory, but a victory nonetheless.', 'Be wary - triumphant pride precipitates a dizzying fall...', 'Ghoulish horrors - brought low and driven into the mud!', 'Impressive haul! If you value such things.', 'Ornaments neatly ordered, lovingly admired.', 'Such a burden of finery risks life and limb.', 'A full pack often attracts unwanted attention.', 'True desperation is known only when escape is impossible.', 'Cornered! Trapped! And forced to fight on...', 'No chance for egress - will this be a massacre?', 'This skirmish may be lost, but the battle may yet be won.', 'A wise general cuts losses, and regroups.', 'The sin is not in being outmatched, but in failing to recognize it.', 'Injury and despondence set the stage for heroism... or cowardice.', "The human mind - fragile like a robin's egg.", 'Wherefore, heroism?', 'The mind cannot hope to withstand such an assault.', "Even the aged oak will fall to the tempest's winds.", 'Madness, our old friend!', 'One can sometimes find clarity in madness, but only rarely...', 'Madness - sublimity of the intelligence, or so it has been said.', 'The bulwarks of the mind have fallen!', 'The abyss is made manifest!', 'Frustration and fury, more destructive than a hundred cannons.', 'Fear and frailty finally claim their due.', 'The walls close in, the shadows whisper of conspiracy!', 'There can be no hope in this hell, no hope at all.', 'Self-preservation is paramount - at any cost!', 'Those who covet injury find it in no short supply.', 'Reeling, gasping, taken over the edge into madness!', 'A moment of valor shines brightest against a backdrop of despair.', 'Adversity can foster hope, and resilience.', 'A moment of clarity in the eye of the storm...', 'Anger is power - unleash it!', 'Many fall in the face of chaos; but not this one, not today.', 'In those younger years my home was a hive of unbridled hedonism, a roiling apiary where instinct and impulse were indulged with wild abandon. A bewitching predator slipped in amidst the swarm of tittering sycophants. Though outwardly urbane, I could sense in her a mocking thirst. Driven half-mad by cloying vulgarity I plotted to rid myself of this lurking threat in a grand display of sadistic sport. But as the moment of murder drew nigh, the gibbous moon revealed her inhuman desires in all their stultifying hideousness…', 'Mercifully, the morbid encounter resolved itself in my favor, and I set to work pursuing degeneracy in its most decadent forms. The air pulsed with anticipation as I revealed the unnatural terroir of the house vintage. But my exultation was cut short as the attending gentry turned upon themselves in an orgy of an indescribable frenzy. A single drop of that forbidden tannin gifted me with a dizzying glimpse of a hibernating horror beneath my feet, and in that moment, I understood the terrible truth of the world. I stood reborn, molted by newfound knowledge, my head throbbing to the growing whine of winged vermin come to drink the tainted blood… of The Darkest Dungeon.', 'You still foolishly consider yourself an entity separate from the whole. I know better. And I. Will. Show you.', 'The flesh is fluid, it can be changed, reshaped, remade!', 'The flesh is immortal, it is undying. Pray it does not take too hideous of form.', 'Behold the heart of the world! Progenitor of life, father and mother, alpha and omega! Our creator... and our destroyer.']
| 16,420.5
| 32,840
| 0.766024
| 5,396
| 32,841
| 4.666049
| 0.366753
| 0.007348
| 0.001589
| 0.00143
| 0.009532
| 0.009532
| 0.009532
| 0.00564
| 0
| 0
| 0
| 0
| 0.164672
| 32,841
| 1
| 32,841
| 32,841
| 0.917034
| 0
| 0
| 0
| 0
| 43
| 0.952833
| 0.000639
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 10
|
22d0993dd2f3e10c2da761ab53833eede5a2990e
| 335
|
py
|
Python
|
genesis/xcoin-hash-master/test.py
|
thepinkcoins2018/thepinkcoins
|
16446746386feeba80253371689bf314dbab2d86
|
[
"MIT"
] | 25
|
2018-09-17T04:11:51.000Z
|
2018-11-02T10:49:24.000Z
|
genesis/xcoin-hash-master/test.py
|
thepinkcoins2018/thepinkcoins
|
16446746386feeba80253371689bf314dbab2d86
|
[
"MIT"
] | 1
|
2017-12-06T01:22:46.000Z
|
2018-05-11T08:10:55.000Z
|
genesis/xcoin-hash-master/test.py
|
thepinkcoins2018/thepinkcoins
|
16446746386feeba80253371689bf314dbab2d86
|
[
"MIT"
] | 6
|
2018-01-03T06:07:17.000Z
|
2021-05-31T01:43:38.000Z
|
import xcoin_hash
from binascii import unhexlify
teststart = '700000005d385ba114d079970b29a9418fd0549e7d68a95c7f168621a314201000000000578586d149fd07b22f3a8a347c516de7052f034d2b76ff68e0d6ecff9b77a45489e3fd511732011df0731000';
testbin = unhexlify(teststart)
hash_bin = xcoin_hash.getPoWHash(testbin)
print str(hash_bin.encode('hex'))
| 33.5
| 175
| 0.886567
| 24
| 335
| 12.208333
| 0.625
| 0.061433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.374603
| 0.059701
| 335
| 9
| 176
| 37.222222
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0.486567
| 0.477612
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.166667
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
fe0771147f64a705960ea428671be8d8b9fdca55
| 1,141
|
py
|
Python
|
data/train/python/fe0771147f64a705960ea428671be8d8b9fdca55admin.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/fe0771147f64a705960ea428671be8d8b9fdca55admin.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/fe0771147f64a705960ea428671be8d8b9fdca55admin.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from django.contrib import admin
#
#
#
class CreatedByBaseAdmin(admin.ModelAdmin):
"""
Base class for handling created by stuff
"""
readonly_fields = ('created_by', 'created_date')
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
for instance in instances:
if not change:
instance.created_by = request.user
instance.save()
formset.save()
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.save()
class Meta:
abstract = True
class FullAuditBaseAdmin(admin.ModelAdmin):
"""
Base class for handling created by stuff
"""
readonly_fields = ('created_by', 'created_date', 'modified_by', 'modified_date', 'deleted_by', 'deleted_date')
def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
for instance in instances:
if not change:
instance.created_by = request.user
instance.save()
formset.save()
def save_model(self, request, obj, form, change):
if not change:
obj.created_by = request.user
obj.save()
class Meta:
abstract = True
| 21.528302
| 111
| 0.709904
| 149
| 1,141
| 5.315436
| 0.268456
| 0.090909
| 0.055556
| 0.10101
| 0.858586
| 0.858586
| 0.858586
| 0.858586
| 0.858586
| 0.858586
| 0
| 0
| 0.176161
| 1,141
| 52
| 112
| 21.942308
| 0.842553
| 0.07099
| 0
| 0.83871
| 0
| 0
| 0.086873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.032258
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe1a1cd8f6cf0201645f65dad3280a8df3e7d039
| 30,187
|
py
|
Python
|
sdk/python/pulumi_google_native/monitoring/v3/alert_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/monitoring/v3/alert_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/monitoring/v3/alert_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AlertPolicyArgs', 'AlertPolicy']
@pulumi.input_type
class AlertPolicyArgs:
def __init__(__self__, *,
alert_strategy: Optional[pulumi.Input['AlertStrategyArgs']] = None,
combiner: Optional[pulumi.Input['AlertPolicyCombiner']] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]] = None,
creation_record: Optional[pulumi.Input['MutationRecordArgs']] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input['DocumentationArgs']] = None,
enabled: Optional[pulumi.Input[bool]] = None,
mutation_record: Optional[pulumi.Input['MutationRecordArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
validity: Optional[pulumi.Input['StatusArgs']] = None):
"""
The set of arguments for constructing a AlertPolicy resource.
:param pulumi.Input['AlertStrategyArgs'] alert_strategy: Control over how this alert policy's notification channels are notified.
:param pulumi.Input['AlertPolicyCombiner'] combiner: How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
:param pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
:param pulumi.Input['MutationRecordArgs'] creation_record: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored.
:param pulumi.Input[str] display_name: A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.
:param pulumi.Input['DocumentationArgs'] documentation: Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation.
:param pulumi.Input[bool] enabled: Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out.
:param pulumi.Input['MutationRecordArgs'] mutation_record: A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored.
:param pulumi.Input[str] name: Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
:param pulumi.Input['StatusArgs'] validity: Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents.
"""
if alert_strategy is not None:
pulumi.set(__self__, "alert_strategy", alert_strategy)
if combiner is not None:
pulumi.set(__self__, "combiner", combiner)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if creation_record is not None:
pulumi.set(__self__, "creation_record", creation_record)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if documentation is not None:
pulumi.set(__self__, "documentation", documentation)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if mutation_record is not None:
pulumi.set(__self__, "mutation_record", mutation_record)
if name is not None:
pulumi.set(__self__, "name", name)
if notification_channels is not None:
pulumi.set(__self__, "notification_channels", notification_channels)
if project is not None:
pulumi.set(__self__, "project", project)
if user_labels is not None:
pulumi.set(__self__, "user_labels", user_labels)
if validity is not None:
pulumi.set(__self__, "validity", validity)
@property
@pulumi.getter(name="alertStrategy")
def alert_strategy(self) -> Optional[pulumi.Input['AlertStrategyArgs']]:
"""
Control over how this alert policy's notification channels are notified.
"""
return pulumi.get(self, "alert_strategy")
@alert_strategy.setter
def alert_strategy(self, value: Optional[pulumi.Input['AlertStrategyArgs']]):
pulumi.set(self, "alert_strategy", value)
@property
@pulumi.getter
def combiner(self) -> Optional[pulumi.Input['AlertPolicyCombiner']]:
"""
How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
"""
return pulumi.get(self, "combiner")
@combiner.setter
def combiner(self, value: Optional[pulumi.Input['AlertPolicyCombiner']]):
pulumi.set(self, "combiner", value)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]]:
"""
A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter(name="creationRecord")
def creation_record(self) -> Optional[pulumi.Input['MutationRecordArgs']]:
"""
A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored.
"""
return pulumi.get(self, "creation_record")
@creation_record.setter
def creation_record(self, value: Optional[pulumi.Input['MutationRecordArgs']]):
pulumi.set(self, "creation_record", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def documentation(self) -> Optional[pulumi.Input['DocumentationArgs']]:
"""
Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation.
"""
return pulumi.get(self, "documentation")
@documentation.setter
def documentation(self, value: Optional[pulumi.Input['DocumentationArgs']]):
pulumi.set(self, "documentation", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="mutationRecord")
def mutation_record(self) -> Optional[pulumi.Input['MutationRecordArgs']]:
"""
A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored.
"""
return pulumi.get(self, "mutation_record")
@mutation_record.setter
def mutation_record(self, value: Optional[pulumi.Input['MutationRecordArgs']]):
pulumi.set(self, "mutation_record", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notificationChannels")
def notification_channels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
"""
return pulumi.get(self, "notification_channels")
@notification_channels.setter
def notification_channels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notification_channels", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
"""
return pulumi.get(self, "user_labels")
@user_labels.setter
def user_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "user_labels", value)
@property
@pulumi.getter
def validity(self) -> Optional[pulumi.Input['StatusArgs']]:
"""
Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents.
"""
return pulumi.get(self, "validity")
@validity.setter
def validity(self, value: Optional[pulumi.Input['StatusArgs']]):
pulumi.set(self, "validity", value)
class AlertPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alert_strategy: Optional[pulumi.Input[pulumi.InputType['AlertStrategyArgs']]] = None,
combiner: Optional[pulumi.Input['AlertPolicyCombiner']] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConditionArgs']]]]] = None,
creation_record: Optional[pulumi.Input[pulumi.InputType['MutationRecordArgs']]] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input[pulumi.InputType['DocumentationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
mutation_record: Optional[pulumi.Input[pulumi.InputType['MutationRecordArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
validity: Optional[pulumi.Input[pulumi.InputType['StatusArgs']]] = None,
__props__=None):
"""
Creates a new alerting policy.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AlertStrategyArgs']] alert_strategy: Control over how this alert policy's notification channels are notified.
:param pulumi.Input['AlertPolicyCombiner'] combiner: How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
:param pulumi.Input[pulumi.InputType['MutationRecordArgs']] creation_record: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored.
:param pulumi.Input[str] display_name: A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.
:param pulumi.Input[pulumi.InputType['DocumentationArgs']] documentation: Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation.
:param pulumi.Input[bool] enabled: Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out.
:param pulumi.Input[pulumi.InputType['MutationRecordArgs']] mutation_record: A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored.
:param pulumi.Input[str] name: Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
:param pulumi.Input[pulumi.InputType['StatusArgs']] validity: Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[AlertPolicyArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new alerting policy.
:param str resource_name: The name of the resource.
:param AlertPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AlertPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alert_strategy: Optional[pulumi.Input[pulumi.InputType['AlertStrategyArgs']]] = None,
combiner: Optional[pulumi.Input['AlertPolicyCombiner']] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConditionArgs']]]]] = None,
creation_record: Optional[pulumi.Input[pulumi.InputType['MutationRecordArgs']]] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input[pulumi.InputType['DocumentationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
mutation_record: Optional[pulumi.Input[pulumi.InputType['MutationRecordArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
validity: Optional[pulumi.Input[pulumi.InputType['StatusArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AlertPolicyArgs.__new__(AlertPolicyArgs)
__props__.__dict__["alert_strategy"] = alert_strategy
__props__.__dict__["combiner"] = combiner
__props__.__dict__["conditions"] = conditions
__props__.__dict__["creation_record"] = creation_record
__props__.__dict__["display_name"] = display_name
__props__.__dict__["documentation"] = documentation
__props__.__dict__["enabled"] = enabled
__props__.__dict__["mutation_record"] = mutation_record
__props__.__dict__["name"] = name
__props__.__dict__["notification_channels"] = notification_channels
__props__.__dict__["project"] = project
__props__.__dict__["user_labels"] = user_labels
__props__.__dict__["validity"] = validity
super(AlertPolicy, __self__).__init__(
'google-native:monitoring/v3:AlertPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AlertPolicy':
"""
Get an existing AlertPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AlertPolicyArgs.__new__(AlertPolicyArgs)
__props__.__dict__["alert_strategy"] = None
__props__.__dict__["combiner"] = None
__props__.__dict__["conditions"] = None
__props__.__dict__["creation_record"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["documentation"] = None
__props__.__dict__["enabled"] = None
__props__.__dict__["mutation_record"] = None
__props__.__dict__["name"] = None
__props__.__dict__["notification_channels"] = None
__props__.__dict__["user_labels"] = None
__props__.__dict__["validity"] = None
return AlertPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="alertStrategy")
def alert_strategy(self) -> pulumi.Output['outputs.AlertStrategyResponse']:
"""
Control over how this alert policy's notification channels are notified.
"""
return pulumi.get(self, "alert_strategy")
@property
@pulumi.getter
def combiner(self) -> pulumi.Output[str]:
"""
How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.
"""
return pulumi.get(self, "combiner")
@property
@pulumi.getter
def conditions(self) -> pulumi.Output[Sequence['outputs.ConditionResponse']]:
"""
A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.
"""
return pulumi.get(self, "conditions")
@property
@pulumi.getter(name="creationRecord")
def creation_record(self) -> pulumi.Output['outputs.MutationRecordResponse']:
"""
A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored.
"""
return pulumi.get(self, "creation_record")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def documentation(self) -> pulumi.Output['outputs.DocumentationResponse']:
"""
Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation.
"""
return pulumi.get(self, "documentation")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="mutationRecord")
def mutation_record(self) -> pulumi.Output['outputs.MutationRecordResponse']:
"""
A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored.
"""
return pulumi.get(self, "mutation_record")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationChannels")
def notification_channels(self) -> pulumi.Output[Sequence[str]]:
"""
Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
"""
return pulumi.get(self, "notification_channels")
@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> pulumi.Output[Mapping[str, str]]:
"""
User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
"""
return pulumi.get(self, "user_labels")
@property
@pulumi.getter
def validity(self) -> pulumi.Output['outputs.StatusResponse']:
"""
Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents.
"""
return pulumi.get(self, "validity")
| 65.766885
| 521
| 0.707225
| 3,794
| 30,187
| 5.475488
| 0.081708
| 0.059834
| 0.059449
| 0.022865
| 0.841822
| 0.801771
| 0.765572
| 0.748917
| 0.726774
| 0.692885
| 0
| 0.001761
| 0.209958
| 30,187
| 458
| 522
| 65.91048
| 0.869303
| 0.483685
| 0
| 0.426573
| 1
| 0
| 0.134796
| 0.022489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.003497
| 0.027972
| 0.003497
| 0.27972
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3ae60858b64ba2ea8d594674120067c89db2313
| 1,115
|
py
|
Python
|
eqb/scripts/change-endian.py
|
cryptotechguru/tesseract
|
9b64c61483710c94390404c3d920c1059cbfcda7
|
[
"MIT"
] | null | null | null |
eqb/scripts/change-endian.py
|
cryptotechguru/tesseract
|
9b64c61483710c94390404c3d920c1059cbfcda7
|
[
"MIT"
] | 45
|
2019-02-05T17:17:18.000Z
|
2019-07-20T17:21:02.000Z
|
eqb/scripts/change-endian.py
|
cryptotechguru/tesseract
|
9b64c61483710c94390404c3d920c1059cbfcda7
|
[
"MIT"
] | 6
|
2019-02-01T12:30:48.000Z
|
2019-03-01T20:33:14.000Z
|
# This is a handy reverses the endianess of a given binary string in HEX
input = "020000000001017c037e163f8dfee4632a8cf6c87187d3cb61224e6dae8f4b0ed0fae3a38008570000000017160014c5729e3aaacb6a160fa79949a8d7f1e5cd1fbc51feffffff0288102c040000000017a914ed649576ad657747835d116611981c90113c074387005a62020000000017a914e62a29e7d756eb30c453ae022f315619fe8ddfbb8702483045022100b40db3a574a7254d60f8e64335d9bab60ff986ad7fe1c0ad06dcfc4ba896e16002201bbf15e25b0334817baa34fd02ebe90c94af2d65226c9302a60a96e8357c0da50121034f889691dacb4b7152f42f566095a8c2cec6482d2fc0a16f87f59691e7e37824df000000"
def test():
assert reverse("") == ""
assert reverse("F") == "F"
assert reverse("FF") == "FF"
assert reverse("00FF") == "FF00"
assert reverse("AA00FF") == "FF00AA"
assert reverse("AB01EF") == "EF01AB"
assert reverse("b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f1963") == "63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5"
def reverse(input):
res = "".join(reversed([input[i:i+2] for i in range(0, len(input), 2)]))
return res
if __name__ == "__main__":
test()
print(reverse(input))
| 48.478261
| 507
| 0.833184
| 71
| 1,115
| 12.971831
| 0.605634
| 0.098806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.412281
| 0.079821
| 1,115
| 23
| 508
| 48.478261
| 0.48538
| 0.06278
| 0
| 0
| 0
| 0
| 0.641762
| 0.597701
| 0
| 1
| 0
| 0
| 0.466667
| 1
| 0.133333
| false
| 0
| 0
| 0
| 0.2
| 0.066667
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3e62cbd20285e2288a6d50a123c377745e43d52
| 125
|
py
|
Python
|
tests/test_utils/helpers.py
|
innoviz-sw-infra/rapid-env
|
acc5e1e461af42b5fbb7024c0b79d4315c206fe2
|
[
"MIT"
] | 1
|
2021-02-15T20:55:49.000Z
|
2021-02-15T20:55:49.000Z
|
tests/test_utils/helpers.py
|
innoviz-sw-infra/rapid-env
|
acc5e1e461af42b5fbb7024c0b79d4315c206fe2
|
[
"MIT"
] | null | null | null |
tests/test_utils/helpers.py
|
innoviz-sw-infra/rapid-env
|
acc5e1e461af42b5fbb7024c0b79d4315c206fe2
|
[
"MIT"
] | null | null | null |
from pathlib import Path
def tmp_folder(test_filename):
return Path(__file__).parent / 'tmp' / Path(test_filename).stem
| 25
| 67
| 0.76
| 18
| 125
| 4.888889
| 0.722222
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136
| 125
| 5
| 67
| 25
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
43427e1561417d0bb0264993654a5f5a6e63d618
| 5,830
|
py
|
Python
|
tests/core/contracts/test_contract_example.py
|
iamdefinitelyahuman/web3.py
|
7cc996723841895b9cc4feac354bc06d711dee05
|
[
"MIT"
] | 2
|
2019-09-27T09:33:10.000Z
|
2019-10-09T10:34:04.000Z
|
tests/core/contracts/test_contract_example.py
|
iamdefinitelyahuman/web3.py
|
7cc996723841895b9cc4feac354bc06d711dee05
|
[
"MIT"
] | null | null | null |
tests/core/contracts/test_contract_example.py
|
iamdefinitelyahuman/web3.py
|
7cc996723841895b9cc4feac354bc06d711dee05
|
[
"MIT"
] | 2
|
2019-02-26T23:01:31.000Z
|
2019-03-03T02:10:57.000Z
|
# This file is used by the documentation as an example of how to write unit tests with web3.py
import pytest
from web3 import (
EthereumTesterProvider,
Web3,
)
@pytest.fixture
def tester_provider():
return EthereumTesterProvider()
@pytest.fixture
def eth_tester(tester_provider):
return tester_provider.ethereum_tester
@pytest.fixture
def w3(tester_provider):
return Web3(tester_provider)
@pytest.fixture
def foo_contract(eth_tester, w3):
# For simplicity of this example we statically define the
# contract code here. You might read your contracts from a
# file, or something else to test with in your own code
#
# pragma solidity^0.5.3;
#
# contract Foo {
#
# string public bar;
# event barred(string _bar);
#
# constructor() public {
# bar = "hello world";
# }
#
# function setBar(string memory _bar) public {
# bar = _bar;
# emit barred(_bar);
# }
#
# }
deploy_address = eth_tester.get_accounts()[0]
abi = """[{"anonymous":false,"inputs":[{"indexed":false,"name":"_bar","type":"string"}],"name":"barred","type":"event"},{"constant":false,"inputs":[{"name":"_bar","type":"string"}],"name":"setBar","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"constant":true,"inputs":[],"name":"bar","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"}]""" # noqa: E501
# This bytecode is the output of compiling with
# solc version:0.5.3+commit.10d17f24.Emscripten.clang
bytecode = """608060405234801561001057600080fd5b506040805190810160405280600b81526020017f68656c6c6f20776f726c640000000000000000000000000000000000000000008152506000908051906020019061005c929190610062565b50610107565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106100a357805160ff19168380011785556100d1565b828001600101855582156100d1579182015b828111156100d05782518255916020019190600101906100b5565b5b5090506100de91906100e2565b5090565b61010491905b808211156101005760008160009055506001016100e8565b5090565b90565b6103bb806101166000396000f3fe608060405234801561001057600080fd5b5060043610610053576000357c01000000000000000000000000000000000000000000000000000000009004806397bc14aa14610058578063febb0f7e14610113575b600080fd5b6101116004803603602081101561006e57600080fd5b810190808035906020019064010000000081111561008b57600080fd5b82018360208201111561009d57600080fd5b803590602001918460018302840111640100000000831117156100bf57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610196565b005b61011b61024c565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561015b578082015181840152602081019050610140565b50505050905090810190601f1680156101885780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b80600090805190602001906101ac9291906102ea565b507f5f71ad82e16f082de5ff496b140e2fbc8621eeb37b36d59b185c3f1364bbd529816040518080602001828103825283818151815260200191508051906020019080838360005b8381101561020f5780820151818401526020810190506101f4565b50505050905090810190601f16801561023c5780820380516001836020036101000a031916815260200191505b509250505060405180910390a150565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156102e25780601f106102b7576101008083540402835291602001916102e2565b820191906000526020600020905b8154815290600101906020018083116102c557829003601f168201915b505050505081565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061032b57805160ff1916838001178555610359565b82800160010185558215610359579182015b8281111561035857825182559160200191906001019061033d565b5b509050610366919061036a565b5090565b61038c91905b80821115610388576000816000905550600101610370565b5090565b9056fea165627a7a72305820ae6ca683d45ee8a71bba45caee29e4815147cd308f772c853a20dfe08214dbb50029""" # noqa: E501
# Create our contract class.
FooContract = w3.eth.contract(abi=abi, bytecode=bytecode)
# issue a transaction to deploy the contract.
tx_hash = FooContract.constructor().transact({
'from': deploy_address,
})
# wait for the transaction to be mined
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, 180)
# instantiate and return an instance of our contract.
return FooContract(tx_receipt.contractAddress)
def test_initial_greeting(foo_contract):
hw = foo_contract.caller.bar()
assert hw == "hello world"
def test_can_update_greeting(w3, foo_contract):
# send transaction that updates the greeting
tx_hash = foo_contract.functions.setBar(
"testing contracts is easy",
).transact({
'from': w3.eth.accounts[1],
})
w3.eth.waitForTransactionReceipt(tx_hash, 180)
# verify that the contract is now using the updated greeting
hw = foo_contract.caller.bar()
assert hw == "testing contracts is easy"
def test_updating_greeting_emits_event(w3, foo_contract):
# send transaction that updates the greeting
tx_hash = foo_contract.functions.setBar(
"testing contracts is easy",
).transact({
'from': w3.eth.accounts[1],
})
receipt = w3.eth.waitForTransactionReceipt(tx_hash, 180)
# get all of the `barred` logs for the contract
logs = foo_contract.events.barred.getLogs()
assert len(logs) == 1
# verify that the log's data matches the expected value
event = logs[0]
assert event.blockHash == receipt.blockHash
assert event.args._bar == "testing contracts is easy"
| 56.057692
| 2,501
| 0.808062
| 415
| 5,830
| 11.243373
| 0.363855
| 0.021217
| 0.013716
| 0.01886
| 0.123232
| 0.096657
| 0.088298
| 0.055722
| 0.055722
| 0.055722
| 0
| 0.450466
| 0.116981
| 5,830
| 103
| 2,502
| 56.601942
| 0.455905
| 0.178731
| 0
| 0.346939
| 0
| 0.020408
| 0.648148
| 0.622264
| 0
| 1
| 0
| 0
| 0.102041
| 1
| 0.142857
| false
| 0
| 0.040816
| 0.061224
| 0.265306
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a2b286dc1ee965b45c86a9e18ce924fd29316f2
| 24,269
|
py
|
Python
|
tests/test_checkpoint_syslog_rfc5424.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
tests/test_checkpoint_syslog_rfc5424.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
tests/test_checkpoint_syslog_rfc5424.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
env = Environment()
# Test Anti Malware
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 action="Detect" flags="311552" ifdir="outbound" ifname="eth0" loguid="{0xbbf1236f,0xd5d32253,0xc1bcfade,0x3753c3e6}" origin="10.160.99.101" originsicname="cn={{ host }},o=gw-02bd87..4zrt7d" sequencenum="1" time="1612779574" version="5" __policy_id_tag="product=VPN-1 & FireWall-1[db_tag={93CEED8D-9ADE-6343-8B89-54FB5A068DC3};mgmt=gw-02bd87;date=1610491680;policy_name=Standard\]" confidence_level="5" dst="91.195.240.13" http_host="update-help.com" lastupdatetime="1612779738" log_id="2" malware_action="Communication with C&C site" malware_rule_id="{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}" method="GET" packet_capture_name="src-10.160.59.141.cap" packet_capture_time="1612779677" packet_capture_unique_id="time1612779574.id1c3adad8.blade04" policy="Standard" policy_time="1612776132" product="Anti Malware" protection_id="00591E0A5" protection_name="APT_RampantKitten.TC.ah" protection_type="URL reputation" proto="6" proxy_src_ip="10.160.59.141" received_bytes="44245" resource="http://update-help.com/" s_port="54470" scope="10.160.59.141" sent_bytes="2624" service="80" session_id="{0x60211036,0x0,0xb3d6e900,0xc68052fb}" severity="4" smartdefense_profile="Optimized" src="10.160.59.141" suppressed_logs="6" layer_name="Standard Threat Prevention" layer_uuid="{75CC4D40-8C8C-4CD6-AF25-51063A9D2AD1}" malware_rule_id="{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}" smartdefense_profile="Optimized" user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36" vendor_list="Check Point ThreatCloud" web_client_type="Chrome"]
def test_checkpoint_syslog_anti_malware(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Checkpoint
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 action=\"Detect\" flags=\"311552\" ifdir=\"outbound\" ifname=\"eth0\" loguid=\"{0xbbf1236f,0xd5d32253,0xc1bcfade,0x3753c3e6}\" origin=\"10.160.99.101\" originsicname=\"cn={{ host }},o=gw-02bd87..4zrt7d\" sequencenum=\"1\" time=\"{{ epoch }}\" version=\"5\" __policy_id_tag=\"product=VPN-1 & FireWall-1[db_tag={93CEED8D-9ADE-6343-8B89-54FB5A068DC3};mgmt=gw-02bd87;date=1610491680;policy_name=Standard\]\" confidence_level=\"5\" dst=\"91.195.240.13\" http_host=\"update-help.com\" lastupdatetime=\"1612779738\" log_id=\"2\" malware_action=\"Communication with C&C site\" malware_rule_id=\"{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}\" method=\"GET\" packet_capture_name=\"src-10.160.59.141.cap\" packet_capture_time=\"1612779677\" packet_capture_unique_id=\"time1612779574.id1c3adad8.blade04\" policy=\"Standard\" policy_time=\"1612776132\" product=\"Anti Malware\" protection_id=\"00591E0A5\" protection_name=\"APT_RampantKitten.TC.ah\" protection_type=\"URL reputation\" proto=\"6\" proxy_src_ip=\"10.160.59.141\" received_bytes=\"44245\" resource=\"http://update-help.com/\" s_port=\"54470\" scope=\"10.160.59.141\" sent_bytes=\"2624\" service=\"80\" session_id=\"{0x60211036,0x0,0xb3d6e900,0xc68052fb}\" severity=\"4\" smartdefense_profile=\"Optimized\" src=\"10.160.59.141\" suppressed_logs=\"6\" layer_name=\"Standard Threat Prevention\" layer_uuid=\"{75CC4D40-8C8C-4CD6-AF25-51063A9D2AD1}\" malware_rule_id=\"{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}\" smartdefense_profile=\"Optimized\" user_agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36\" vendor_list=\"Check Point ThreatCloud\" web_client_type=\"Chrome\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netids host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:ids_malware"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# Test Threat Emulation
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 action="Accept" flags="280832" ifdir="inbound" ifname="eth0" loguid="{0x4b397cf0,0x530e24fb,0x1b71ea26,0x27225237}" origin="10.160.99.101" originsicname="cn={{ host }},o=gw-02bd87..4zrt7d" sequencenum="5" time="1612815085" version="5" __policy_id_tag="product=VPN-1 & FireWall-1[db_tag={93CEED8D-9ADE-6343-8B89-54FB5A068DC3};mgmt=gw-02bd87;date=1610491680;policy_name=Standard\]" analyzed_on="Check Point Threat Cloud" confidence_level="0" content_length="456201" content_type="application/octet-stream" dst="173.194.184.234" emulated_on="Win7 64b,Office 2010,Adobe 11" http_host="r5---sn-p5qlsndd.gvt1.com" http_server="downloads" http_status="206" lastupdatetime="1612815085" log_id="4000" log_uid="{3C6AD7C2-72C9-6146-BDD0-BC61D8C2720D}" malware_rule_id="{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}" method="GET" policy="Standard" policy_time="1612783608" product="Threat Emulation" protection_type="HTTPEmulation" proto="6" protocol="HTTP" proxy_src_ip="10.160.59.141" resource="dummy_resource" s_port="54750" scope="10.160.59.141" service="80" session_id="{0x3c6ad7c2,0x72c96146,0xbdd0bc61,0xd8c2720d}" severity="0" sig_id="0" smartdefense_profile="Optimized" src="10.160.59.141" te_verdict_determined_by="Win7 64b,Office 2010,Adobe 11: trusted source. " layer_name="Standard Threat Prevention" layer_uuid="{75CC4D40-8C8C-4CD6-AF25-51063A9D2AD1}" malware_rule_id="{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}" smartdefense_profile="Optimized" user_agent="Microsoft BITS/7.8" verdict="Benign" web_client_type="Other: Microsoft BITS\/7.8"]
def test_checkpoint_syslog_threat_emulation(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 action=\"Accept\" flags=\"280832\" ifdir=\"inbound\" ifname=\"eth0\" loguid=\"{0x4b397cf0,0x530e24fb,0x1b71ea26,0x27225237}\" origin=\"10.160.99.101\" originsicname=\"cn={{ host }},o=gw-02bd87..4zrt7d\" sequencenum=\"5\" time=\"{{ epoch }}\" version=\"5\" __policy_id_tag=\"product=VPN-1 & FireWall-1[db_tag={93CEED8D-9ADE-6343-8B89-54FB5A068DC3};mgmt=gw-02bd87;date=1610491680;policy_name=Standard\]\" analyzed_on=\"Check Point Threat Cloud\" confidence_level=\"0\" content_length=\"456201\" content_type=\"application/octet-stream\" dst=\"173.194.184.234\" emulated_on=\"Win7 64b,Office 2010,Adobe 11\" http_host=\"r5---sn-p5qlsndd.gvt1.com\" http_server=\"downloads\" http_status=\"206\" lastupdatetime=\"1612815085\" log_id=\"4000\" log_uid=\"{3C6AD7C2-72C9-6146-BDD0-BC61D8C2720D}\" malware_rule_id=\"{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}\" method=\"GET\" policy=\"Standard\" policy_time=\"1612783608\" product=\"Threat Emulation\" protection_type=\"HTTPEmulation\" proto=\"6\" protocol=\"HTTP\" proxy_src_ip=\"10.160.59.141\" resource=\"dummy_resource\" s_port=\"54750\" scope=\"10.160.59.141\" service=\"80\" session_id=\"{0x3c6ad7c2,0x72c96146,0xbdd0bc61,0xd8c2720d}\" severity=\"0\" sig_id=\"0\" smartdefense_profile=\"Optimized\" src=\"10.160.59.141\" te_verdict_determined_by=\"Win7 64b,Office 2010,Adobe 11: trusted source. \" layer_name=\"Standard Threat Prevention\" layer_uuid=\"{75CC4D40-8C8C-4CD6-AF25-51063A9D2AD1}\" malware_rule_id=\"{A2B8ED86-C9D0-4B0E-9334-C3CFA223CFC2}\" smartdefense_profile=\"Optimized\" user_agent=\"Microsoft BITS/7.8\" verdict=\"Benign\" web_client_type=\"Other: Microsoft BITS\/7.8\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netids host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:ids_malware"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# Test URL Filtering
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 flags="166216" ifdir="outbound" loguid="{0x6021fc5b,0x1,0x6563a00a,0x335f665b}" origin="10.160.99.101" originsicname="cn={{ host }},o=gw-02bd87..4zrt7d" sequencenum="2" time="1612840025" version="5" db_ver="21020901" description="Gateway was updated with database version: 3022101." product="URL Filtering" severity="1" update_status="updated"]
def test_checkpoint_syslog_url_filtering(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 flags=\"166216\" ifdir=\"outbound\" loguid=\"{0x6021fc5b,0x1,0x6563a00a,0x335f665b}\" origin=\"10.160.99.101\" originsicname=\"cn={{ host }},o=gw-02bd87..4zrt7d\" sequencenum=\"2\" time=\"{{ epoch }}\" version=\"5\" db_ver=\"21020901\" description=\"Gateway was updated with database version: 3022101.\" product=\"URL Filtering\" severity=\"1\" update_status=\"updated\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netproxy host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:web"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# Test VPN-1 & FireWall-1
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 action="Accept" flags="810244" ifdir="inbound" ifname="eth0" logid="0" loguid="{0x4d4d455b,0x35b8a7f2,0xdf15314d,0x5765225e}" origin="10.160.99.101" originsicname="cn={{ host }},o=gw-02bd87..4zrt7d" sequencenum="74" time="1612518129" version="5" __policy_id_tag="product=VPN-1 & FireWall-1[db_tag={93CEED8D-9ADE-6343-8B89-54FB5A068DC3};mgmt=gw-02bd87;date=1610491680;policy_name=Standard\]" dst="10.160.99.101" hll_key="9901336306766781296" inzone="Internal" layer_name="Network" layer_name="Web" layer_uuid="f5cec687-05e5-4573-b1dc-08119f24cbc9" layer_uuid="d9050599-e213-4537-b7b5-3d203031a58f" match_id="1" match_id="16777217" parent_rule="0" parent_rule="0" rule_action="Accept" rule_action="Accept" rule_name="Cleanup rule" rule_uid="d7a2b9f5-9c83-4ea4-b22d-a07db9d24490" rule_uid="c8c796c4-64ce-4c4d-a9db-0534737f89d9" outzone="Local" product="VPN-1 & FireWall-1" proto="17" s_port="443" service="26796" src="8.8.8.8"]
def test_checkpoint_syslog_vpn_and_firewall(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions for Checkpoint
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 action=\"Accept\" flags=\"810244\" ifdir=\"inbound\" ifname=\"eth0\" logid=\"0\" loguid=\"{0x4d4d455b,0x35b8a7f2,0xdf15314d,0x5765225e}\" origin=\"10.160.99.101\" originsicname=\"cn={{ host }},o=gw-02bd87..4zrt7d\" sequencenum=\"74\" time=\"{{ epoch }}\" version=\"5\" __policy_id_tag=\"product=VPN-1 & FireWall-1[db_tag={93CEED8D-9ADE-6343-8B89-54FB5A068DC3};mgmt=gw-02bd87;date=1610491680;policy_name=Standard\]\" dst=\"10.160.99.101\" hll_key=\"9901336306766781296\" inzone=\"Internal\" layer_name=\"Network\" layer_name=\"Web\" layer_uuid=\"f5cec687-05e5-4573-b1dc-08119f24cbc9\" layer_uuid=\"d9050599-e213-4537-b7b5-3d203031a58f\" match_id=\"1\" match_id=\"16777217\" parent_rule=\"0\" parent_rule=\"0\" rule_action=\"Accept\" rule_action=\"Accept\" rule_name=\"Cleanup rule\" rule_uid=\"d7a2b9f5-9c83-4ea4-b22d-a07db9d24490\" rule_uid=\"c8c796c4-64ce-4c4d-a9db-0534737f89d9\" outzone=\"Local\" product=\"VPN-1 & FireWall-1\" proto=\"17\" s_port=\"443\" service=\"26796\" src=\"8.8.8.8\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netfw host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:firewall"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# Test WEB_API_INTERNAL
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 action="Accept" flags="163872" ifdir="outbound" loguid="{0x60251375,0x0,0x6563a00a,0x34bbe8bb}" origin="10.160.99.101" originsicname="cn={{ host }},o=gw-02bd87..4zrt7d" sequencenum="1" time="1613042548" version="5" additional_info="Authentication method: Password based application token" administrator="admin" client_ip="10.160.99.102" machine="10.160.99.102" operation="Log In" operation_number="10" product="WEB_API_INTERNAL" subject="Administrator Login"]
def test_checkpoint_syslog_web_api_internal(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 action=\"Accept\" flags=\"163872\" ifdir=\"outbound\" loguid=\"{0x60251375,0x0,0x6563a00a,0x34bbe8bb}\" origin=\"10.160.99.101\" originsicname=\"cn={{ host }},o=gw-02bd87..4zrt7d\" sequencenum=\"1\" time=\"{{ epoch }}\" version=\"5\" additional_info=\"Authentication method: Password based application token\" administrator=\"admin\" client_ip=\"10.160.99.102\" machine=\"10.160.99.102\" operation=\"Log In\" operation_number=\"10\" product=\"WEB_API_INTERNAL\" subject=\"Administrator Login\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netops host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:audit"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# Test iOS Profiles
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 flags="131072" ifdir="inbound" loguid="{0x60215107,0x169a,0xd10617ac,0x4468886}" origin="10.1.46.86" sequencenum="4138" time="1612795822" version="5" calc_geo_location="calc_geo_location0" client_name="SandBlast Mobile Protect" client_version="2.72.8.3943" dashboard_orig="dashboard_orig0" device_identification="4624" email_address="email_address44" hardware_model="iPhone / iPhone 8" host_type="Mobile" incident_time="2018-06-03T17:33:09Z" jailbreak_message="False" mdm_id="E726405B-4BCF-46C6-8D1B-6F1A71E67D5D" os_name="IPhone" os_version="11.3.1" phone_number="phone_number24" product="iOS Profiles" protection_type="Global proxy" severity="0" src_user_name="Mike Johnson1" status="Removed"]
def test_checkpoint_syslog_iOS_profiles(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 flags=\"131072\" ifdir=\"inbound\" loguid=\"{0x60215102,0x269a,0xd20617ac,0x2468886}\" origin=\"10.1.46.86\" sequencenum=\"4138\" time=\"{{ epoch }}\" version=\"5\" calc_geo_location=\"calc_geo_location0\" client_name=\"SandBlast Mobile Protect\" client_version=\"2.72.8.3943\" dashboard_orig=\"dashboard_orig0\" device_identification=\"4624\" email_address=\"email_address44\" hardware_model=\"iPhone / iPhone 8\" host_type=\"Mobile\" incident_time=\"2018-06-03T17:33:09Z\" jailbreak_message=\"False\" mdm_id=\"E726405B-4BCF-46C6-8D1B-6F1A71E67D5D\" os_name=\"IPhone\" os_version=\"11.3.1\" phone_number=\"phone_number24\" product=\"iOS Profiles\" protection_type=\"Global proxy\" severity=\"0\" src_user_name=\"Mike Johnson1\" status=\"Removed\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netops host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:network"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
# Test Endpoint Compliance
# <134>1 2021-02-08T10:19:34Z gw-02bd87 CheckPoint 26203 - [sc4s@2620 flags="131072" ifdir="inbound" loguid="{0x60215107,0x169a,0xd10617ac,0x4468886}" origin="10.1.46.86" sequencenum="4138" time="1612795822" version="5" calc_geo_location="calc_geo_location0" client_name="SandBlast Mobile Protect" client_version="2.72.8.3943" dashboard_orig="dashboard_orig0" device_identification="4624" email_address="email_address44" hardware_model="iPhone / iPhone 8" host_type="Mobile" incident_time="2018-06-03T17:33:09Z" jailbreak_message="False" mdm_id="E726405B-4BCF-46C6-8D1B-6F1A71E67D5D" os_name="IPhone" os_version="11.3.1" phone_number="phone_number24" product="Endpoint Compliance" protection_type="Global proxy" severity="0" src_user_name="Mike Johnson1" status="Removed"]
def test_checkpoint_syslog_Endpoint_Compliance(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 flags=\"131072\" ifdir=\"inbound\" loguid=\"{0x60215107,0x169a,0xd10617ac,0x4468886}\" origin=\"10.1.46.86\" sequencenum=\"4138\" time=\"{{ epoch }}\" version=\"5\" calc_geo_location=\"calc_geo_location0\" client_name=\"SandBlast Mobile Protect\" client_version=\"2.72.8.3943\" dashboard_orig=\"dashboard_orig0\" device_identification=\"4624\" email_address=\"email_address44\" hardware_model=\"iPhone / iPhone 8\" host_type=\"Mobile\" incident_time=\"2018-06-03T17:33:09Z\" jailbreak_message=\"False\" mdm_id=\"E726405B-4BCF-46C6-8D1B-6F1A71E67D5D\" os_name=\"IPhone\" os_version=\"11.3.1\" phone_number=\"phone_number24\" product=\"Endpoint Compliance\" protection_type=\"Global proxy\" severity=\"0\" src_user_name=\"Mike Johnson1\" status=\"Removed\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netops host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:endpoint"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
#Test Mobile Access
#<134>1 2021-02-08T14:50:06Z r81-t279-leui-main-take-2 CheckPoint 2182 - [sc4s@2620 flags="131072" ifdir="inbound" loguid="{0x60215106,0xb,0xd10617ac,0x4468886}" origin="10.2.46.86" sequencenum="12" time="1612795806" version="5" app_repackaged="False" app_sig_id="3343cf41cb8736ad452453276b4f7c806ab83143eca0b3ad1e1bc6045e37f6a9" app_version="3.1.15" appi_name="iPGMail" calc_geo_location="calc_geo_location0" client_name="SandBlast Mobile Protect" client_version="2.73.0.3968" dashboard_orig="dashboard_orig0" device_identification="4768" email_address="email_address0" hardware_model="iPhone / iPhone 5S" host_type="Mobile" incident_time="2018-06-04T00:03:41Z" jailbreak_message="False" mdm_id="F2FCB053-5C28-4917-9FED-4821349B86A5" os_name="IPhone" os_version="11.4" phone_number="phone_number0" product="Mobile Access" protection_type="Backup Tool" severity="0" src_user_name="Allen Newsom" status="Installed"
def test_checkpoint_syslog_Mobile_Access(
record_property, setup_wordlist, setup_splunk, setup_sc4s
):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }} {{ iso }} {{ host }} CheckPoint 26203 - [sc4s@2620 flags=\"131072\" ifdir=\"inbound\" loguid=\"{0x60215106,0xb,0xd10617ac,0x4468886}\" origin=\"10.2.46.86\" sequencenum=\"12\" time=\"{{ epoch }}\" version=\"5\" app_repackaged=\"False\" app_sig_id=\"3343cf41cb8736ad452453276b4f7c806ab83143eca0b3ad1e1bc6045e37f6a9\" app_version=\"3.1.15\" appi_name=\"iPGMail\" calc_geo_location=\"calc_geo_location0\" client_name=\"SandBlast Mobile Protect\" client_version=\"2.73.0.3968\" dashboard_orig=\"dashboard_orig0\" device_identification=\"4768\" email_address=\"email_address0\" hardware_model=\"iPhone / iPhone 5S\" host_type=\"Mobile\" incident_time=\"2018-06-04T00:03:41Z\" jailbreak_message=\"False\" mdm_id=\"F2FCB053-5C28-4917-9FED-4821349B86A5\" os_name=\"IPhone\" os_version=\"11.4\" phone_number=\"phone_number0\" product=\"Mobile Access\" protection_type=\"Backup Tool\" severity=\"0\" src_user_name=\"Allen Newsom\" status=\"Installed\"]"
)
message = mt.render(mark="<134>1", host=host, bsd=bsd, iso=iso, epoch=epoch)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netops host="{{ host }}" sourcetype="cp_log:syslog" source="checkpoint:network"'
)
search = st.render(
epoch=epoch, bsd=bsd, host=host, date=date, time=time, tzoffset=tzoffset
)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
| 81.989865
| 1,755
| 0.729202
| 3,290
| 24,269
| 5.213678
| 0.149848
| 0.026118
| 0.006529
| 0.02332
| 0.955168
| 0.952486
| 0.952486
| 0.952486
| 0.94724
| 0.94724
| 0
| 0.144271
| 0.100622
| 24,269
| 296
| 1,756
| 81.989865
| 0.641591
| 0.332193
| 0
| 0.731579
| 0
| 0.042105
| 0.292388
| 0.041955
| 0
| 0
| 0.022306
| 0
| 0.042105
| 1
| 0.042105
| false
| 0.005263
| 0.026316
| 0
| 0.068421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4a8cd0392044598c8790c300ef0c6685d1117089
| 197
|
py
|
Python
|
cryptoxlib/clients/binance/functions.py
|
Belugary/cryptoxlib-aio
|
5eb9a997d1be24bfdb92164086894b657c22ea2a
|
[
"MIT"
] | null | null | null |
cryptoxlib/clients/binance/functions.py
|
Belugary/cryptoxlib-aio
|
5eb9a997d1be24bfdb92164086894b657c22ea2a
|
[
"MIT"
] | null | null | null |
cryptoxlib/clients/binance/functions.py
|
Belugary/cryptoxlib-aio
|
5eb9a997d1be24bfdb92164086894b657c22ea2a
|
[
"MIT"
] | null | null | null |
from cryptoxlib.Pair import Pair
def map_pair(pair: Pair) -> str:
return f"{pair.base}{pair.quote}"
def map_ws_pair(pair: Pair) -> str:
return f"{pair.base.lower()}{pair.quote.lower()}"
| 21.888889
| 53
| 0.675127
| 32
| 197
| 4.0625
| 0.40625
| 0.246154
| 0.184615
| 0.230769
| 0.461538
| 0.461538
| 0.461538
| 0.461538
| 0
| 0
| 0
| 0
| 0.147208
| 197
| 9
| 53
| 21.888889
| 0.77381
| 0
| 0
| 0
| 0
| 0
| 0.313131
| 0.313131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4abcf6e4522570d12b4b67cef7289071769aba82
| 1,155
|
py
|
Python
|
tests/test_site_parser/test_is_month_year_in_future.py
|
PavliukKonstantin/wallpaper-downloader
|
104b5ca1bfdb26b9132f7619406d4e756eb3654c
|
[
"MIT"
] | null | null | null |
tests/test_site_parser/test_is_month_year_in_future.py
|
PavliukKonstantin/wallpaper-downloader
|
104b5ca1bfdb26b9132f7619406d4e756eb3654c
|
[
"MIT"
] | null | null | null |
tests/test_site_parser/test_is_month_year_in_future.py
|
PavliukKonstantin/wallpaper-downloader
|
104b5ca1bfdb26b9132f7619406d4e756eb3654c
|
[
"MIT"
] | null | null | null |
from wallpaper_downloader import site_parser
def test_with_month_year_in_future(get_page_html_from_file):
"""
Test '_is_month_year_in_future' function of site_parser module.
Function is tested with the month and year more than the newest
month and year in the HTML.
Args:
get_page_html_from_file (Fixture): fixture that return HTML.
"""
page_html = get_page_html_from_file("first_main_page.html")
month_year_in_future = site_parser.is_month_year_in_future(
page_html,
"september",
"2020",
)
assert month_year_in_future is True
def test_with_month_year_in_past(get_page_html_from_file):
"""
Test '_is_month_year_in_future' function of site_parser module.
Function is tested with the month and year less than the newest
month and year in the HTML.
Args:
get_page_html_from_file (Fixture): fixture that return HTML.
"""
page_html = get_page_html_from_file("first_main_page.html")
month_year_in_future = site_parser.is_month_year_in_future(
page_html,
"july",
"2020",
)
assert month_year_in_future is False
| 28.875
| 68
| 0.719481
| 176
| 1,155
| 4.278409
| 0.232955
| 0.095618
| 0.146082
| 0.203187
| 0.895086
| 0.895086
| 0.836653
| 0.759628
| 0.759628
| 0.759628
| 0
| 0.008879
| 0.219913
| 1,155
| 39
| 69
| 29.615385
| 0.826859
| 0.395671
| 0
| 0.470588
| 0
| 0
| 0.096063
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
434b572f8482f2442f2ba37325f0bc94b3faf01d
| 131
|
py
|
Python
|
test/test_config.py
|
cnut1648/torcherist
|
2e1f7a878814126d6faff81e7214ea35ae1d0902
|
[
"MIT"
] | null | null | null |
test/test_config.py
|
cnut1648/torcherist
|
2e1f7a878814126d6faff81e7214ea35ae1d0902
|
[
"MIT"
] | null | null | null |
test/test_config.py
|
cnut1648/torcherist
|
2e1f7a878814126d6faff81e7214ea35ae1d0902
|
[
"MIT"
] | null | null | null |
from torcherist.config import dataset_dir
from pathlib import Path
def test_dataset_dir():
assert Path(dataset_dir).exists()
| 18.714286
| 41
| 0.793893
| 19
| 131
| 5.263158
| 0.631579
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137405
| 131
| 6
| 42
| 21.833333
| 0.884956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
436446a3b6cb24406ea7fd04ec72be98be8b2502
| 6,503
|
py
|
Python
|
DemoTestUnit/tests/__init__.py
|
appukuttan-shailesh/DemoTestUnit
|
bd0430fdb730ad18492f3cd32b1af039fa9fe093
|
[
"BSD-3-Clause"
] | 1
|
2021-05-05T15:44:13.000Z
|
2021-05-05T15:44:13.000Z
|
DemoTestUnit/tests/__init__.py
|
appukuttan-shailesh/DemoTestUnit
|
bd0430fdb730ad18492f3cd32b1af039fa9fe093
|
[
"BSD-3-Clause"
] | null | null | null |
DemoTestUnit/tests/__init__.py
|
appukuttan-shailesh/DemoTestUnit
|
bd0430fdb730ad18492f3cd32b1af039fa9fe093
|
[
"BSD-3-Clause"
] | null | null | null |
import sciunit
import efel
import numpy
import os
import json
import matplotlib
# To avoid figures being plotted on screen (we wish to save to file directly)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import DemoTestUnit.capabilities as cap
# ===============================================================================
class RestingPotential(sciunit.Test):
"""Test the cell's resting membrane potential"""
score_type = sciunit.scores.ZScore
description = ("Test the cell's resting membrane potential")
def __init__(self,
observation={'mean':None, 'std':None},
name="Resting Membrane Potential Test"):
self.required_capabilities += (cap.SomaProducesMembranePotential,)
sciunit.Test.__init__(self, observation, name)
def validate_observation(self, observation):
try:
assert len(observation.keys()) == 2
for key, val in observation.items():
assert key in ["mean", "std"]
assert (isinstance(val, int) or isinstance(val, float))
except Exception:
raise sciunit.errors.ObservationError(
("Observation must return a dictionary of the form:"
"{'mean': NUM1, 'std': NUM2}"))
def generate_prediction(self, model):
self.trace = model.get_soma_membrane_potential(tstop=50.0)
prediction = numpy.mean(self.trace['V'])
return prediction
def compute_score(self, observation, prediction):
score = sciunit.scores.ZScore.compute(observation, prediction)
return score
def bind_score(self, score, model, observation, prediction):
self.figures = []
self.target_dir = os.path.join("./validation_results", self.name, model.name)
if not os.path.exists(self.target_dir):
os.makedirs(self.target_dir)
# create relevant output files
# 1. JSON data: observation, prediction, score, run_times
validation_data = {
"observation": observation,
"prediction": prediction,
"score": score.score,
}
with open(os.path.join(self.target_dir, 'basic_data.json'), 'w') as f:
json.dump(validation_data, f, indent=4)
self.figures.append(os.path.join(self.target_dir, 'basic_data.json'))
# 2. JSON data: save Vm vs t traces
with open(os.path.join(self.target_dir, 'trace_data.json'), 'w') as f:
json.dump(self.trace, f, indent=4)
self.figures.append(os.path.join(self.target_dir, 'trace_data.json'))
# 3. Vm traces as PDF
fig = plt.figure()
plt.plot(self.trace["T"], self.trace["V"])
plt.title("Somatic Vm vs t")
plt.xlabel("Time (ms)")
plt.ylabel("Membrane potential (mV)")
plt.show()
fig.savefig(os.path.join(self.target_dir, "trace_plot.pdf"))
self.figures.append(os.path.join(self.target_dir, "trace_plot.pdf"))
score.related_data["figures"] = self.figures
return score
# ===============================================================================
class InputResistance(sciunit.Test):
"""Test the cell's input resistance"""
score_type = sciunit.scores.ZScore
description = ("Test the cell's input resistance")
def __init__(self,
observation={'mean':None, 'std':None},
name="Input Resistance Test"):
self.required_capabilities += (cap.SomaReceivesStepCurrent, cap.SomaProducesMembranePotential,)
sciunit.Test.__init__(self, observation, name)
def validate_observation(self, observation):
try:
assert len(observation.keys()) == 2
for key, val in observation.items():
assert key in ["mean", "std"]
assert (isinstance(val, int) or isinstance(val, float))
except Exception:
raise sciunit.errors.ObservationError(
("Observation must return a dictionary of the form:"
"{'mean': NUM1, 'std': NUM2}"))
def generate_prediction(self, model):
efel.reset()
model.inject_soma_square_current(current={'delay':20.0,
'duration':50.0,
'amplitude':-5.0})
self.trace = model.get_soma_membrane_potential_eFEL_format(tstop=100.0,
start=20.0,
stop =70.0)
efel.setDoubleSetting('stimulus_current', -5.0)
prediction = efel.getFeatureValues([self.trace], ['ohmic_input_resistance_vb_ssse'])[0]["ohmic_input_resistance_vb_ssse"][0]
return prediction
def compute_score(self, observation, prediction):
score = sciunit.scores.ZScore.compute(observation, prediction)
return score
def bind_score(self, score, model, observation, prediction):
self.figures = []
self.target_dir = os.path.join("./validation_results", self.name, model.name)
if not os.path.exists(self.target_dir):
os.makedirs(self.target_dir)
# create relevant output files
# 1. JSON data: observation, prediction, score, run_times
validation_data = {
"observation": observation,
"prediction": prediction,
"score": score.score,
}
with open(os.path.join(self.target_dir, 'basic_data.json'), 'w') as f:
json.dump(validation_data, f, indent=4)
self.figures.append(os.path.join(self.target_dir, 'basic_data.json'))
# 2. JSON data: save Vm vs t traces
with open(os.path.join(self.target_dir, 'trace_data.json'), 'w') as f:
json.dump(self.trace, f, indent=4)
self.figures.append(os.path.join(self.target_dir, 'trace_data.json'))
# 3. Vm traces as PDF
fig = plt.figure()
plt.plot(self.trace["T"], self.trace["V"])
plt.title("Somatic Vm vs t")
plt.xlabel("Time (ms)")
plt.ylabel("Membrane potential (mV)")
plt.show()
fig.savefig(os.path.join(self.target_dir, "trace_plot.pdf"))
self.figures.append(os.path.join(self.target_dir, "trace_plot.pdf"))
score.related_data["figures"] = self.figures
return score
# ===============================================================================
| 41.158228
| 132
| 0.578964
| 736
| 6,503
| 4.995924
| 0.21875
| 0.048953
| 0.063639
| 0.045689
| 0.855861
| 0.839815
| 0.807724
| 0.770737
| 0.770737
| 0.748436
| 0
| 0.008639
| 0.270183
| 6,503
| 157
| 133
| 41.420382
| 0.766119
| 0.103029
| 0
| 0.745763
| 0
| 0
| 0.136293
| 0.010325
| 0
| 0
| 0
| 0
| 0.050847
| 1
| 0.084746
| false
| 0
| 0.067797
| 0
| 0.254237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43719e6fa7f86469e2b1b30bad9d6c30894edf10
| 242
|
py
|
Python
|
locale/pot/api/plotting/_autosummary/pyvista-Renderer-view_isometric-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Plotter-view_isometric-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-Renderer-view_isometric-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
# Isometric view.
#
from pyvista import demos
pl = demos.orientation_plotter()
pl.view_isometric()
pl.show()
#
# Negative isometric view.
#
from pyvista import demos
pl = demos.orientation_plotter()
pl.view_isometric(negative=True)
pl.show()
| 17.285714
| 32
| 0.768595
| 33
| 242
| 5.515152
| 0.363636
| 0.142857
| 0.186813
| 0.263736
| 0.824176
| 0.824176
| 0.824176
| 0.824176
| 0.824176
| 0.824176
| 0
| 0
| 0.115702
| 242
| 13
| 33
| 18.615385
| 0.850467
| 0.165289
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
43d76fe6d9fe09270d06f396178f20835aa7311a
| 3,084
|
py
|
Python
|
test/test_wierd_molecule.py
|
UnixJunkie/frowns
|
427e4c11a8a4dbe865828d18221899478497795e
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_wierd_molecule.py
|
UnixJunkie/frowns
|
427e4c11a8a4dbe865828d18221899478497795e
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_wierd_molecule.py
|
UnixJunkie/frowns
|
427e4c11a8a4dbe865828d18221899478497795e
|
[
"BSD-3-Clause"
] | null | null | null |
text = """
-- StrEd --
26 28 0 0 0 0 0 0 0 0999 V2000
-3.7710 3.0165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
6.0165 0.1960 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
3.5750 -4.1335 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
8.4665 -12.6040 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
6.0165 3.0165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
13.4540 -1.3165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.1290 0.1960 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
8.4665 -1.3165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.1290 3.0165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
10.9165 0.1960 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
6.0165 -5.5500 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1.1290 -8.3750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
8.4665 4.4165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
10.9165 3.0165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-3.7710 0.1960 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
8.4665 -9.7875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
3.5750 -9.7875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.3210 4.4165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.3210 -1.3165 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
6.0165 -8.3750 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
12.0415 -3.8540 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
16.0000 -2.8165 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
14.9625 1.2250 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0
1.1290 -5.5500 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
3.5750 4.4165 0.0000 S 0 0 0 0 0 0 0 0 0 0 0 0
3.5750 -1.3165 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0
2 26 1 0 0 0 0
3 26 1 0 0 0 0
25 9 1 0 0 0 0
5 2 2 0 0 0 0
6 10 1 0 0 0 0
7 26 1 0 0 0 0
8 2 1 0 0 0 0
9 7 2 0 0 0 0
10 8 2 0 0 0 0
11 3 1 0 0 0 0
24 3 2 0 0 0 0
13 5 1 0 0 0 0
14 10 1 0 0 0 0
23 6 1 0 0 0 0
22 6 1 0 0 0 0
21 6 1 0 0 0 0
20 11 1 0 0 0 0
19 7 1 0 0 0 0
18 9 1 0 0 0 0
17 20 1 0 0 0 0
16 20 1 0 0 0 0
15 19 2 0 0 0 0
12 17 1 0 0 0 0
4 16 1 0 0 0 0
1 15 1 0 0 0 0
25 5 1 0 0 0 0
1 18 2 0 0 0 0
13 14 2 0 0 0 0
M END
> <IDNUMBER> (ST000063)
ST000063
> <SALTDATA> (ST000063)
HCL
> <LogP> (ST000063)
4.2819
> <Solubility> (ST000063)
0.87574
> <SUPPLIER> (ST000063)
TimTec
> <NATURAL> (ST000063)
SEMI-NATURAL
$$$$
"""
import StringIO
from frowns.MDL import mdlin
from frowns.Utils import SaturatedRings, StereoFinder
last = None
for i in range(1000):
file = StringIO.StringIO(text)
reader = mdlin(file)
m = reader.next()
saturation = SaturatedRings.getSaturationScore(m)
if last is not None:
assert last == saturation
last = saturation
| 31.469388
| 69
| 0.446822
| 770
| 3,084
| 1.78961
| 0.127273
| 0.545718
| 0.698839
| 0.772134
| 0.604499
| 0.554427
| 0.466618
| 0.461538
| 0.461538
| 0.461538
| 0
| 0.654393
| 0.498054
| 3,084
| 97
| 70
| 31.793814
| 0.235788
| 0
| 0
| 0.02381
| 0
| 0.309524
| 0.877432
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.035714
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
78f6592fd4a9e26bf4935ccf2f2944421dc07281
| 21,090
|
py
|
Python
|
polyaxon_schemas/ml/layers/pooling.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
polyaxon_schemas/ml/layers/pooling.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
polyaxon_schemas/ml/layers/pooling.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields, validate
from polyaxon_schemas.fields import ObjectOrListObject
from polyaxon_schemas.ml.layers.base import BaseLayerConfig, BaseLayerSchema
class MaxPooling1DSchema(BaseLayerSchema):
pool_size = fields.Int(default=2, missing=2, allow_none=True)
strides = fields.Int(default=None, missing=None)
padding = fields.Str(default='valid', missing='valid',
validate=validate.OneOf(['same', 'valid']))
@staticmethod
def schema_config():
return MaxPooling1DConfig
class MaxPooling1DConfig(BaseLayerConfig):
"""Max pooling operation for temporal data.
Args:
pool_size: Integer, size of the max pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, downsampled_steps, features)`.
Polyaxonfile usage:
```yaml
MaxPooling1D:
pool_size: 2
```
"""
IDENTIFIER = 'MaxPooling1D'
SCHEMA = MaxPooling1DSchema
def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
super(MaxPooling1DConfig, self).__init__(**kwargs)
self.pool_size = pool_size
self.strides = strides
self.padding = padding
class AveragePooling1DSchema(BaseLayerSchema):
pool_size = fields.Int(default=2, missing=2, allow_none=True)
strides = fields.Int(default=None, missing=None)
padding = fields.Str(default='valid', missing='valid',
validate=validate.OneOf(['same', 'valid']))
@staticmethod
def schema_config():
return AveragePooling1DConfig
class AveragePooling1DConfig(BaseLayerConfig):
"""Average pooling for temporal data.
Args:
pool_size: Integer, size of the max pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, downsampled_steps, features)`.
Polyaxonfile usage:
```yaml
AveragePooling1D:
pool_size: 2
```
"""
IDENTIFIER = 'AveragePooling1D'
SCHEMA = AveragePooling1DSchema
def __init__(self, pool_size=2, strides=None, padding='valid', **kwargs):
super(AveragePooling1DConfig, self).__init__(**kwargs)
self.pool_size = pool_size
self.strides = strides
self.padding = padding
class MaxPooling2DSchema(BaseLayerSchema):
pool_size = ObjectOrListObject(fields.Int, min=2, max=2, default=(2, 2), missing=(2, 2))
strides = ObjectOrListObject(fields.Int, min=2, max=2, default=None, missing=None)
padding = fields.Str(default='valid', missing='valid',
validate=validate.OneOf(['same', 'valid']))
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return MaxPooling2DConfig
class MaxPooling2DConfig(BaseLayerConfig):
"""Max pooling operation for spatial data.
Args:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
(2, 2) will halve the input in both spatial dimension.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, pooled_rows, pooled_cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, pooled_rows, pooled_cols)`
Polyaxonfile usage:
```yaml
MaxPooling2D:
pool_size: [2, 2]
```
"""
IDENTIFIER = 'MaxPooling2D'
SCHEMA = MaxPooling2DSchema
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs):
super(MaxPooling2DConfig, self).__init__(**kwargs)
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.data_format = data_format
class AveragePooling2DSchema(BaseLayerSchema):
pool_size = ObjectOrListObject(fields.Int, min=2, max=2, default=(2, 2), missing=(2, 2))
strides = ObjectOrListObject(fields.Int, min=2, max=2, default=None, missing=None)
padding = fields.Str(default='valid', missing='valid',
validate=validate.OneOf(['same', 'valid']))
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return AveragePooling2DConfig
class AveragePooling2DConfig(BaseLayerConfig):
"""Average pooling operation for spatial data.
Args:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
(2, 2) will halve the input in both spatial dimension.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, pooled_rows, pooled_cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, pooled_rows, pooled_cols)`
Polyaxonfile usage:
```yaml
AveragePooling2D:
pool_size: [2, 2]
```
"""
IDENTIFIER = 'AveragePooling2D'
SCHEMA = AveragePooling2DSchema
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format=None, **kwargs):
super(AveragePooling2DConfig, self).__init__(**kwargs)
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.data_format = data_format
class MaxPooling3DSchema(BaseLayerSchema):
pool_size = ObjectOrListObject(fields.Int, min=3, max=3, default=(2, 2, 2), missing=(2, 2, 2))
strides = ObjectOrListObject(fields.Int, min=3, max=3, default=None, missing=None)
padding = fields.Str(default='valid', missing='valid',
validate=validate.OneOf(['same', 'valid']))
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return MaxPooling3DConfig
class MaxPooling3DConfig(BaseLayerConfig):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Args:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
(2, 2, 2) will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Polyaxonfile usage:
```yaml
MaxPooling3D:
pool_size: [2, 2, 2]
```
"""
IDENTIFIER = 'MaxPooling3D'
SCHEMA = MaxPooling3DSchema
def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None,
**kwargs):
super(MaxPooling3DConfig, self).__init__(**kwargs)
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.data_format = data_format
class AveragePooling3DSchema(BaseLayerSchema):
pool_size = ObjectOrListObject(fields.Int, min=3, max=3, default=(2, 2, 2), missing=(2, 2, 2))
strides = ObjectOrListObject(fields.Int, min=3, max=3, default=None, missing=None)
padding = fields.Str(default='valid', missing='valid',
validate=validate.OneOf(['same', 'valid']))
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return AveragePooling3DConfig
class AveragePooling3DConfig(BaseLayerConfig):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Args:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
(2, 2, 2) will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Polyaxonfile usage:
```yaml
AveragePooling3D:
pool_size: [2, 2, 2]
```
"""
IDENTIFIER = 'AveragePooling3D'
SCHEMA = AveragePooling3DSchema
def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None,
**kwargs):
super(AveragePooling3DConfig, self).__init__(**kwargs)
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.data_format = data_format
class GlobalAveragePooling1DSchema(BaseLayerSchema):
@staticmethod
def schema_config():
return GlobalAveragePooling1DConfig
class GlobalAveragePooling1DConfig(BaseLayerConfig):
"""Global average pooling operation for temporal data.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
2D tensor with shape:
`(batch_size, channels)`
Polyaxonfile usage:
```yaml
GlobalAveragePooling1D:
```
"""
IDENTIFIER = 'GlobalAveragePooling1D'
SCHEMA = GlobalAveragePooling1DSchema
class GlobalMaxPooling1DSchema(BaseLayerSchema):
@staticmethod
def schema_config():
return GlobalMaxPooling1DConfig
class GlobalMaxPooling1DConfig(BaseLayerConfig):
"""Global max pooling operation for temporal data.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
2D tensor with shape:
`(batch_size, channels)`
Polyaxonfile usage:
```yaml
GlobalMaxPooling1D:
```
"""
IDENTIFIER = 'GlobalMaxPooling1D'
SCHEMA = GlobalMaxPooling1DSchema
class GlobalAveragePooling2DSchema(BaseLayerSchema):
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return GlobalAveragePooling2DConfig
class GlobalAveragePooling2DConfig(BaseLayerConfig):
"""Global average pooling operation for spatial data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
Polyaxonfile usage:
```yaml
GlobalAveragePooling2D:
```
"""
IDENTIFIER = 'GlobalAveragePooling2D'
SCHEMA = GlobalAveragePooling2DSchema
def __init__(self, data_format=None, **kwargs):
super(GlobalAveragePooling2DConfig, self).__init__(**kwargs)
self.data_format = data_format
class GlobalMaxPooling2DSchema(BaseLayerSchema):
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return GlobalMaxPooling2DConfig
class GlobalMaxPooling2DConfig(BaseLayerConfig):
"""Global max pooling operation for spatial data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, rows, cols, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, rows, cols)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
Polyaxonfile usage:
```yaml
GlobalMaxPooling2D:
```
"""
IDENTIFIER = 'GlobalMaxPooling2D'
SCHEMA = GlobalMaxPooling2DSchema
def __init__(self, data_format=None, **kwargs):
super(GlobalMaxPooling2DConfig, self).__init__(**kwargs)
self.data_format = data_format
class GlobalAveragePooling3DSchema(BaseLayerSchema):
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return GlobalAveragePooling3DConfig
class GlobalAveragePooling3DConfig(BaseLayerConfig):
"""Global Average pooling operation for 3D data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
Polyaxonfile usage:
```yaml
GlobalAveragePooling3D:
```
"""
IDENTIFIER = 'GlobalAveragePooling3D'
SCHEMA = GlobalAveragePooling3DSchema
def __init__(self, data_format=None, **kwargs):
super(GlobalAveragePooling3DConfig, self).__init__(**kwargs)
self.data_format = data_format
class GlobalMaxPooling3DSchema(BaseLayerSchema):
data_format = fields.Str(default=None, missing=None,
validate=validate.OneOf('channels_first', 'channels_last'))
@staticmethod
def schema_config():
return GlobalMaxPooling3DConfig
class GlobalMaxPooling3DConfig(BaseLayerConfig):
"""Global Max pooling operation for 3D data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
2D tensor with shape:
`(batch_size, channels)`
Polyaxonfile usage:
```yaml
GlobalMaxPooling3D:
```
"""
IDENTIFIER = 'GlobalMaxPooling3D'
SCHEMA = GlobalMaxPooling3DSchema
def __init__(self, data_format=None, **kwargs):
super(GlobalMaxPooling3DConfig, self).__init__(**kwargs)
self.data_format = data_format
| 34.57377
| 100
| 0.640161
| 2,300
| 21,090
| 5.699565
| 0.074348
| 0.048821
| 0.055534
| 0.054924
| 0.830498
| 0.821726
| 0.797849
| 0.796323
| 0.78244
| 0.772599
| 0
| 0.017703
| 0.263442
| 21,090
| 609
| 101
| 34.630542
| 0.826188
| 0.531721
| 0
| 0.579545
| 0
| 0
| 0.065316
| 0.007643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.022727
| 0.068182
| 0.636364
| 0.005682
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
6017149597a57bba43fa126d9072ae42e915475a
| 113
|
py
|
Python
|
stests/generators/wg_110/__main__.py
|
goral09/stests
|
4de26485535cadf1b708188a7133a976536ccba3
|
[
"Apache-2.0"
] | 4
|
2020-03-10T15:28:17.000Z
|
2021-10-02T11:41:17.000Z
|
stests/generators/wg_110/__main__.py
|
goral09/stests
|
4de26485535cadf1b708188a7133a976536ccba3
|
[
"Apache-2.0"
] | 1
|
2020-03-25T11:31:44.000Z
|
2020-03-25T11:31:44.000Z
|
stests/generators/wg_110/__main__.py
|
goral09/stests
|
4de26485535cadf1b708188a7133a976536ccba3
|
[
"Apache-2.0"
] | 9
|
2020-02-25T18:43:42.000Z
|
2021-08-10T17:08:42.000Z
|
from stests.generators import launcher
from stests.generators.wg_110 import meta
launcher.start_generator(meta)
| 22.6
| 41
| 0.858407
| 16
| 113
| 5.9375
| 0.625
| 0.210526
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029126
| 0.088496
| 113
| 4
| 42
| 28.25
| 0.893204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
601d388c16db649508774b8e5773da3981815e8b
| 812
|
py
|
Python
|
NoiseGenerator/INoise.py
|
johnsbuck/MapGeneration
|
0022442995772bc2ec56210a3d6465f7d766ad4d
|
[
"MIT"
] | 5
|
2019-06-02T22:52:26.000Z
|
2019-07-18T22:51:19.000Z
|
NoiseGenerator/INoise.py
|
johnsbuck/MapGeneration
|
0022442995772bc2ec56210a3d6465f7d766ad4d
|
[
"MIT"
] | 1
|
2022-02-22T17:48:41.000Z
|
2022-02-22T17:48:41.000Z
|
NoiseGenerator/INoise.py
|
johnsbuck/MapGeneration
|
0022442995772bc2ec56210a3d6465f7d766ad4d
|
[
"MIT"
] | 1
|
2019-06-03T07:55:10.000Z
|
2019-06-03T07:55:10.000Z
|
from abc import ABCMeta, abstractmethod
class INoise(metaclass=ABCMeta):
def __init__(self):
raise NotImplementedError("This object is an interface that has no implementation.")
@property
@abstractmethod
def NOISE_LIST(self):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise1d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise2d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
@abstractmethod
def noise3d(self, point, frequency):
raise NotImplementedError("This object is an interface that has no implementation.")
| 32.48
| 92
| 0.727833
| 91
| 812
| 6.43956
| 0.32967
| 0.204778
| 0.238908
| 0.290102
| 0.790102
| 0.790102
| 0.790102
| 0.790102
| 0.790102
| 0.790102
| 0
| 0.004637
| 0.203202
| 812
| 24
| 93
| 33.833333
| 0.901082
| 0
| 0
| 0.529412
| 0
| 0
| 0.33867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0
| 0.058824
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
602a107a4a64b9f18529140899aa6fbbf3740561
| 26,315
|
py
|
Python
|
test/test_blackboard.py
|
ryanstwrt/multi_agent_blackboard_system
|
b8f6ab71dfe0742a6f690de19b97d10504fc1768
|
[
"MIT"
] | 1
|
2021-08-02T10:29:35.000Z
|
2021-08-02T10:29:35.000Z
|
test/test_blackboard.py
|
ryanstwrt/multi_agent_blackboard_system
|
b8f6ab71dfe0742a6f690de19b97d10504fc1768
|
[
"MIT"
] | 10
|
2020-03-14T07:39:34.000Z
|
2021-11-03T22:55:28.000Z
|
test/test_blackboard.py
|
ryanstwrt/multi_agent_blackboard_system
|
b8f6ab71dfe0742a6f690de19b97d10504fc1768
|
[
"MIT"
] | 1
|
2021-07-18T14:43:10.000Z
|
2021-07-18T14:43:10.000Z
|
import osbrain
from osbrain import run_nameserver
from osbrain import run_agent
import numpy as np
import mabs.bb.blackboard as blackboard
import mabs.ka.base as ka
import time
import os
import h5py
from collections.abc import Iterable
def test_blackboard_init_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
assert bb.get_attr('agent_addrs') == {}
assert bb.get_attr('_agent_writing') == False
assert bb.get_attr('_new_entry') == False
assert bb.get_attr('archive_name') == 'blackboard_archive.h5'
assert bb.get_attr('_sleep_limit') == 10
assert bb.get_attr('abstract_lvls') == {}
assert bb.get_attr('abstract_lvls_format') == {}
assert bb.get_attr('_ka_to_execute') == (None, 0)
assert bb.get_attr('_trigger_event') == 0
assert bb.get_attr('_kaar') == {}
assert bb.get_attr('_pub_trigger_alias') == 'trigger'
ns.shutdown()
time.sleep(0.05)
def test_add_abstract_lvl():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
assert bb.get_attr('abstract_lvls') == {}
assert bb.get_attr('abstract_lvls_format') == {}
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
assert bb.get_attr('abstract_lvls') == {'level 1': {}}
assert bb.get_attr('abstract_lvls_format') == {'level 1': {'entry 1': str, 'entry 2': bool, 'entry 3': int}}
ns.shutdown()
time.sleep(0.05)
def test_add_panel():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': int})
assert bb.get_attr('abstract_lvls') == {'level 1': {}}
bb.add_panel(1, ['panel_a', 'panel_b', 'panel_c'])
assert bb.get_attr('abstract_lvls') == {'level 1': {'panel_a': {},'panel_b': {},'panel_c': {}}}
assert bb.get_attr('abstract_lvls_format') == {'level 1': {'panel_a': {'entry 1': str, 'entry 2': int},
'panel_b': {'entry 1': str, 'entry 2': int},
'panel_c': {'entry 1': str, 'entry 2': int}}}
bb.update_abstract_lvl(1, 'test_name', {'entry 1': 'foo', 'entry 2': 5})
assert bb.get_attr('abstract_lvls') == {'level 1': {'panel_a': {},'panel_b': {},'panel_c': {}}}
bb.update_abstract_lvl(1, 'test_name', {'entry 1': 'foo', 'entry 2': 5}, panel='panel_a')
assert bb.get_attr('abstract_lvls') == {'level 1': {'panel_a': {'test_name': {'entry 1': 'foo', 'entry 2': 5}},'panel_b': {},'panel_c': {}}}
ns.shutdown()
time.sleep(0.05)
def test_connect_executor():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.set_attr(agent_addrs={'test':{}})
bb.connect_executor('test')
assert bb.get_attr('agent_addrs')['test']['executor'][0] == 'executor_test'
ns.shutdown()
time.sleep(0.05)
def test_connect_executor_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_base = run_agent(name='ka_b', base=ka.KaBase)
ka_base.add_blackboard(bb)
ka_base.connect_executor()
assert bb.get_attr('agent_addrs')['ka_b']['executor'][0] == 'executor_ka_b'
ns.shutdown()
time.sleep(0.05)
def test_connect_trigger_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_base = run_agent(name='ka_b', base=ka.KaBase)
ka_base.add_blackboard(bb)
ka_base.connect_trigger()
assert bb.get_attr('agent_addrs')['ka_b']['trigger_response'][0] == 'trigger_response_ka_b'
ns.shutdown()
time.sleep(0.05)
def test_connect_shutdown():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.set_attr(agent_addrs={'test':{}})
bb.connect_shutdown('test')
assert bb.get_attr('agent_addrs')['test']['shutdown'][0] == 'shutdown_test'
ns.shutdown()
time.sleep(0.05)
def test_connect_shutdown_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_b = run_agent(name='ka', base=ka.KaBase)
ka_b.add_blackboard(bb)
ka_b.connect_shutdown()
assert bb.get_attr('agent_addrs')['ka']['shutdown'][0] == 'shutdown_ka'
ns.shutdown()
time.sleep(0.05)
def test_connect_writer():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.set_attr(agent_addrs={'test':{}})
bb.connect_writer('test')
assert bb.get_attr('agent_addrs')['test']['writer'][0] == 'writer_test'
ns.shutdown()
time.sleep(0.05)
def test_connect_writer_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_b = run_agent(name='ka', base=ka.KaBase)
ka_b1 = run_agent(name='ka_b1', base=ka.KaBase)
bb.set_attr(agent_addrs={'ka':{}, 'ka_b1': {}})
ka_b.add_blackboard(bb)
ka_b.connect_writer()
ka_b1.add_blackboard(bb)
ka_b1.connect_writer()
assert bb.get_attr('agent_addrs')['ka']['writer'][0] == 'writer_ka'
assert bb.get_attr('agent_addrs')['ka_b1']['writer'][0] == 'writer_ka_b1'
ns.shutdown()
time.sleep(0.05)
def test_connect_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.connect_agent(ka.KaBase, 'base')
agents = bb.get_attr('agent_addrs')
assert [x for x in agents.keys()] == ['base']
assert ns.agents() == ['blackboard', 'base']
base = ns.proxy('base')
assert bb.get_attr('agent_addrs')['base']['executor'] == (base.get_attr('_executor_alias'), base.get_attr('_executor_addr'))
assert bb.get_attr('agent_addrs')['base']['trigger_response'] == (base.get_attr('_trigger_response_alias'), base.get_attr('_trigger_response_addr'))
assert bb.get_attr('agent_addrs')['base']['shutdown'] == (base.get_attr('_shutdown_alias'), base.get_attr('_shutdown_addr'))
assert bb.get_attr('agent_addrs')['base']['writer'] == (base.get_attr('_writer_alias'), base.get_attr('_writer_addr'))
ns.shutdown()
time.sleep(0.05)
def test_controller():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_b = run_agent(name='ka_b', base=ka.KaBase)
ka_b1 = run_agent(name='ka_b1', base=ka.KaBase)
bb.set_attr(_kaar={0: {}})
ka_b.add_blackboard(bb)
ka_b.connect_trigger()
ka_b1.add_blackboard(bb)
ka_b1.connect_trigger()
ka_b1.set_attr(_trigger_val=2)
bb.publish_trigger()
time.sleep(0.05)
bb.controller()
assert bb.get_attr('_kaar') == {0: {}, 1: {'ka_b': 0, 'ka_b1': 2}}
assert bb.get_attr('_ka_to_execute') == ('ka_b1', 2)
ns.shutdown()
time.sleep(0.05)
def test_diagnostics_agent_present():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
assert bb.diagnostics_agent_present('blank') == False
bb.connect_agent(ka.KaBase, 'ka_b')
ka_b = ns.proxy('ka_b')
assert bb.diagnostics_agent_present('ka_b') == True
assert ns.agents() == ['blackboard', 'ka_b']
bb.set_attr(_ka_to_execute=('ka_b',1))
bb.send_executor()
assert bb.diagnostics_agent_present('ka_b') == False
assert ns.agents() == ['blackboard']
ns.shutdown()
time.sleep(0.05)
def test_diagnostics_replace_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.set_attr(required_agents=[ka.KaBase])
bb.connect_agent(ka.KaBase, 'ka_b')
ka_b = ns.proxy('ka_b')
assert ns.agents() == ['blackboard', 'ka_b']
bb.diagnostics_replace_agent()
assert ns.agents() == ['blackboard', 'ka_b']
bb.send('shutdown_ka_b', 'message')
time.sleep(0.05)
assert ns.agents() == ['blackboard']
bb.diagnostics_replace_agent()
assert ns.agents() == ['blackboard', 'ka_b']
bb.set_attr(_ka_to_execute=('ka_b',1))
bb.send_executor()
bb.diagnostics_replace_agent()
assert ns.agents() == ['blackboard', 'ka_b']
ns.shutdown()
time.sleep(0.05)
def test_get_blackbaord():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': int})
bb.add_panel(1, ['panel_a', 'panel_b', 'panel_c'])
bb.update_abstract_lvl(1, 'test_name', {'entry 1': 'foo', 'entry 2': 5})
bb.update_abstract_lvl(1, 'test_name', {'entry 1': 'foo', 'entry 2': 5}, panel='panel_a')
assert bb.get_blackboard() == {'level 1': {'panel_a': {'test_name': {'entry 1': 'foo', 'entry 2': 5}},'panel_b': {},'panel_c': {}}}
ns.shutdown()
time.sleep(0.05)
def test_get_kaar():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.set_attr(_kaar={1: {'ka': 0, 'ka2': 1}})
assert bb.get_kaar() == {1: {'ka': 0, 'ka2': 1}}
ns.shutdown()
time.sleep(0.05)
def test_get_current_trigger_value():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.set_attr(_trigger_event=10)
assert bb.get_current_trigger_value() == 10
ns.shutdown()
time.sleep(0.05)
def test_send_executor():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_b = run_agent(name='ka_b', base=ka.KaBase)
ka_b.add_blackboard(bb)
ka_b.connect_trigger()
ka_b.connect_executor()
bb.set_attr(_ka_to_execute=('ka_b',1))
try:
bb.send_executor()
except NotImplementedError:
pass
ns.shutdown()
time.sleep(0.05)
def test_remove_bb_entry():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.add_abstract_lvl(2, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.add_panel(2, ['new', 'old'])
bb.update_abstract_lvl(1, 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
bb.update_abstract_lvl(2, 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}, panel='new')
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}, 'level 2': {'new' : {'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}, 'old': {}}}
bb.remove_bb_entry(1, 'core_1')
bb.remove_bb_entry(2, 'core_1', panel='new')
assert bb.get_attr('abstract_lvls') == {'level 1': {}, 'level 2': {'new':{}, 'old':{}}}
ns.shutdown()
time.sleep(0.05)
def test_remove_bb_entry_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.add_abstract_lvl(2, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
ka_base = run_agent(name='ka', base=ka.KaBase)
ka_base1 = run_agent(name='ka1', base=ka.KaBase)
ka_base.add_blackboard(bb)
ka_base.connect_writer()
ka_base.set_attr(bb_lvl=1)
ka_base1.add_blackboard(bb)
ka_base1.connect_writer()
ka_base1.set_attr(bb_lvl=2)
ka_base.write_to_bb(ka_base.get_attr('bb_lvl'), 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
ka_base1.write_to_bb(ka_base1.get_attr('bb_lvl'), 'core_2', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}},
'level 2': {'core_2': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
bb.remove_bb_entry(1, 'core_1')
assert bb.get_attr('abstract_lvls') == {'level 1': {},
'level 2': {'core_2': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
ns.shutdown()
time.sleep(0.05)
def test_update_abstract_lvl():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.update_abstract_lvl(1, 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
bb.update_abstract_lvl(1, 'core_2', {'entry 1': 'test', 'entry 2': False, 'entry 4': 2})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
bb.update_abstract_lvl(1, 'core_2', {'entry 1': 'test', 'entry 2': False, 'entry 3': False})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
bb.update_abstract_lvl(1, 'core_2', {'entry 1': 'test_2', 'entry 2': True, 'entry 3': 6})
assert bb.get_attr('abstract_lvls') == {'level 1':
{'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2},
'core_2': {'entry 1': 'test_2', 'entry 2': True, 'entry 3': 6}}}
ns.shutdown()
time.sleep(0.05)
def test_update_abstract_lvl_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_base = run_agent(name='ka', base=ka.KaBase)
ka_base.add_blackboard(bb)
ka_base.connect_writer()
ka_base.set_attr(bb_lvl=1)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
ka_base.write_to_bb(ka_base.get_attr('bb_lvl'), 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
ns.shutdown()
time.sleep(0.05)
def test_update_abstract_lvl_overwrite():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.update_abstract_lvl(1, 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1' : {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
bb.update_abstract_lvl(1, 'core_1', {'entry 1': 'testing', 'entry 2': True, 'entry 3': 5})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1' : {'entry 1': 'testing', 'entry 2': True, 'entry 3': 5}}}
ns.shutdown()
time.sleep(0.05)
def test_update_abstract_lvl_mult():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.add_abstract_lvl(2, {'entry 1': float, 'entry 2': str})
bb.add_abstract_lvl(3, {'entry 3': {'foo': float, 'spam': float}})
bb.update_abstract_lvl(1, 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
bb.update_abstract_lvl(1, 'core_2', {'entry 1': 'test_2', 'entry 2': True, 'entry 3': 6})
bb.update_abstract_lvl(2, 'core_2', {'entry 1': 1.2, 'entry 2': 'testing'})
bb.update_abstract_lvl(3, 'core_2', {'entry 3': {'foo': 1.1, 'spam': 3.2}})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2},
'core_2': {'entry 1': 'test_2', 'entry 2': True, 'entry 3': 6}},
'level 2': {'core_2': {'entry 1': 1.2, 'entry 2': 'testing'}},
'level 3': {'core_2': {'entry 3': {'foo': 1.1, 'spam': 3.2}}}}
ns.shutdown()
time.sleep(0.05)
def test_update_abstract_lvl_multi_agent():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_base = run_agent(name='ka', base=ka.KaBase)
ka_base1 = run_agent(name='ka1', base=ka.KaBase)
ka_base.add_blackboard(bb)
ka_base.connect_writer()
ka_base.set_attr(bb_lvl=1)
ka_base1.add_blackboard(bb)
ka_base1.connect_writer()
ka_base1.set_attr(bb_lvl=2)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.add_abstract_lvl(2, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
ka_base.write_to_bb(ka_base.get_attr('bb_lvl'), 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
ka_base1.write_to_bb(ka_base1.get_attr('bb_lvl'), 'core_2',{'entry 1': 'test', 'entry 2': False, 'entry 3': 2} )
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}},
'level 2': {'core_2': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
ns.shutdown()
time.sleep(0.05)
def test_rewrite_bb_entry():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
ka_base = run_agent(name='ka', base=ka.KaBase)
ka_base1 = run_agent(name='ka1', base=ka.KaBase)
ka_base.add_blackboard(bb)
ka_base.connect_writer()
ka_base.set_attr(bb_lvl=1)
ka_base1.add_blackboard(bb)
ka_base1.connect_writer()
ka_base1.set_attr(bb_lvl=2)
bb.add_abstract_lvl(1, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
bb.add_abstract_lvl(2, {'entry 1': str, 'entry 2': bool, 'entry 3': int})
ka_base.write_to_bb(ka_base.get_attr('bb_lvl'), 'core_1', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
ka_base1.write_to_bb(ka_base1.get_attr('bb_lvl'), 'core_2', {'entry 1': 'test', 'entry 2': False, 'entry 3': 2})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}},
'level 2': {'core_2': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}}}
ka_base1.write_to_bb(ka_base1.get_attr('bb_lvl'), 'core_2', {'entry 1': 'test_new', 'entry 2': True, 'entry 3': 5})
assert bb.get_attr('abstract_lvls') == {'level 1': {'core_1': {'entry 1': 'test', 'entry 2': False, 'entry 3': 2}},
'level 2': {'core_2': {'entry 1': 'test_new', 'entry 2': True, 'entry 3': 5}}}
ns.shutdown()
time.sleep(0.05)
def test_write_to_h5():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
raw_data = {'test_1': (1,1,1), 'test_2': 0.0, 'test_3': 1}
bb.add_abstract_lvl(1, {'entry 1': tuple, 'entry 2': bool})
bb.add_abstract_lvl(2, {'entry 1': int, 'entry 2': float})
bb.add_abstract_lvl(4, {'entry 1': {'test 1': {'nested_test': int}}})
bb.add_abstract_lvl(3, {'entry 1': {'test_1': tuple, 'test_2': float, 'test_3': int}, 'entry 2': str, 'entry 3': list})
bb.add_panel(1, ['new','old'])
bb.update_abstract_lvl(1, 'core_2', {'entry 1': (1,1,0), 'entry 2': True}, panel='new')
bb.update_abstract_lvl(2, 'core_2', {'entry 1': 1, 'entry 2': 1.2})
bb.update_abstract_lvl(3, 'core_2', {'entry 1': raw_data, 'entry 2': 'test', 'entry 3': [1,2,3]})
bb.update_abstract_lvl(4, 'core_4', {'entry 1': {'test 1': {'nested_test': 3}}})
time.sleep(0.05)
bb.write_to_h5()
abs_lvls = bb.get_attr('abstract_lvls')
bb_archive = h5py.File('blackboard_archive.h5', 'r+')
for k,v in bb_archive.items():
assert k in abs_lvls.keys()
for k1,v1 in v.items():
assert k1 in abs_lvls[k].keys()
for k2,v2 in v1.items():
assert k2 in abs_lvls[k][k1].keys()
if type(v2) == h5py.Group:
for k3,v3 in v2.items():
if isinstance(v3, h5py._hl.group.Group):
assert abs_lvls[k][k1][k2][k3]['nested_test'] == v3['nested_test'][0]
elif isinstance(v3[0], Iterable):
assert list(abs_lvls[k][k1][k2][k3]) == list(v3[0])
else:
assert abs_lvls[k][k1][k2][k3] == v3[0]
elif type(v2[0]) == np.bytes_:
assert abs_lvls[k][k1][k2] == v2[0].decode('UTF-8')
else:
assert np.array(abs_lvls[k][k1][k2]).all() == v2[0].all()
bb_archive.close()
os.remove('blackboard_archive.h5')
ns.shutdown()
time.sleep(0.05)
def test_load_h5():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb_h5 = run_agent(name='blackboard1', base=blackboard.Blackboard)
bb_h5_2 = run_agent(name='blackboard2', base=blackboard.Blackboard)
bb_h5.set_attr(archive_name='blackboard_archive.h5')
bb_h5_2.set_attr(archive_name='blackboard_archive.h5')
raw_data = {'test_1': (1,1,1), 'test_2': 0.0, 'test_3': 1}
bb.add_abstract_lvl(1, {'entry 1': tuple, 'entry 2': bool})
bb.add_panel(1, ['new','old'])
bb.add_abstract_lvl(2, {'entry 1': int, 'entry 2': float})
bb.add_abstract_lvl(3, {'entry 1': {'test_1': tuple, 'test_2': float, 'test_3': int}, 'entry 2': str, 'entry 3': list})
bb.add_abstract_lvl(4, {'entry 1': {'test 1': {'nested_test': int}}})
bb.update_abstract_lvl(1, 'core_1', {'entry 1': (1,1,0), 'entry 2': True}, panel = 'new')
bb.update_abstract_lvl(1, 'core_2', {'entry 1': (1,1,0), 'entry 2': True}, panel = 'old')
bb.update_abstract_lvl(1, 'core_3', {'entry 1': (1,1,0), 'entry 2': True}, panel = 'old')
bb.update_abstract_lvl(2, 'core_2', {'entry 1': 1, 'entry 2': 1.2})
bb.update_abstract_lvl(3, 'core_3', {'entry 1': raw_data, 'entry 2': 'test', 'entry 3': [1.1,2.0,3.0]})
bb.update_abstract_lvl(4, 'core_4', {'entry 1': {'test 1': {'nested_test': 3}}})
time.sleep(0.05)
bb.write_to_h5()
time.sleep(3)
bb_h5.load_h5(panels={1: ['new','old']})
bb_h5_bb = bb_h5.get_attr('abstract_lvls')
bb_bb = bb.get_attr('abstract_lvls')
assert bb_h5_bb == bb_bb
bb.update_abstract_lvl(2, 'core_3', {'entry 1': 1, 'entry 2': 1.2})
bb.remove_bb_entry(2, 'core_2')
bb.remove_bb_entry(1, 'core_1', panel='new')
bb.write_to_h5()
time.sleep(3)
bb_h5_2.load_h5(panels={1: ['new','old']})
bb_h5_bb = bb_h5_2.get_attr('abstract_lvls')
bb_bb = bb.get_attr('abstract_lvls')
assert bb_h5_bb == bb_bb
ns.shutdown()
os.remove('blackboard_archive.h5')
time.sleep(0.05)
def test_h5_group_writer():
"""
Function cannot current be isolated to test, cannot pickle an H5 file to send to the agent
"""
pass
# ns = run_nameserver()
# bb = run_agent(name='blackboard', base=blackboard.Blackboard)
#bb.add_abstract_lvl(2, {'entry 1': int, 'entry 2': float})
#bb.update_abstract_lvl(2, 'core_2', {'entry 1': 1, 'entry 2': 1.2})
#bb.write_to_h5()
#time.sleep(1)
# h5 = h5py.File('blackboard_archive.h5', 'w')
# h5.create_group('level 1')
# h5_group = h5['level 1']
# bb.h5_group_writer(h5_group, 'entry 1', {'a': 1, 'b': 3})
# assert h5['level 1']['entry 1'] == {'a': 1, 'b': 3}
# ns.shutdown()
# os.remove('blackboard_archive.h5')
# time.sleep(0.05)
def test_connect_sub_blackboard():
try:
ns = run_nameserver()
except OSError:
time.sleep(0.5)
ns = run_nameserver()
bb = run_agent(name='blackboard', base=blackboard.Blackboard)
bb.connect_sub_blackboard('sub_bb', blackboard.Blackboard)
sub_bb = bb.get_attr('_sub_bbs')
assert [x for x in sub_bb.keys()] == ['sub_bb']
sub_bb = sub_bb['sub_bb']
assert sub_bb.get_attr('name') == 'sub_bb'
assert sub_bb.get_attr('archive_name') == 'sub_bb.h5'
ns.shutdown()
time.sleep(0.05)
| 38.248547
| 218
| 0.595554
| 3,880
| 26,315
| 3.80232
| 0.04201
| 0.037823
| 0.042703
| 0.04982
| 0.869721
| 0.840236
| 0.821663
| 0.784654
| 0.74961
| 0.722633
| 0
| 0.041791
| 0.224359
| 26,315
| 687
| 219
| 38.304221
| 0.681005
| 0.023789
| 0
| 0.706093
| 0
| 0
| 0.18597
| 0.007482
| 0
| 0
| 0
| 0
| 0.137993
| 1
| 0.053763
| false
| 0.003584
| 0.017921
| 0
| 0.071685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60454ec6a6a94119f4e986c8195f34e072d0686c
| 304
|
py
|
Python
|
apps/manager/purpleserver/manager/views/__init__.py
|
rcknr/purplship-server
|
f8ec35af3da870fada0e989c20a8349c958c637c
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2020-02-03T08:11:21.000Z
|
2021-04-13T02:00:38.000Z
|
apps/manager/purpleserver/manager/views/__init__.py
|
rcknr/purplship-server
|
f8ec35af3da870fada0e989c20a8349c958c637c
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-02-12T00:25:08.000Z
|
2021-04-20T10:31:59.000Z
|
apps/manager/purpleserver/manager/views/__init__.py
|
rcknr/purplship-server
|
f8ec35af3da870fada0e989c20a8349c958c637c
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2020-02-03T08:10:50.000Z
|
2021-04-13T15:17:12.000Z
|
import purpleserver.manager.views.addresses
import purpleserver.manager.views.parcels
import purpleserver.manager.views.shipments
import purpleserver.manager.views.trackers
import purpleserver.manager.views.customs
import purpleserver.manager.views.pickups
from purpleserver.manager.router import router
| 38
| 46
| 0.881579
| 36
| 304
| 7.444444
| 0.333333
| 0.496269
| 0.559701
| 0.671642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 304
| 7
| 47
| 43.428571
| 0.930556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
607d7ac14e09a241229bd8a04619b937123594c1
| 337
|
py
|
Python
|
stock/admin.py
|
flakesrc/wpensar-test
|
3b70a0a4d510554260090d1d0a64d1c2b1848e65
|
[
"MIT"
] | null | null | null |
stock/admin.py
|
flakesrc/wpensar-test
|
3b70a0a4d510554260090d1d0a64d1c2b1848e65
|
[
"MIT"
] | null | null | null |
stock/admin.py
|
flakesrc/wpensar-test
|
3b70a0a4d510554260090d1d0a64d1c2b1848e65
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# from .models import StockModel
# @admin.register(StockModel)
# class StockModelAdmin(admin.ModelAdmin):
# fields = ('name', 'quantity', 'price', 'avg_price')
# list_display = ('name', 'quantity', 'price', 'avg_price', 'created')
# list_filter = ('name', 'quantity', 'price', 'avg_price')
| 33.7
| 74
| 0.670623
| 37
| 337
| 5.972973
| 0.540541
| 0.162896
| 0.230769
| 0.271493
| 0.339367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148368
| 337
| 9
| 75
| 37.444444
| 0.770035
| 0.857567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6095f89b4131153ba0555e8031d9a98b02cbee63
| 226
|
py
|
Python
|
tests/__init__.py
|
akram256/authors_heaven
|
23bc769fc1f03da391426eaf00ec14c15fdeff04
|
[
"BSD-3-Clause"
] | null | null | null |
tests/__init__.py
|
akram256/authors_heaven
|
23bc769fc1f03da391426eaf00ec14c15fdeff04
|
[
"BSD-3-Clause"
] | null | null | null |
tests/__init__.py
|
akram256/authors_heaven
|
23bc769fc1f03da391426eaf00ec14c15fdeff04
|
[
"BSD-3-Clause"
] | null | null | null |
user1 = {
"user": {
"username": "akram",
"email": "akram.mukasa@andela.com",
"password": "Akram@100555"
}
}
login1 = {"user": {"email": "akram.mukasa@andela.com", "password": "Akram@100555"}}
| 20.545455
| 83
| 0.530973
| 22
| 226
| 5.454545
| 0.5
| 0.166667
| 0.266667
| 0.366667
| 0.733333
| 0.733333
| 0.733333
| 0.733333
| 0
| 0
| 0
| 0.081395
| 0.238938
| 226
| 10
| 84
| 22.6
| 0.616279
| 0
| 0
| 0
| 0
| 0
| 0.52
| 0.204444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
60c8b4dc01bcdc2425e024246b46f0e4e334acd8
| 73,135
|
py
|
Python
|
infoblox_netmri/api/broker/v3_6_0/vrrp_router_stat_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/vrrp_router_stat_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/vrrp_router_stat_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..broker import Broker
class VrrpRouterStatBroker(Broker):
controller = "vrrp_router_stats"
def show(self, **kwargs):
"""Shows the details for the specified vrrp router stat.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrrp_router_stat: The vrrp router stat identified by the specified VrrpRouterStatsID.
:rtype vrrp_router_stat: VrrpRouterStat
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available vrrp router stats. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VrrpRouterStatsID
:param sort: The data field(s) to use for sorting the output. Default is VrrpRouterStatsID. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VrrpRouterStat. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrrp_router_stats: An array of the VrrpRouterStat objects that match the specified input criteria.
:rtype vrrp_router_stats: Array of VrrpRouterStat
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available vrrp router stats matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param EndTime: The date and time the record was last modified in NetMRI.
:type EndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics.
:type IprgMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics.
:type IprgMemberID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgNumber: The unique IprgNumber in the Vrrp router.
:type IprgNumber: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The unique IprgNumber in the Vrrp router.
:type IprgNumber: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StartTime: The date and time the record was initially created in NetMRI.
:type StartTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic
:type VrrpAddressListErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic
:type VrrpAddressListErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics.
:type VrrpAdvertiseIntervalErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics.
:type VrrpAdvertiseIntervalErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics.
:type VrrpAdvertiseRcvd: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics.
:type VrrpAdvertiseRcvd: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics.
:type VrrpAuthFailures: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics.
:type VrrpAuthFailures: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpAuthTypeMismatch: The mismatch authentication type.
:type VrrpAuthTypeMismatch: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpAuthTypeMismatch: The mismatch authentication type.
:type VrrpAuthTypeMismatch: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpBecomeMaster: The master of the Vrrp Router Statistics.
:type VrrpBecomeMaster: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpBecomeMaster: The master of the Vrrp Router Statistics.
:type VrrpBecomeMaster: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics.
:type VrrpInvalidAuthType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics.
:type VrrpInvalidAuthType: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpInvalidTypePktsRcvd: The packet received with Invalid Type.
:type VrrpInvalidTypePktsRcvd: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpInvalidTypePktsRcvd: The packet received with Invalid Type.
:type VrrpInvalidTypePktsRcvd: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpIpTtlErrors: The total number of IP address error occurred in the Vrrp Router Statistics.
:type VrrpIpTtlErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpIpTtlErrors: The total number of IP address error occurred in the Vrrp Router Statistics.
:type VrrpIpTtlErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics.
:type VrrpPacketLengthErrors: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics.
:type VrrpPacketLengthErrors: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsRcvd: The packet received with priority zero.
:type VrrpPriorityZeroPktsRcvd: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsRcvd: The packet received with priority zero.
:type VrrpPriorityZeroPktsRcvd: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsSent: The packet sent with priority zero.
:type VrrpPriorityZeroPktsSent: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpPriorityZeroPktsSent: The packet sent with priority zero.
:type VrrpPriorityZeroPktsSent: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry.
:type ifIndex: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry.
:type ifIndex: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VrrpRouterStatsID
:param sort: The data field(s) to use for sorting the output. Default is VrrpRouterStatsID. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VrrpRouterStat. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against vrrp router stats, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DeviceID, EndTime, InterfaceID, IprgMemberID, IprgNumber, StartTime, VrrpAddressListErrors, VrrpAdvertiseIntervalErrors, VrrpAdvertiseRcvd, VrrpAuthFailures, VrrpAuthTypeMismatch, VrrpBecomeMaster, VrrpInvalidAuthType, VrrpInvalidTypePktsRcvd, VrrpIpTtlErrors, VrrpPacketLengthErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpRouterStatsID, ifIndex.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrrp_router_stats: An array of the VrrpRouterStat objects that match the specified input criteria.
:rtype vrrp_router_stats: Array of VrrpRouterStat
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available vrrp router stats matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DeviceID, EndTime, InterfaceID, IprgMemberID, IprgNumber, StartTime, VrrpAddressListErrors, VrrpAdvertiseIntervalErrors, VrrpAdvertiseRcvd, VrrpAuthFailures, VrrpAuthTypeMismatch, VrrpBecomeMaster, VrrpInvalidAuthType, VrrpInvalidTypePktsRcvd, VrrpIpTtlErrors, VrrpPacketLengthErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpRouterStatsID, ifIndex.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which Vrrp Routes statistics information was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified.
:type val_f_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified.
:type val_c_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the local interface for this Vrrp Router Statistics table entry. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMemberID: The operator to apply to the field IprgMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMemberID: The internal NetMRI identifier of Iprg member in the vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMemberID: If op_IprgMemberID is specified, the field named in this input will be compared to the value in IprgMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMemberID must be specified if op_IprgMemberID is specified.
:type val_f_IprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMemberID: If op_IprgMemberID is specified, this value will be compared to the value in IprgMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMemberID must be specified if op_IprgMemberID is specified.
:type val_c_IprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgNumber: The operator to apply to the field IprgNumber. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgNumber: The unique IprgNumber in the Vrrp router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgNumber: If op_IprgNumber is specified, the field named in this input will be compared to the value in IprgNumber using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgNumber must be specified if op_IprgNumber is specified.
:type val_f_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgNumber: If op_IprgNumber is specified, this value will be compared to the value in IprgNumber using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgNumber must be specified if op_IprgNumber is specified.
:type val_c_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified.
:type val_f_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified.
:type val_c_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAddressListErrors: The operator to apply to the field VrrpAddressListErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAddressListErrors: The number of address list errors in the Vrrp router statistic For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAddressListErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAddressListErrors: If op_VrrpAddressListErrors is specified, the field named in this input will be compared to the value in VrrpAddressListErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAddressListErrors must be specified if op_VrrpAddressListErrors is specified.
:type val_f_VrrpAddressListErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAddressListErrors: If op_VrrpAddressListErrors is specified, this value will be compared to the value in VrrpAddressListErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAddressListErrors must be specified if op_VrrpAddressListErrors is specified.
:type val_c_VrrpAddressListErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAdvertiseIntervalErrors: The operator to apply to the field VrrpAdvertiseIntervalErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAdvertiseIntervalErrors: The total number of interval errors in the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAdvertiseIntervalErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAdvertiseIntervalErrors: If op_VrrpAdvertiseIntervalErrors is specified, the field named in this input will be compared to the value in VrrpAdvertiseIntervalErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAdvertiseIntervalErrors must be specified if op_VrrpAdvertiseIntervalErrors is specified.
:type val_f_VrrpAdvertiseIntervalErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAdvertiseIntervalErrors: If op_VrrpAdvertiseIntervalErrors is specified, this value will be compared to the value in VrrpAdvertiseIntervalErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAdvertiseIntervalErrors must be specified if op_VrrpAdvertiseIntervalErrors is specified.
:type val_c_VrrpAdvertiseIntervalErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAdvertiseRcvd: The operator to apply to the field VrrpAdvertiseRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAdvertiseRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAdvertiseRcvd: If op_VrrpAdvertiseRcvd is specified, the field named in this input will be compared to the value in VrrpAdvertiseRcvd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAdvertiseRcvd must be specified if op_VrrpAdvertiseRcvd is specified.
:type val_f_VrrpAdvertiseRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAdvertiseRcvd: If op_VrrpAdvertiseRcvd is specified, this value will be compared to the value in VrrpAdvertiseRcvd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAdvertiseRcvd must be specified if op_VrrpAdvertiseRcvd is specified.
:type val_c_VrrpAdvertiseRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAuthFailures: The operator to apply to the field VrrpAuthFailures. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAuthFailures: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAuthFailures: If op_VrrpAuthFailures is specified, the field named in this input will be compared to the value in VrrpAuthFailures using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAuthFailures must be specified if op_VrrpAuthFailures is specified.
:type val_f_VrrpAuthFailures: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAuthFailures: If op_VrrpAuthFailures is specified, this value will be compared to the value in VrrpAuthFailures using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAuthFailures must be specified if op_VrrpAuthFailures is specified.
:type val_c_VrrpAuthFailures: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAuthTypeMismatch: The operator to apply to the field VrrpAuthTypeMismatch. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAuthTypeMismatch: The mismatch authentication type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAuthTypeMismatch: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAuthTypeMismatch: If op_VrrpAuthTypeMismatch is specified, the field named in this input will be compared to the value in VrrpAuthTypeMismatch using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAuthTypeMismatch must be specified if op_VrrpAuthTypeMismatch is specified.
:type val_f_VrrpAuthTypeMismatch: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAuthTypeMismatch: If op_VrrpAuthTypeMismatch is specified, this value will be compared to the value in VrrpAuthTypeMismatch using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAuthTypeMismatch must be specified if op_VrrpAuthTypeMismatch is specified.
:type val_c_VrrpAuthTypeMismatch: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpBecomeMaster: The operator to apply to the field VrrpBecomeMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpBecomeMaster: The master of the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpBecomeMaster: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpBecomeMaster: If op_VrrpBecomeMaster is specified, the field named in this input will be compared to the value in VrrpBecomeMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpBecomeMaster must be specified if op_VrrpBecomeMaster is specified.
:type val_f_VrrpBecomeMaster: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpBecomeMaster: If op_VrrpBecomeMaster is specified, this value will be compared to the value in VrrpBecomeMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpBecomeMaster must be specified if op_VrrpBecomeMaster is specified.
:type val_c_VrrpBecomeMaster: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpInvalidAuthType: The operator to apply to the field VrrpInvalidAuthType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpInvalidAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpInvalidAuthType: If op_VrrpInvalidAuthType is specified, the field named in this input will be compared to the value in VrrpInvalidAuthType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpInvalidAuthType must be specified if op_VrrpInvalidAuthType is specified.
:type val_f_VrrpInvalidAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpInvalidAuthType: If op_VrrpInvalidAuthType is specified, this value will be compared to the value in VrrpInvalidAuthType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpInvalidAuthType must be specified if op_VrrpInvalidAuthType is specified.
:type val_c_VrrpInvalidAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpInvalidTypePktsRcvd: The operator to apply to the field VrrpInvalidTypePktsRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpInvalidTypePktsRcvd: The packet received with Invalid Type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpInvalidTypePktsRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpInvalidTypePktsRcvd: If op_VrrpInvalidTypePktsRcvd is specified, the field named in this input will be compared to the value in VrrpInvalidTypePktsRcvd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpInvalidTypePktsRcvd must be specified if op_VrrpInvalidTypePktsRcvd is specified.
:type val_f_VrrpInvalidTypePktsRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpInvalidTypePktsRcvd: If op_VrrpInvalidTypePktsRcvd is specified, this value will be compared to the value in VrrpInvalidTypePktsRcvd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpInvalidTypePktsRcvd must be specified if op_VrrpInvalidTypePktsRcvd is specified.
:type val_c_VrrpInvalidTypePktsRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpIpTtlErrors: The operator to apply to the field VrrpIpTtlErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpIpTtlErrors: The total number of IP address error occurred in the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpIpTtlErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpIpTtlErrors: If op_VrrpIpTtlErrors is specified, the field named in this input will be compared to the value in VrrpIpTtlErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpIpTtlErrors must be specified if op_VrrpIpTtlErrors is specified.
:type val_f_VrrpIpTtlErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpIpTtlErrors: If op_VrrpIpTtlErrors is specified, this value will be compared to the value in VrrpIpTtlErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpIpTtlErrors must be specified if op_VrrpIpTtlErrors is specified.
:type val_c_VrrpIpTtlErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpPacketLengthErrors: The operator to apply to the field VrrpPacketLengthErrors. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpPacketLengthErrors: The number of packet length errors in the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpPacketLengthErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpPacketLengthErrors: If op_VrrpPacketLengthErrors is specified, the field named in this input will be compared to the value in VrrpPacketLengthErrors using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpPacketLengthErrors must be specified if op_VrrpPacketLengthErrors is specified.
:type val_f_VrrpPacketLengthErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpPacketLengthErrors: If op_VrrpPacketLengthErrors is specified, this value will be compared to the value in VrrpPacketLengthErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpPacketLengthErrors must be specified if op_VrrpPacketLengthErrors is specified.
:type val_c_VrrpPacketLengthErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpPriorityZeroPktsRcvd: The operator to apply to the field VrrpPriorityZeroPktsRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpPriorityZeroPktsRcvd: The packet received with priority zero. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpPriorityZeroPktsRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpPriorityZeroPktsRcvd: If op_VrrpPriorityZeroPktsRcvd is specified, the field named in this input will be compared to the value in VrrpPriorityZeroPktsRcvd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpPriorityZeroPktsRcvd must be specified if op_VrrpPriorityZeroPktsRcvd is specified.
:type val_f_VrrpPriorityZeroPktsRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpPriorityZeroPktsRcvd: If op_VrrpPriorityZeroPktsRcvd is specified, this value will be compared to the value in VrrpPriorityZeroPktsRcvd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpPriorityZeroPktsRcvd must be specified if op_VrrpPriorityZeroPktsRcvd is specified.
:type val_c_VrrpPriorityZeroPktsRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpPriorityZeroPktsSent: The operator to apply to the field VrrpPriorityZeroPktsSent. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpPriorityZeroPktsSent: The packet sent with priority zero. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpPriorityZeroPktsSent: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpPriorityZeroPktsSent: If op_VrrpPriorityZeroPktsSent is specified, the field named in this input will be compared to the value in VrrpPriorityZeroPktsSent using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpPriorityZeroPktsSent must be specified if op_VrrpPriorityZeroPktsSent is specified.
:type val_f_VrrpPriorityZeroPktsSent: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpPriorityZeroPktsSent: If op_VrrpPriorityZeroPktsSent is specified, this value will be compared to the value in VrrpPriorityZeroPktsSent using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpPriorityZeroPktsSent must be specified if op_VrrpPriorityZeroPktsSent is specified.
:type val_c_VrrpPriorityZeroPktsSent: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpRouterStatsID: The operator to apply to the field VrrpRouterStatsID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpRouterStatsID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpRouterStatsID: If op_VrrpRouterStatsID is specified, the field named in this input will be compared to the value in VrrpRouterStatsID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpRouterStatsID must be specified if op_VrrpRouterStatsID is specified.
:type val_f_VrrpRouterStatsID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpRouterStatsID: If op_VrrpRouterStatsID is specified, this value will be compared to the value in VrrpRouterStatsID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpRouterStatsID must be specified if op_VrrpRouterStatsID is specified.
:type val_c_VrrpRouterStatsID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The SNMP index for the local interface for this Vrrp router statistics table entry. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified.
:type val_f_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified.
:type val_c_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the vrrp router stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the vrrp router stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrrp router stat methods. The listed methods will be called on each vrrp router stat returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VrrpRouterStatsID
:param sort: The data field(s) to use for sorting the output. Default is VrrpRouterStatsID. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VrrpRouterStat. Valid values are VrrpRouterStatsID, DeviceID, IprgMemberID, InterfaceID, StartTime, EndTime, ifIndex, IprgNumber, VrrpBecomeMaster, VrrpAdvertiseRcvd, VrrpAdvertiseIntervalErrors, VrrpAuthFailures, VrrpIpTtlErrors, VrrpPriorityZeroPktsRcvd, VrrpPriorityZeroPktsSent, VrrpInvalidTypePktsRcvd, VrrpAddressListErrors, VrrpInvalidAuthType, VrrpAuthTypeMismatch, VrrpPacketLengthErrors. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrrp_router_stats: An array of the VrrpRouterStat objects that match the specified input criteria.
:rtype vrrp_router_stats: Array of VrrpRouterStat
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def infradevice(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VrrpRouterStatsID: The internal NetMRI identifier of the Vrrp Router Statistics.
:type VrrpRouterStatsID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
| 54.700823
| 779
| 0.622657
| 8,525
| 73,135
| 5.291965
| 0.037302
| 0.069158
| 0.044953
| 0.071818
| 0.955424
| 0.95354
| 0.921177
| 0.905905
| 0.900718
| 0.898812
| 0
| 0.004226
| 0.297915
| 73,135
| 1,337
| 780
| 54.700823
| 0.874384
| 0.828837
| 0
| 0
| 0
| 0
| 0.051556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.066667
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
60e56c48b77c6786ea743f159a03dfe65fcc11eb
| 78
|
py
|
Python
|
goje_scrapper/__init__.py
|
alifzl/goje_scrapper
|
c831809f0696375d509869db347a79d86d939a86
|
[
"MIT"
] | 6
|
2021-04-05T08:35:06.000Z
|
2021-07-03T22:32:58.000Z
|
goje_scrapper/__init__.py
|
alifzl/goje_scrapper
|
c831809f0696375d509869db347a79d86d939a86
|
[
"MIT"
] | null | null | null |
goje_scrapper/__init__.py
|
alifzl/goje_scrapper
|
c831809f0696375d509869db347a79d86d939a86
|
[
"MIT"
] | null | null | null |
from goje_scrapper.goje import Goje
from goje_scrapper.goje import GojeScraper
| 39
| 42
| 0.884615
| 12
| 78
| 5.583333
| 0.416667
| 0.238806
| 0.477612
| 0.597015
| 0.776119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 78
| 2
| 42
| 39
| 0.943662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
880dce770c08abfb66574f326206c6b4a3919be7
| 1,039
|
py
|
Python
|
units/volume/gallons.py
|
putridparrot/PyUnits
|
4f1095c6fc0bee6ba936921c391913dbefd9307c
|
[
"MIT"
] | null | null | null |
units/volume/gallons.py
|
putridparrot/PyUnits
|
4f1095c6fc0bee6ba936921c391913dbefd9307c
|
[
"MIT"
] | null | null | null |
units/volume/gallons.py
|
putridparrot/PyUnits
|
4f1095c6fc0bee6ba936921c391913dbefd9307c
|
[
"MIT"
] | null | null | null |
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
def to_millilitres(value):
return value * 4546.091879
def to_litres(value):
return value * 4.546091879
def to_kilolitres(value):
return value * 0.0045460918799
def to_teaspoons(value):
return value * 768.0
def to_tablespoons(value):
return value * 256.0
def to_quarts(value):
return value * 4.0
def to_pints(value):
return value * 8.0
def to_fluid_ounces(value):
return value * 160.0
def to_u_s_teaspoons(value):
return value / 0.00108421072977394606
def to_u_s_tablespoons(value):
return value / 0.003252632189321838592
def to_u_s_quarts(value):
return value / 0.20816846011659767808
def to_u_s_pints(value):
return value / 0.10408423005829883904
def to_u_s_gallons(value):
return value / 0.83267384046639071232
def to_u_s_fluid_ounces(value):
return value / 0.006505264378643677184
def to_u_s_cups(value):
return value / 0.052042115029149417472
| 27.342105
| 62
| 0.772859
| 160
| 1,039
| 4.825
| 0.31875
| 0.09715
| 0.310881
| 0.176166
| 0.069948
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224719
| 0.143407
| 1,039
| 37
| 63
| 28.081081
| 0.642697
| 0.143407
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.